diff --git a/.circleci/config.yml b/.circleci/config.yml index 7ad79354..04a03eb2 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -26,17 +26,12 @@ jobs: - checkout - restore_cache: keys: - - v3-pkg-cache + - v4-pkg-cache - run: name: tools command: | export PATH="$GOBIN:$PATH" make get_tools - - run: - name: dependencies - command: | - export PATH="$GOBIN:$PATH" - make get_vendor_deps - run: name: binaries command: | @@ -48,7 +43,7 @@ jobs: - bin - profiles - save_cache: - key: v3-pkg-cache + key: v4-pkg-cache paths: - /go/pkg - save_cache: @@ -62,7 +57,7 @@ jobs: - attach_workspace: at: /tmp/workspace - restore_cache: - key: v3-pkg-cache + key: v4-pkg-cache - restore_cache: key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} - run: @@ -78,7 +73,7 @@ jobs: - attach_workspace: at: /tmp/workspace - restore_cache: - key: v3-pkg-cache + key: v4-pkg-cache - restore_cache: key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} - run: @@ -87,12 +82,6 @@ jobs: set -ex export PATH="$GOBIN:$PATH" make lint - - run: - name: check_dep - command: | - set -ex - export PATH="$GOBIN:$PATH" - make check_dep test_abci_apps: <<: *defaults @@ -100,7 +89,7 @@ jobs: - attach_workspace: at: /tmp/workspace - restore_cache: - key: v3-pkg-cache + key: v4-pkg-cache - restore_cache: key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} - run: @@ -117,7 +106,7 @@ jobs: - attach_workspace: at: /tmp/workspace - restore_cache: - key: v3-pkg-cache + key: v4-pkg-cache - restore_cache: key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} - run: @@ -132,7 +121,7 @@ jobs: - attach_workspace: at: /tmp/workspace - restore_cache: - key: v3-pkg-cache + key: v4-pkg-cache - restore_cache: key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} - run: sudo apt-get update && sudo apt-get install -y --no-install-recommends bsdmainutils @@ -147,7 +136,7 @@ jobs: - attach_workspace: at: /tmp/workspace - restore_cache: - key: v3-pkg-cache + key: v4-pkg-cache - restore_cache: key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} - run: mkdir -p /tmp/logs @@ -157,7 +146,7 @@ jobs: for pkg in $(go list github.com/tendermint/tendermint/... | circleci tests split --split-by=timings); do id=$(basename "$pkg") - go test -v -timeout 5m -race -coverprofile=/tmp/workspace/profiles/$id.out -covermode=atomic "$pkg" | tee "/tmp/logs/$id-$RANDOM.log" + GO111MODULE=on go test -v -timeout 5m -mod=readonly -race -coverprofile=/tmp/workspace/profiles/$id.out -covermode=atomic "$pkg" | tee "/tmp/logs/$id-$RANDOM.log" done - persist_to_workspace: root: /tmp/workspace @@ -172,7 +161,7 @@ jobs: - attach_workspace: at: /tmp/workspace - restore_cache: - key: v3-pkg-cache + key: v4-pkg-cache - restore_cache: key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} - run: @@ -219,7 +208,7 @@ jobs: - attach_workspace: at: /tmp/workspace - restore_cache: - key: v3-pkg-cache + key: v4-pkg-cache - restore_cache: key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} - run: @@ -278,15 +267,15 @@ jobs: - run: name: Build dependencies command: | - make get_tools get_vendor_deps + make get_tools - persist_to_workspace: root: . 
paths: - "release-version.source" - save_cache: - key: v1-release-deps-{{ .Branch }}-{{ .Revision }} - paths: - - "vendor" + key: v2-release-deps-{{ checksum "go.sum" }} + paths: + - "/go/pkg/mod" build_artifacts: <<: *defaults steps: - checkout - restore_cache: keys: - - v1-release-deps-{{ .Branch }}-{{ .Revision }} + - v2-release-deps-{{ checksum "go.sum" }} - attach_workspace: at: /tmp/workspace - run: diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index d4f55392..e1863c78 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,5 +1,12 @@ - + + +* [ ] Referenced an issue explaining the need for the change * [ ] Updated all relevant documentation in docs * [ ] Updated all code comments where relevant * [ ] Wrote tests diff --git a/.gitignore b/.gitignore index 1cf9cdb9..10ee3099 100644 --- a/.gitignore +++ b/.gitignore @@ -35,6 +35,7 @@ shunit2 addrbook.json */vendor +.vendor-new/ */.glide .terraform terraform.tfstate diff --git a/CHANGELOG.md b/CHANGELOG.md index 52a926ae..068d99c3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,147 @@ # Changelog +## v0.31.7 + +*June 3, 2019* + +This release fixes a regression in the mempool introduced in v0.31.6. +The regression caused invalid committed txs to be proposed in blocks over and +over again. + +### BUG FIXES: +- [mempool] \#3699 Remove all committed txs from the mempool. + This reverts the change from v0.31.6 where we only remove valid txs from the mempool. + Note this means malicious proposals can cause txs to be dropped from the + mempools of other nodes by including them in blocks before they are valid. + See \#3322. + +## v0.31.6 + +*May 31st, 2019* + +This release contains many fixes and improvements, primarily for p2p functionality. +It also fixes a security issue in the mempool package. + +With this release, Tendermint now supports [boltdb](https://github.com/etcd-io/bbolt), although +in experimental mode. Feel free to try it and report any findings/issues. +Note also that the build tags for compiling CLevelDB have changed. + +Special thanks to external contributors on this release: +@guagualvcha, @james-ray, @gregdhill, @climber73, @yutianwu, +@carlosflrs, @defunctzombie, @leoluk, @needkane, @CrocdileChan + +### BREAKING CHANGES: + +* Go API + - [libs/common] Removed deprecated `PanicSanity`, `PanicCrisis`, + `PanicConsensus` and `PanicQ` + - [mempool, state] [\#2659](https://github.com/tendermint/tendermint/issues/2659) `Mempool` is now an interface that lives in the mempool package. + See issue and PR for more details.
+ - [p2p] [\#3346](https://github.com/tendermint/tendermint/issues/3346) `Reactor#InitPeer` method is added to `Reactor` interface + - [types] [\#1648](https://github.com/tendermint/tendermint/issues/1648) `Commit#VoteSignBytes` signature was changed + +### FEATURES: +- [node] [\#2659](https://github.com/tendermint/tendermint/issues/2659) Add `node.Mempool()` method, which allows you to access mempool +- [libs/db] [\#3604](https://github.com/tendermint/tendermint/pull/3604) Add experimental support for bolt db (etcd's fork of bolt) (@CrocdileChan) + +### IMPROVEMENTS: +- [cli] [\#3585](https://github.com/tendermint/tendermint/issues/3585) Add `--keep-addr-book` option to `unsafe_reset_all` cmd to not + clear the address book (@climber73) +- [cli] [\#3160](https://github.com/tendermint/tendermint/issues/3160) Add + `--config=` option to `testnet` cmd (@gregdhill) +- [cli] [\#3661](https://github.com/tendermint/tendermint/pull/3661) Add + `--hostname-suffix`, `--hostname` and `--random-monikers` options to `testnet` + cmd for greater peer address/identity generation flexibility. +- [crypto] [\#3672](https://github.com/tendermint/tendermint/issues/3672) Return more info in the `AddSignatureFromPubKey` error +- [cs/replay] [\#3460](https://github.com/tendermint/tendermint/issues/3460) Check appHash for each block +- [libs/db] [\#3611](https://github.com/tendermint/tendermint/issues/3611) Conditional compilation + * Use `cleveldb` tag instead of `gcc` to compile Tendermint with CLevelDB or + use `make build_c` / `make install_c` (full instructions can be found at + https://tendermint.com/docs/introduction/install.html#compile-with-cleveldb-support) + * Use `boltdb` tag to compile Tendermint with bolt db +- [node] [\#3362](https://github.com/tendermint/tendermint/issues/3362) Return an error if `persistent_peers` list is invalid (except + when IP lookup fails) +- [p2p] [\#3463](https://github.com/tendermint/tendermint/pull/3463) Do not log "Can't add peer's address to addrbook" error for a private peer (@guagualvcha) +- [p2p] [\#3531](https://github.com/tendermint/tendermint/issues/3531) Terminate session on nonce wrapping (@climber73) +- [pex] [\#3647](https://github.com/tendermint/tendermint/pull/3647) Dial seeds, if any, instead of crawling peers first (@defunctzombie) +- [rpc] [\#3534](https://github.com/tendermint/tendermint/pull/3534) Add support for batched requests/responses in JSON RPC +- [rpc] [\#3362](https://github.com/tendermint/tendermint/issues/3362) `/dial_seeds` & `/dial_peers` return errors if addresses are + incorrect (except when IP lookup fails) + +### BUG FIXES: +- [consensus] [\#3067](https://github.com/tendermint/tendermint/issues/3067) Fix replay from appHeight==0 with validator set changes (@james-ray) +- [consensus] [\#3304](https://github.com/tendermint/tendermint/issues/3304) Create a peer state in consensus reactor before the peer + is started (@guagualvcha) +- [lite] [\#3669](https://github.com/tendermint/tendermint/issues/3669) Add context parameter to RPC Handlers in proxy routes (@yutianwu) +- [mempool] [\#3322](https://github.com/tendermint/tendermint/issues/3322) When a block is committed, only remove committed txs from the mempool +that were valid (ie. 
`ResponseDeliverTx.Code == 0`) +- [p2p] [\#3338](https://github.com/tendermint/tendermint/issues/3338) Ensure `RemovePeer` is always called before `InitPeer` (upon a peer + reconnecting to our node) +- [p2p] [\#3532](https://github.com/tendermint/tendermint/issues/3532) Limit the number of attempts to connect to a peer in seed mode + to 16 (as a result, the node will stop retrying after a 35-hour time window) +- [p2p] [\#3362](https://github.com/tendermint/tendermint/issues/3362) Allow inbound peers to be persistent, including for seed nodes. +- [pex] [\#3603](https://github.com/tendermint/tendermint/pull/3603) Dial seeds when addrbook needs more addresses (@defunctzombie) + +### OTHERS: +- [networks] fixes ansible integration script (@carlosflrs) + +## v0.31.5 + +*April 16th, 2019* + +This release fixes a regression from v0.31.4 where, in existing chains that +were upgraded, `/validators` could return an empty validator set. This was true +for almost all heights, given that the validator set rarely changes. + +Special thanks to external contributors on this release: +@brapse, @guagualvcha, @dongsam, @phucc + +### IMPROVEMENTS: + +- [libs/common] `CMap`: slight optimization in `Keys()` and `Values()` (@phucc) +- [gitignore] Add .vendor-new (@dongsam) + +### BUG FIXES: + +- [state] [\#3537](https://github.com/tendermint/tendermint/pull/3537#issuecomment-482711833) + `LoadValidators`: do not return an empty validator set +- [blockchain] [\#3457](https://github.com/tendermint/tendermint/issues/3457) + Fix "peer did not send us anything" in `fast_sync` mode when under high pressure + +## v0.31.4 + +*April 12th, 2019* + +This release fixes a regression from v0.31.3 which used the peer's `SocketAddr` to add the peer to +the address book. This swallowed the peer's self-reported port, which is important in case of reconnect. +It brings back `NetAddress()` to `NodeInfo` and uses it instead of `SocketAddr` for adding peers. +Additionally, it improves response time on the `/validators` or `/status` RPC endpoints. +As a side-effect it makes these RPC endpoints more difficult to DoS and fixes a performance degradation in `ExecCommitBlock`. +Also, it contains an [ADR](https://github.com/tendermint/tendermint/pull/3539) that proposes decoupling the +responsibility for peer behaviour from the `p2p.Switch` (by @brapse). + +Special thanks to external contributors on this release: +@brapse, @guagualvcha, @mdyring + +### IMPROVEMENTS: + +- [p2p] [\#3463](https://github.com/tendermint/tendermint/pull/3463) Do not log "Can't add peer's address to addrbook" error for a private peer +- [p2p] [\#3547](https://github.com/tendermint/tendermint/pull/3547) Fix a couple of annoying typos (@mdyring) + +### BUG FIXES: + +- [docs] [\#3514](https://github.com/tendermint/tendermint/issues/3514) Fix block.Header.Time description (@melekes) +- [p2p] [\#2716](https://github.com/tendermint/tendermint/issues/2716) Check if we're already connected to peer right before dialing it (@melekes) +- [p2p] [\#3545](https://github.com/tendermint/tendermint/issues/3545) Add back `NetAddress()` to `NodeInfo` and use it instead of peer's `SocketAddr()` when adding a peer to the `PEXReactor` (potential fix for [\#3532](https://github.com/tendermint/tendermint/issues/3532)) +- [state] [\#3438](https://github.com/tendermint/tendermint/pull/3438) + Persist validators every 100000 blocks even if no changes to the set + occurred (@guagualvcha). This + 1) Prevents possible DoS attack using `/validators` or `/status` RPC + endpoints.
Previously, response time grew linearly with height if no + changes were made to the validator set. + 2) Fixes performance degradation in `ExecCommitBlock` where we call + `LoadValidators` for each `Evidence` in the block. + ## v0.31.3 *April 1st, 2019* @@ -8,6 +150,12 @@ This release includes two security sensitive fixes: it ensures generated private keys are valid, and it prevents certain DNS lookups that would cause the node to panic if the lookup failed. +### BREAKING CHANGES: +* Go API + - [crypto/secp256k1] [\#3439](https://github.com/tendermint/tendermint/issues/3439) + The `secp256k1.GenPrivKeySecp256k1` function has changed to guarantee that it returns a valid key, which means it + will return a different private key than in previous versions for the same secret. + ### BUG FIXES: - [crypto/secp256k1] [\#3439](https://github.com/tendermint/tendermint/issues/3439) @@ -35,7 +183,7 @@ Special thanks to external contributors on this release: * Apps * Go API -- [libs/autofile] [\#3504](https://github.com/tendermint/tendermint/issues/3504) Remove unused code in autofile package. Deleted functions: `Group.Search`, `Group.FindLast`, `GroupReader.ReadLine`, `GroupReader.PushLine`, `MakeSimpleSearchFunc` (@guagualvcha) + - [libs/autofile] [\#3504](https://github.com/tendermint/tendermint/issues/3504) Remove unused code in autofile package. Deleted functions: `Group.Search`, `Group.FindLast`, `GroupReader.ReadLine`, `GroupReader.PushLine`, `MakeSimpleSearchFunc` (@guagualvcha) * Blockchain Protocol diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 33f5f27c..b1b09e20 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -1,14 +1,28 @@ -## v0.31.2 +## v0.31.8 ** ### BREAKING CHANGES: * CLI/RPC/Config + - [cli] \#3613 Switch from golang/dep to Go Modules to resolve dependencies: + It is recommended to switch to Go Modules if your project has tendermint as + a dependency. Read more on Modules here: + https://github.com/golang/go/wiki/Modules + - [rpc] \#3616 Improve `/block_results` response format (`results.DeliverTx` + -> `results.deliver_tx`). See docs for details. * Apps + - [abci] \#1859 `ResponseCheckTx`, `ResponseDeliverTx`, `ResponseBeginBlock`, + and `ResponseEndBlock` now include `Events` instead of `Tags`. Each `Event` + contains a `type` and a list of `attributes` (list of key-value pairs) + allowing for inclusion of multiple distinct events in each response. * Go API + - [libs/db] [\#3632](https://github.com/tendermint/tendermint/pull/3632) Removed deprecated `LevelDBBackend` const. + If you have `db_backend` set to `leveldb` in your config file, please + change it to `goleveldb` or `cleveldb`. + - [p2p] \#3521 Remove NewNetAddressStringWithOptionalID * Blockchain Protocol @@ -17,17 +31,9 @@ ### FEATURES: ### IMPROVEMENTS: -- [p2p] [\#3463](https://github.com/tendermint/tendermint/pull/3463) Do not log "Can't add peer's address to addrbook" error for a private peer +- [p2p] \#3666 Add per-channel telemetry to improve reactor observability +- [rpc] [\#3686](https://github.com/tendermint/tendermint/pull/3686) `HTTPClient#Call` returns wrapped errors, so a caller can use `errors.Cause` to retrieve an error code. (@wooparadog) ### BUG FIXES: - -- [state] [\#3438](https://github.com/tendermint/tendermint/pull/3438) - Persist validators every 100000 blocks even if no changes to the set - occurred (@guagualvcha). This - 1) Prevents possible DoS attack using `/validators` or `/status` RPC - endpoints.
Before response time was growing linearly with height if no - changes were made to the validator set. - 2) Fixes performance degradation in `ExecCommitBlock` where we call - `LoadValidators` for each `Evidence` in the block. -- [p2p] \#2716 Check if we're already connected to peer right before dialing it (@melekes) -- [docs] \#3514 Fix block.Header.Time description (@melekes) +- [libs/db] \#3717 Fixed the BoltDB backend's Batch.Delete implementation (@Yawning) +- [libs/db] \#3718 Fixed the BoltDB backend's Get and Iterator implementation (@Yawning) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 3dab3b8a..e68e6d1e 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -4,6 +4,14 @@ Thank you for considering making contributions to Tendermint and related reposit Please follow standard github best practices: fork the repo, branch from the tip of develop, make some commits, and submit a pull request to develop. See the [open issues](https://github.com/tendermint/tendermint/issues) for things we need help with! +Before making a pull request, please open an issue describing the +change you would like to make. If an issue for your change already exists, +please comment on it that you will submit a pull request. Be sure to reference the issue in the opening +comment of your pull request. If your change is substantial, you will be asked +to write a more detailed design document in the form of an +Architectural Decision Record (ie. see [here](./docs/architecture/)) before submitting code +changes. + Please make sure to use `gofmt` before every commit - the easiest way to do this is have your editor run it for you upon saving a file. ## Forking @@ -34,7 +42,7 @@ Please don't make Pull Requests to `master`. ## Dependencies -We use [dep](https://github.com/golang/dep) to manage dependencies. +We use [go modules](https://github.com/golang/go/wiki/Modules) to manage dependencies. That said, the master branch of every Tendermint repository should just build with `go get`, which means they should be kept up-to-date with their @@ -42,18 +50,17 @@ dependencies so we can get away with telling people they can just `go get` our software. Since some dependencies are not under our control, a third party may break our -build, in which case we can fall back on `dep ensure` (or `make -get_vendor_deps`). Even for dependencies under our control, dep helps us to +build, in which case we can fall back on `go mod tidy`. Even for dependencies under our control, go helps us to keep multiple repos in sync as they evolve. Anything with an executable, such as apps, tools, and the core, should use dep. -Run `dep status` to get a list of vendor dependencies that may not be +Run `go list -u -m all` to get a list of dependencies that may not be up-to-date. When updating dependencies, please only update the particular dependencies you -need. Instead of running `dep ensure -update`, which will update anything, +need. Instead of running `go get -u=patch`, which will update anything, specify exactly the dependency you want to update, eg. -`dep ensure -update github.com/tendermint/go-amino`. +`GO111MODULE=on go get -u github.com/tendermint/go-amino@master`. ## Vagrant @@ -105,10 +112,14 @@ removed from the header in rpc responses as well. ## Branching Model and Release -All repos should adhere to the branching model: http://nvie.com/posts/a-successful-git-branching-model/. +We follow a variant of [git flow](http://nvie.com/posts/a-successful-git-branching-model/). This means that all pull-requests should be made against develop. 
Any merge to master constitutes a tagged release. +Note all pull requests should be squash merged except for merging to master and +merging master back to develop. This keeps the commit history clean and makes it +easy to reference the pull request where a change was introduced. + ### Development Procedure: - the latest state of development is on `develop` - `develop` must never fail `make test` @@ -120,13 +131,13 @@ master constitutes a tagged release. ### Pull Merge Procedure: - ensure pull branch is based on a recent develop - run `make test` to ensure that all tests pass -- merge pull request +- squash merge pull request - the `unstable` branch may be used to aggregate pull merges before fixing tests ### Release Procedure: - start on `develop` - run integration tests (see `test_integrations` in Makefile) -- prepare changelog: +- prepare release in a pull request against develop (to be squash merged): - copy `CHANGELOG_PENDING.md` to top of `CHANGELOG.md` - run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for all issues @@ -135,23 +146,15 @@ master constitutes a tagged release. the changelog. To lookup an alias from an email, try `bash ./scripts/authors.sh ` - reset the `CHANGELOG_PENDING.md` -- bump versions -- push to release/vX.X.X to run the extended integration tests on the CI -- merge to master -- merge master back to develop + - bump versions +- push latest develop with prepared release details to release/vX.X.X to run the extended integration tests on the CI +- if necessary, make pull requests against release/vX.X.X and squash merge them +- merge to master (don't squash merge!) +- merge master back to develop (don't squash merge!) ### Hotfix Procedure: -- start on `master` -- checkout a new branch named hotfix-vX.X.X -- make the required changes - - these changes should be small and an absolute necessity - - add a note to CHANGELOG.md -- bump versions -- push to hotfix-vX.X.X to run the extended integration tests on the CI -- merge hotfix-vX.X.X to master -- merge hotfix-vX.X.X to develop -- delete the hotfix-vX.X.X branch +- follow the normal development and release procedure without any differences ## Testing diff --git a/DOCKER/Dockerfile.develop b/DOCKER/Dockerfile.develop index 5759e765..943b2129 100644 --- a/DOCKER/Dockerfile.develop +++ b/DOCKER/Dockerfile.develop @@ -19,7 +19,6 @@ RUN mkdir -p /go/src/github.com/tendermint/tendermint && \ git clone https://github.com/tendermint/tendermint . && \ git checkout develop && \ make get_tools && \ - make get_vendor_deps && \ make install && \ cd - && \ rm -rf /go/src/github.com/tendermint/tendermint && \ diff --git a/DOCKER/Dockerfile.testing b/DOCKER/Dockerfile.testing index b82afe2a..a658aeb1 100644 --- a/DOCKER/Dockerfile.testing +++ b/DOCKER/Dockerfile.testing @@ -1,4 +1,4 @@ -FROM golang:1.10.1 +FROM golang:1.12 # Grab deps (jq, hexdump, xxd, killall) diff --git a/Gopkg.lock b/Gopkg.lock deleted file mode 100644 index 56c79ab6..00000000 --- a/Gopkg.lock +++ /dev/null @@ -1,552 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
- - -[[projects]] - branch = "master" - digest = "1:d6afaeed1502aa28e80a4ed0981d570ad91b2579193404256ce672ed0a609e0d" - name = "github.com/beorn7/perks" - packages = ["quantile"] - pruneopts = "UT" - revision = "3a771d992973f24aa725d07868b467d1ddfceafb" - -[[projects]] - digest = "1:093bf93a65962e8191e3e8cd8fc6c363f83d43caca9739c906531ba7210a9904" - name = "github.com/btcsuite/btcd" - packages = ["btcec"] - pruneopts = "UT" - revision = "ed77733ec07dfc8a513741138419b8d9d3de9d2d" - -[[projects]] - digest = "1:1d8e1cb71c33a9470bbbae09bfec09db43c6bf358dfcae13cd8807c4e2a9a2bf" - name = "github.com/btcsuite/btcutil" - packages = [ - "base58", - "bech32", - ] - pruneopts = "UT" - revision = "d4cc87b860166d00d6b5b9e0d3b3d71d6088d4d4" - -[[projects]] - digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec" - name = "github.com/davecgh/go-spew" - packages = ["spew"] - pruneopts = "UT" - revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73" - version = "v1.1.1" - -[[projects]] - digest = "1:544229a3ca0fb2dd5ebc2896d3d2ff7ce096d9751635301e44e37e761349ee70" - name = "github.com/fortytw2/leaktest" - packages = ["."] - pruneopts = "UT" - revision = "a5ef70473c97b71626b9abeda80ee92ba2a7de9e" - version = "v1.2.0" - -[[projects]] - digest = "1:abeb38ade3f32a92943e5be54f55ed6d6e3b6602761d74b4aab4c9dd45c18abd" - name = "github.com/fsnotify/fsnotify" - packages = ["."] - pruneopts = "UT" - revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9" - version = "v1.4.7" - -[[projects]] - digest = "1:fdf5169073fb0ad6dc12a70c249145e30f4058647bea25f0abd48b6d9f228a11" - name = "github.com/go-kit/kit" - packages = [ - "log", - "log/level", - "log/term", - "metrics", - "metrics/discard", - "metrics/internal/lv", - "metrics/prometheus", - ] - pruneopts = "UT" - revision = "4dc7be5d2d12881735283bcab7352178e190fc71" - version = "v0.6.0" - -[[projects]] - digest = "1:31a18dae27a29aa074515e43a443abfd2ba6deb6d69309d8d7ce789c45f34659" - name = "github.com/go-logfmt/logfmt" - packages = ["."] - pruneopts = "UT" - revision = "390ab7935ee28ec6b286364bba9b4dd6410cb3d5" - version = "v0.3.0" - -[[projects]] - digest = "1:586ea76dbd0374d6fb649a91d70d652b7fe0ccffb8910a77468e7702e7901f3d" - name = "github.com/go-stack/stack" - packages = ["."] - pruneopts = "UT" - revision = "2fee6af1a9795aafbe0253a0cfbdf668e1fb8a9a" - version = "v1.8.0" - -[[projects]] - digest = "1:95e1006e41c641abd2f365dfa0f1213c04da294e7cd5f0bf983af234b775db64" - name = "github.com/gogo/protobuf" - packages = [ - "gogoproto", - "jsonpb", - "proto", - "protoc-gen-gogo/descriptor", - "sortkeys", - "types", - ] - pruneopts = "UT" - revision = "ba06b47c162d49f2af050fb4c75bcbc86a159d5c" - version = "v1.2.1" - -[[projects]] - digest = "1:239c4c7fd2159585454003d9be7207167970194216193a8a210b8d29576f19c9" - name = "github.com/golang/protobuf" - packages = [ - "proto", - "ptypes", - "ptypes/any", - "ptypes/duration", - "ptypes/timestamp", - ] - pruneopts = "UT" - revision = "c823c79ea1570fb5ff454033735a8e68575d1d0f" - version = "v1.3.0" - -[[projects]] - branch = "master" - digest = "1:4a0c6bb4805508a6287675fac876be2ac1182539ca8a32468d8128882e9d5009" - name = "github.com/golang/snappy" - packages = ["."] - pruneopts = "UT" - revision = "2e65f85255dbc3072edf28d6b5b8efc472979f5a" - -[[projects]] - digest = "1:43dd08a10854b2056e615d1b1d22ac94559d822e1f8b6fcc92c1a1057e85188e" - name = "github.com/gorilla/websocket" - packages = ["."] - pruneopts = "UT" - revision = "ea4d1f681babbce9545c9c5f3d5194a789c89f5b" - version = "v1.2.0" - -[[projects]] - digest = 
"1:ea40c24cdbacd054a6ae9de03e62c5f252479b96c716375aace5c120d68647c8" - name = "github.com/hashicorp/hcl" - packages = [ - ".", - "hcl/ast", - "hcl/parser", - "hcl/scanner", - "hcl/strconv", - "hcl/token", - "json/parser", - "json/scanner", - "json/token", - ] - pruneopts = "UT" - revision = "8cb6e5b959231cc1119e43259c4a608f9c51a241" - version = "v1.0.0" - -[[projects]] - digest = "1:870d441fe217b8e689d7949fef6e43efbc787e50f200cb1e70dbca9204a1d6be" - name = "github.com/inconshreveable/mousetrap" - packages = ["."] - pruneopts = "UT" - revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75" - version = "v1.0" - -[[projects]] - digest = "1:a74b5a8e34ee5843cd6e65f698f3e75614f812ff170c2243425d75bc091e9af2" - name = "github.com/jmhodges/levigo" - packages = ["."] - pruneopts = "UT" - revision = "853d788c5c416eaaee5b044570784a96c7a26975" - version = "v1.0.0" - -[[projects]] - branch = "master" - digest = "1:a64e323dc06b73892e5bb5d040ced475c4645d456038333883f58934abbf6f72" - name = "github.com/kr/logfmt" - packages = ["."] - pruneopts = "UT" - revision = "b84e30acd515aadc4b783ad4ff83aff3299bdfe0" - -[[projects]] - digest = "1:53e8c5c79716437e601696140e8b1801aae4204f4ec54a504333702a49572c4f" - name = "github.com/magiconair/properties" - packages = [ - ".", - "assert", - ] - pruneopts = "UT" - revision = "c2353362d570a7bfa228149c62842019201cfb71" - version = "v1.8.0" - -[[projects]] - digest = "1:ff5ebae34cfbf047d505ee150de27e60570e8c394b3b8fdbb720ff6ac71985fc" - name = "github.com/matttproud/golang_protobuf_extensions" - packages = ["pbutil"] - pruneopts = "UT" - revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c" - version = "v1.0.1" - -[[projects]] - digest = "1:53bc4cd4914cd7cd52139990d5170d6dc99067ae31c56530621b18b35fc30318" - name = "github.com/mitchellh/mapstructure" - packages = ["."] - pruneopts = "UT" - revision = "3536a929edddb9a5b34bd6861dc4a9647cb459fe" - version = "v1.1.2" - -[[projects]] - digest = "1:95741de3af260a92cc5c7f3f3061e85273f5a81b5db20d4bd68da74bd521675e" - name = "github.com/pelletier/go-toml" - packages = ["."] - pruneopts = "UT" - revision = "c01d1270ff3e442a8a57cddc1c92dc1138598194" - version = "v1.2.0" - -[[projects]] - digest = "1:40e195917a951a8bf867cd05de2a46aaf1806c50cf92eebf4c16f78cd196f747" - name = "github.com/pkg/errors" - packages = ["."] - pruneopts = "UT" - revision = "645ef00459ed84a119197bfb8d8205042c6df63d" - version = "v0.8.0" - -[[projects]] - digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe" - name = "github.com/pmezard/go-difflib" - packages = ["difflib"] - pruneopts = "UT" - revision = "792786c7400a136282c1664665ae0a8db921c6c2" - version = "v1.0.0" - -[[projects]] - digest = "1:26663fafdea73a38075b07e8e9d82fc0056379d2be8bb4e13899e8fda7c7dd23" - name = "github.com/prometheus/client_golang" - packages = [ - "prometheus", - "prometheus/internal", - "prometheus/promhttp", - ] - pruneopts = "UT" - revision = "abad2d1bd44235a26707c172eab6bca5bf2dbad3" - version = "v0.9.1" - -[[projects]] - branch = "master" - digest = "1:2d5cd61daa5565187e1d96bae64dbbc6080dacf741448e9629c64fd93203b0d4" - name = "github.com/prometheus/client_model" - packages = ["go"] - pruneopts = "UT" - revision = "5c3871d89910bfb32f5fcab2aa4b9ec68e65a99f" - -[[projects]] - branch = "master" - digest = "1:db712fde5d12d6cdbdf14b777f0c230f4ff5ab0be8e35b239fc319953ed577a4" - name = "github.com/prometheus/common" - packages = [ - "expfmt", - "internal/bitbucket.org/ww/goautoneg", - "model", - ] - pruneopts = "UT" - revision = 
"7e9e6cabbd393fc208072eedef99188d0ce788b6" - -[[projects]] - branch = "master" - digest = "1:ef74914912f99c79434d9c09658274678bc85080ebe3ab32bec3940ebce5e1fc" - name = "github.com/prometheus/procfs" - packages = [ - ".", - "internal/util", - "nfs", - "xfs", - ] - pruneopts = "UT" - revision = "185b4288413d2a0dd0806f78c90dde719829e5ae" - -[[projects]] - digest = "1:c4556a44e350b50a490544d9b06e9fba9c286c21d6c0e47f54f3a9214597298c" - name = "github.com/rcrowley/go-metrics" - packages = ["."] - pruneopts = "UT" - revision = "e2704e165165ec55d062f5919b4b29494e9fa790" - -[[projects]] - digest = "1:b0c25f00bad20d783d259af2af8666969e2fc343fa0dc9efe52936bbd67fb758" - name = "github.com/rs/cors" - packages = ["."] - pruneopts = "UT" - revision = "9a47f48565a795472d43519dd49aac781f3034fb" - version = "v1.6.0" - -[[projects]] - digest = "1:6a4a11ba764a56d2758899ec6f3848d24698d48442ebce85ee7a3f63284526cd" - name = "github.com/spf13/afero" - packages = [ - ".", - "mem", - ] - pruneopts = "UT" - revision = "d40851caa0d747393da1ffb28f7f9d8b4eeffebd" - version = "v1.1.2" - -[[projects]] - digest = "1:08d65904057412fc0270fc4812a1c90c594186819243160dc779a402d4b6d0bc" - name = "github.com/spf13/cast" - packages = ["."] - pruneopts = "UT" - revision = "8c9545af88b134710ab1cd196795e7f2388358d7" - version = "v1.3.0" - -[[projects]] - digest = "1:7ffc0983035bc7e297da3688d9fe19d60a420e9c38bef23f845c53788ed6a05e" - name = "github.com/spf13/cobra" - packages = ["."] - pruneopts = "UT" - revision = "7b2c5ac9fc04fc5efafb60700713d4fa609b777b" - version = "v0.0.1" - -[[projects]] - digest = "1:68ea4e23713989dc20b1bded5d9da2c5f9be14ff9885beef481848edd18c26cb" - name = "github.com/spf13/jwalterweatherman" - packages = ["."] - pruneopts = "UT" - revision = "4a4406e478ca629068e7768fc33f3f044173c0a6" - version = "v1.0.0" - -[[projects]] - digest = "1:c1b1102241e7f645bc8e0c22ae352e8f0dc6484b6cb4d132fa9f24174e0119e2" - name = "github.com/spf13/pflag" - packages = ["."] - pruneopts = "UT" - revision = "298182f68c66c05229eb03ac171abe6e309ee79a" - version = "v1.0.3" - -[[projects]] - digest = "1:f8e1a678a2571e265f4bf91a3e5e32aa6b1474a55cb0ea849750cc177b664d96" - name = "github.com/spf13/viper" - packages = ["."] - pruneopts = "UT" - revision = "25b30aa063fc18e48662b86996252eabdcf2f0c7" - version = "v1.0.0" - -[[projects]] - digest = "1:7e8d267900c7fa7f35129a2a37596e38ed0f11ca746d6d9ba727980ee138f9f6" - name = "github.com/stretchr/testify" - packages = [ - "assert", - "require", - ] - pruneopts = "UT" - revision = "12b6f73e6084dad08a7c6e575284b177ecafbc71" - version = "v1.2.1" - -[[projects]] - branch = "master" - digest = "1:59483b8e8183f10ab21a85ba1f4cbb4a2335d48891801f79ed7b9499f44d383c" - name = "github.com/syndtr/goleveldb" - packages = [ - "leveldb", - "leveldb/cache", - "leveldb/comparer", - "leveldb/errors", - "leveldb/filter", - "leveldb/iterator", - "leveldb/journal", - "leveldb/memdb", - "leveldb/opt", - "leveldb/storage", - "leveldb/table", - "leveldb/util", - ] - pruneopts = "UT" - revision = "6b91fda63f2e36186f1c9d0e48578defb69c5d43" - -[[projects]] - digest = "1:ad9c4c1a4e7875330b1f62906f2830f043a23edb5db997e3a5ac5d3e6eadf80a" - name = "github.com/tendermint/go-amino" - packages = ["."] - pruneopts = "UT" - revision = "dc14acf9ef15f85828bfbc561ed9dd9d2a284885" - version = "v0.14.1" - -[[projects]] - branch = "master" - digest = "1:f4edb30d5ff238e2abba10457010f74cd55ae20bbda8c54db1a07155fa020490" - name = "golang.org/x/crypto" - packages = [ - "bcrypt", - "blowfish", - "chacha20poly1305", - "curve25519", - "ed25519", 
- "ed25519/internal/edwards25519", - "hkdf", - "internal/chacha20", - "internal/subtle", - "nacl/box", - "nacl/secretbox", - "openpgp/armor", - "openpgp/errors", - "poly1305", - "ripemd160", - "salsa20/salsa", - ] - pruneopts = "UT" - revision = "8dd112bcdc25174059e45e07517d9fc663123347" - -[[projects]] - digest = "1:d36f55a999540d29b6ea3c2ea29d71c76b1d9853fdcd3e5c5cb4836f2ba118f1" - name = "golang.org/x/net" - packages = [ - "context", - "http/httpguts", - "http2", - "http2/hpack", - "idna", - "internal/timeseries", - "netutil", - "trace", - ] - pruneopts = "UT" - revision = "292b43bbf7cb8d35ddf40f8d5100ef3837cced3f" - -[[projects]] - branch = "master" - digest = "1:6f86e2f2e2217cd4d74dec6786163cf80e4d2b99adb341ecc60a45113b844dca" - name = "golang.org/x/sys" - packages = [ - "cpu", - "unix", - ] - pruneopts = "UT" - revision = "7e31e0c00fa05cb5fbf4347b585621d6709e19a4" - -[[projects]] - digest = "1:a2ab62866c75542dd18d2b069fec854577a20211d7c0ea6ae746072a1dccdd18" - name = "golang.org/x/text" - packages = [ - "collate", - "collate/build", - "internal/colltab", - "internal/gen", - "internal/tag", - "internal/triegen", - "internal/ucd", - "language", - "secure/bidirule", - "transform", - "unicode/bidi", - "unicode/cldr", - "unicode/norm", - "unicode/rangetable", - ] - pruneopts = "UT" - revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" - version = "v0.3.0" - -[[projects]] - branch = "master" - digest = "1:56b0bca90b7e5d1facf5fbdacba23e4e0ce069d25381b8e2f70ef1e7ebfb9c1a" - name = "google.golang.org/genproto" - packages = ["googleapis/rpc/status"] - pruneopts = "UT" - revision = "b69ba1387ce2108ac9bc8e8e5e5a46e7d5c72313" - -[[projects]] - digest = "1:2dab32a43451e320e49608ff4542fdfc653c95dcc35d0065ec9c6c3dd540ed74" - name = "google.golang.org/grpc" - packages = [ - ".", - "balancer", - "balancer/base", - "balancer/roundrobin", - "codes", - "connectivity", - "credentials", - "encoding", - "encoding/proto", - "grpclog", - "internal", - "internal/backoff", - "internal/channelz", - "internal/grpcrand", - "keepalive", - "metadata", - "naming", - "peer", - "resolver", - "resolver/dns", - "resolver/passthrough", - "stats", - "status", - "tap", - "transport", - ] - pruneopts = "UT" - revision = "168a6198bcb0ef175f7dacec0b8691fc141dc9b8" - version = "v1.13.0" - -[[projects]] - digest = "1:342378ac4dcb378a5448dd723f0784ae519383532f5e70ade24132c4c8693202" - name = "gopkg.in/yaml.v2" - packages = ["."] - pruneopts = "UT" - revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183" - version = "v2.2.1" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - input-imports = [ - "github.com/btcsuite/btcd/btcec", - "github.com/btcsuite/btcutil/base58", - "github.com/btcsuite/btcutil/bech32", - "github.com/fortytw2/leaktest", - "github.com/go-kit/kit/log", - "github.com/go-kit/kit/log/level", - "github.com/go-kit/kit/log/term", - "github.com/go-kit/kit/metrics", - "github.com/go-kit/kit/metrics/discard", - "github.com/go-kit/kit/metrics/prometheus", - "github.com/go-logfmt/logfmt", - "github.com/gogo/protobuf/gogoproto", - "github.com/gogo/protobuf/jsonpb", - "github.com/gogo/protobuf/proto", - "github.com/gogo/protobuf/types", - "github.com/golang/protobuf/proto", - "github.com/golang/protobuf/ptypes/timestamp", - "github.com/gorilla/websocket", - "github.com/jmhodges/levigo", - "github.com/magiconair/properties/assert", - "github.com/pkg/errors", - "github.com/prometheus/client_golang/prometheus", - "github.com/prometheus/client_golang/prometheus/promhttp", - "github.com/rcrowley/go-metrics", - 
"github.com/rs/cors", - "github.com/spf13/cobra", - "github.com/spf13/viper", - "github.com/stretchr/testify/assert", - "github.com/stretchr/testify/require", - "github.com/syndtr/goleveldb/leveldb", - "github.com/syndtr/goleveldb/leveldb/errors", - "github.com/syndtr/goleveldb/leveldb/iterator", - "github.com/syndtr/goleveldb/leveldb/opt", - "github.com/tendermint/go-amino", - "golang.org/x/crypto/bcrypt", - "golang.org/x/crypto/chacha20poly1305", - "golang.org/x/crypto/curve25519", - "golang.org/x/crypto/ed25519", - "golang.org/x/crypto/hkdf", - "golang.org/x/crypto/nacl/box", - "golang.org/x/crypto/nacl/secretbox", - "golang.org/x/crypto/openpgp/armor", - "golang.org/x/crypto/ripemd160", - "golang.org/x/net/context", - "golang.org/x/net/netutil", - "google.golang.org/grpc", - "google.golang.org/grpc/credentials", - ] - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml deleted file mode 100644 index 505f0da4..00000000 --- a/Gopkg.toml +++ /dev/null @@ -1,93 +0,0 @@ -# Gopkg.toml example -# -# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md -# for detailed Gopkg.toml documentation. -# -# required = ["github.com/user/thing/cmd/thing"] -# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] -# -# [[constraint]] -# name = "github.com/user/project" -# version = "1.0.0" -# -# [[override]] -# name = "github.com/x/y" -# version = "2.4.0" -# -# [prune] -# non-go = false -# go-tests = true -# unused-packages = true -# -########################################################### - -# Allow only patch releases for serialization libraries -[[constraint]] - name = "github.com/tendermint/go-amino" - version = "~0.14.1" - -[[constraint]] - name = "github.com/gogo/protobuf" - version = "~1.2.1" - -[[constraint]] - name = "github.com/golang/protobuf" - version = "~1.3.0" - -# Allow only minor releases for other libraries -[[constraint]] - name = "github.com/go-kit/kit" - version = "^0.6.0" - -[[constraint]] - name = "github.com/gorilla/websocket" - version = "^1.2.0" - -[[constraint]] - name = "github.com/rs/cors" - version = "^1.6.0" - -[[constraint]] - name = "github.com/pkg/errors" - version = "^0.8.0" - -[[constraint]] - name = "github.com/spf13/cobra" - version = "^0.0.1" - -[[constraint]] - name = "github.com/spf13/viper" - version = "^1.0.0" - -[[constraint]] - name = "github.com/stretchr/testify" - version = "^1.2.1" - -[[constraint]] - name = "google.golang.org/grpc" - version = "^1.13.0" - -[[constraint]] - name = "github.com/fortytw2/leaktest" - version = "^1.2.0" - -[[constraint]] - name = "github.com/prometheus/client_golang" - version = "^0.9.1" - -[[constraint]] - name = "github.com/jmhodges/levigo" - version = "^1.0.0" - -################################### -## Repos which don't have releases. - -## - github.com/btcsuite/btcd -## - golang.org/x/crypto -## - github.com/btcsuite/btcutil -## - github.com/rcrowley/go-metrics -## - golang.org/x/net - -[prune] - go-tests = true - unused-packages = true diff --git a/Makefile b/Makefile index 7c2ce1d9..1980ac86 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,5 @@ GOTOOLS = \ github.com/mitchellh/gox \ - github.com/golang/dep/cmd/dep \ github.com/golangci/golangci-lint/cmd/golangci-lint \ github.com/gogo/protobuf/protoc-gen-gogo \ github.com/square/certstrap @@ -8,13 +7,15 @@ GOBIN?=${GOPATH}/bin PACKAGES=$(shell go list ./...) OUTPUT?=build/tendermint +export GO111MODULE = on + INCLUDE = -I=. 
-I=${GOPATH}/src -I=${GOPATH}/src/github.com/gogo/protobuf/protobuf BUILD_TAGS?='tendermint' -BUILD_FLAGS = -ldflags "-X github.com/tendermint/tendermint/version.GitCommit=`git rev-parse --short=8 HEAD`" +BUILD_FLAGS = -mod=readonly -ldflags "-X github.com/tendermint/tendermint/version.GitCommit=`git rev-parse --short=8 HEAD`" all: check build test install -check: check_tools get_vendor_deps +check: check_tools ######################################## ### Build Tendermint @@ -23,16 +24,16 @@ build: CGO_ENABLED=0 go build $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o $(OUTPUT) ./cmd/tendermint/ build_c: - CGO_ENABLED=1 go build $(BUILD_FLAGS) -tags "$(BUILD_TAGS) gcc" -o $(OUTPUT) ./cmd/tendermint/ + CGO_ENABLED=1 go build $(BUILD_FLAGS) -tags "$(BUILD_TAGS) cleveldb" -o $(OUTPUT) ./cmd/tendermint/ build_race: CGO_ENABLED=0 go build -race $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o $(OUTPUT) ./cmd/tendermint install: - CGO_ENABLED=0 go install $(BUILD_FLAGS) -tags $(BUILD_TAGS) ./cmd/tendermint + CGO_ENABLED=0 go install $(BUILD_FLAGS) -tags $(BUILD_TAGS) ./cmd/tendermint install_c: - CGO_ENABLED=1 go install $(BUILD_FLAGS) -tags "$(BUILD_TAGS) gcc" ./cmd/tendermint + CGO_ENABLED=1 go install $(BUILD_FLAGS) -tags "$(BUILD_TAGS) cleveldb" ./cmd/tendermint ######################################## ### Protobuf @@ -56,10 +57,10 @@ protoc_abci: abci/types/types.pb.go protoc_proto3types: types/proto3/block.pb.go build_abci: - @go build -i ./abci/cmd/... + @go build -mod=readonly -i ./abci/cmd/... install_abci: - @go install ./abci/cmd/... + @go install -mod=readonly ./abci/cmd/... ######################################## ### Distribution @@ -85,11 +86,6 @@ update_tools: @echo "--> Updating tools" ./scripts/get_tools.sh -#Update dependencies -get_vendor_deps: - @echo "--> Running dep" - @dep ensure - #For ABCI and libs get_protoc: @# https://github.com/google/protobuf/releases @@ -132,7 +128,7 @@ clean_certs: rm -f db/remotedb/::.crt db/remotedb/::.key test_libs: gen_certs - go test -tags gcc $(PACKAGES) + go test -tags "cleveldb boltdb" $(PACKAGES) make clean_certs grpc_dbserver: @@ -192,7 +188,7 @@ test_p2p: test_integrations: make build_docker_test_image make get_tools - make get_vendor_deps make install make test_cover make test_apps @@ -254,10 +249,6 @@ rpc-docs: cat rpc/core/slate_header.txt > $(DESTINATION) godoc2md -template rpc/core/doc_template.txt github.com/tendermint/tendermint/rpc/core | grep -v -e "pipe.go" -e "routes.go" -e "dev.go" | sed 's,/src/target,https://github.com/tendermint/tendermint/tree/master/rpc/core,' >> $(DESTINATION) -check_dep: - dep status >> /dev/null - !(grep -n branch Gopkg.toml) - ########################################################### ### Docker image @@ -270,7 +261,7 @@ build-docker: ### Local testnet using docker # Build linux binary on other platforms -build-linux: get_tools get_vendor_deps +build-linux: get_tools GOOS=linux GOARCH=amd64 $(MAKE) build build-docker-localnode: @@ -312,4 +303,4 @@ build-slate: # To avoid unintended conflicts with file names, always add to .PHONY # unless there is a reason not to.
# https://www.gnu.org/software/make/manual/html_node/Phony-Targets.html -.PHONY: check build build_race build_abci dist install install_abci check_dep check_tools get_tools update_tools get_vendor_deps draw_deps get_protoc protoc_abci protoc_libs gen_certs clean_certs grpc_dbserver test_cover test_apps test_persistence test_p2p test test_race test_integrations test_release test100 vagrant_test fmt rpc-docs build-linux localnet-start localnet-stop build-docker build-docker-localnode sentry-start sentry-config sentry-stop build-slate protoc_grpc protoc_all build_c install_c test_with_deadlock cleanup_after_test_with_deadlock lint +.PHONY: check build build_race build_abci dist install install_abci check_tools get_tools update_tools draw_deps get_protoc protoc_abci protoc_libs gen_certs clean_certs grpc_dbserver test_cover test_apps test_persistence test_p2p test test_race test_integrations test_release test100 vagrant_test fmt rpc-docs build-linux localnet-start localnet-stop build-docker build-docker-localnode sentry-start sentry-config sentry-stop build-slate protoc_grpc protoc_all build_c install_c test_with_deadlock cleanup_after_test_with_deadlock lint diff --git a/UPGRADING.md b/UPGRADING.md index eccb954d..5a77e072 100644 --- a/UPGRADING.md +++ b/UPGRADING.md @@ -3,6 +3,24 @@ This guide provides steps to be followed when you upgrade your applications to a newer version of Tendermint Core. + +## v0.32.0 + +### Config Changes + +If you have `db_backend` set to `leveldb` in your config file, please change it +to `goleveldb` or `cleveldb`. + +## v0.31.6 + +There are no breaking changes in this release except the Go API of the p2p and +mempool packages. However, if you're using cleveldb, you'll need to change +the compilation tag: + +Use the `cleveldb` tag instead of `gcc` to compile Tendermint with CLevelDB or +use `make build_c` / `make install_c` (full instructions can be found at +https://tendermint.com/docs/introduction/install.html#compile-with-cleveldb-support) + ## v0.31.0 This release contains a breaking change to the behaviour of the pubsub system.
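For readers applying the UPGRADING.md notes above, a minimal sketch of the equivalent build commands after this change. It is assembled from the Makefile targets in this diff (`build_c`, `BUILD_TAGS?='tendermint'`, `OUTPUT?=build/tendermint`, `export GO111MODULE = on`); omitting the `-ldflags` version stamp is a simplification for illustration, not part of the commit:

    # Sketch: `make build_c` equivalent, using the renamed `cleveldb`
    # build tag (formerly `gcc`) under Go modules.
    export GO111MODULE=on
    CGO_ENABLED=1 go build -mod=readonly -tags "tendermint cleveldb" \
        -o build/tendermint ./cmd/tendermint/

    # Sketch: the experimental BoltDB backend introduced in v0.31.6.
    CGO_ENABLED=0 go build -mod=readonly -tags "tendermint boltdb" \
        -o build/tendermint ./cmd/tendermint/

A binary built with the `cleveldb` tag can then run with `db_backend = "cleveldb"` in the config file; otherwise use `goleveldb`, since the deprecated `leveldb` backend name is removed by this diff.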
diff --git a/Vagrantfile b/Vagrantfile index da4f8ac3..67de7429 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -57,6 +57,6 @@ Vagrant.configure("2") do |config| # get all deps and tools, ready to install/test su - vagrant -c 'source /home/vagrant/.bash_profile' - su - vagrant -c 'cd /home/vagrant/go/src/github.com/tendermint/tendermint && make get_tools && make get_vendor_deps' + su - vagrant -c 'cd /home/vagrant/go/src/github.com/tendermint/tendermint && make get_tools' SHELL end diff --git a/abci/example/kvstore/kvstore.go b/abci/example/kvstore/kvstore.go index 955baefb..0c28813f 100644 --- a/abci/example/kvstore/kvstore.go +++ b/abci/example/kvstore/kvstore.go @@ -84,14 +84,21 @@ func (app *KVStoreApplication) DeliverTx(tx []byte) types.ResponseDeliverTx { } else { key, value = tx, tx } + app.state.db.Set(prefixKey(key), value) app.state.Size += 1 - tags := []cmn.KVPair{ - {Key: []byte("app.creator"), Value: []byte("Cosmoshi Netowoko")}, - {Key: []byte("app.key"), Value: key}, + events := []types.Event{ + { + Type: "app", + Attributes: []cmn.KVPair{ + {Key: []byte("creator"), Value: []byte("Cosmoshi Netowoko")}, + {Key: []byte("key"), Value: key}, + }, + }, } - return types.ResponseDeliverTx{Code: code.CodeTypeOK, Tags: tags} + + return types.ResponseDeliverTx{Code: code.CodeTypeOK, Events: events} } func (app *KVStoreApplication) CheckTx(tx []byte) types.ResponseCheckTx { diff --git a/abci/tests/test_app/test.sh b/abci/tests/test_app/test.sh index 230c9416..c0bdace2 100755 --- a/abci/tests/test_app/test.sh +++ b/abci/tests/test_app/test.sh @@ -3,6 +3,8 @@ set -e # These tests spawn the counter app and server by execing the ABCI_APP command and run some simple client tests against it +export GO111MODULE=on + # Get the directory of where this script is. 
SOURCE="${BASH_SOURCE[0]}" while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done @@ -13,13 +15,13 @@ cd "$DIR" echo "RUN COUNTER OVER SOCKET" # test golang counter -ABCI_APP="counter" go run ./*.go +ABCI_APP="counter" go run -mod=readonly ./*.go echo "----------------------" echo "RUN COUNTER OVER GRPC" # test golang counter via grpc -ABCI_APP="counter --abci=grpc" ABCI="grpc" go run ./*.go +ABCI_APP="counter --abci=grpc" ABCI="grpc" go run -mod=readonly ./*.go echo "----------------------" # test nodejs counter diff --git a/abci/types/messages_test.go b/abci/types/messages_test.go index 762111b6..904b1641 100644 --- a/abci/types/messages_test.go +++ b/abci/types/messages_test.go @@ -8,6 +8,7 @@ import ( "github.com/gogo/protobuf/proto" "github.com/stretchr/testify/assert" + cmn "github.com/tendermint/tendermint/libs/common" ) @@ -21,8 +22,13 @@ func TestMarshalJSON(t *testing.T) { Code: 1, Data: []byte("hello"), GasWanted: 43, - Tags: []cmn.KVPair{ - {Key: []byte("pho"), Value: []byte("bo")}, + Events: []Event{ + { + Type: "testEvent", + Attributes: []cmn.KVPair{ + {Key: []byte("pho"), Value: []byte("bo")}, + }, + }, }, } b, err = json.Marshal(&r1) @@ -82,8 +88,13 @@ func TestWriteReadMessage2(t *testing.T) { Data: []byte(phrase), Log: phrase, GasWanted: 10, - Tags: []cmn.KVPair{ - {Key: []byte("abc"), Value: []byte("def")}, + Events: []Event{ + { + Type: "testEvent", + Attributes: []cmn.KVPair{ + {Key: []byte("abc"), Value: []byte("def")}, + }, + }, }, }, // TODO: add the rest diff --git a/abci/types/types.pb.go b/abci/types/types.pb.go index b09213a5..a7455b52 100644 --- a/abci/types/types.pb.go +++ b/abci/types/types.pb.go @@ -61,7 +61,7 @@ func (m *Request) Reset() { *m = Request{} } func (m *Request) String() string { return proto.CompactTextString(m) } func (*Request) ProtoMessage() {} func (*Request) Descriptor() ([]byte, []int) { - return fileDescriptor_types_a177e47fab90f91d, []int{0} + return fileDescriptor_types_62f0c59aeb977f78, []int{0} } func (m *Request) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -483,7 +483,7 @@ func (m *RequestEcho) Reset() { *m = RequestEcho{} } func (m *RequestEcho) String() string { return proto.CompactTextString(m) } func (*RequestEcho) ProtoMessage() {} func (*RequestEcho) Descriptor() ([]byte, []int) { - return fileDescriptor_types_a177e47fab90f91d, []int{1} + return fileDescriptor_types_62f0c59aeb977f78, []int{1} } func (m *RequestEcho) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -529,7 +529,7 @@ func (m *RequestFlush) Reset() { *m = RequestFlush{} } func (m *RequestFlush) String() string { return proto.CompactTextString(m) } func (*RequestFlush) ProtoMessage() {} func (*RequestFlush) Descriptor() ([]byte, []int) { - return fileDescriptor_types_a177e47fab90f91d, []int{2} + return fileDescriptor_types_62f0c59aeb977f78, []int{2} } func (m *RequestFlush) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -571,7 +571,7 @@ func (m *RequestInfo) Reset() { *m = RequestInfo{} } func (m *RequestInfo) String() string { return proto.CompactTextString(m) } func (*RequestInfo) ProtoMessage() {} func (*RequestInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_types_a177e47fab90f91d, []int{3} + return fileDescriptor_types_62f0c59aeb977f78, []int{3} } func (m *RequestInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -634,7 +634,7 @@ func (m *RequestSetOption) Reset() { *m = RequestSetOption{} } func (m *RequestSetOption) String() string { return proto.CompactTextString(m) } func (*RequestSetOption) 
ProtoMessage() {} func (*RequestSetOption) Descriptor() ([]byte, []int) { - return fileDescriptor_types_a177e47fab90f91d, []int{4} + return fileDescriptor_types_62f0c59aeb977f78, []int{4} } func (m *RequestSetOption) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -692,7 +692,7 @@ func (m *RequestInitChain) Reset() { *m = RequestInitChain{} } func (m *RequestInitChain) String() string { return proto.CompactTextString(m) } func (*RequestInitChain) ProtoMessage() {} func (*RequestInitChain) Descriptor() ([]byte, []int) { - return fileDescriptor_types_a177e47fab90f91d, []int{5} + return fileDescriptor_types_62f0c59aeb977f78, []int{5} } func (m *RequestInitChain) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -770,7 +770,7 @@ func (m *RequestQuery) Reset() { *m = RequestQuery{} } func (m *RequestQuery) String() string { return proto.CompactTextString(m) } func (*RequestQuery) ProtoMessage() {} func (*RequestQuery) Descriptor() ([]byte, []int) { - return fileDescriptor_types_a177e47fab90f91d, []int{6} + return fileDescriptor_types_62f0c59aeb977f78, []int{6} } func (m *RequestQuery) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -841,7 +841,7 @@ func (m *RequestBeginBlock) Reset() { *m = RequestBeginBlock{} } func (m *RequestBeginBlock) String() string { return proto.CompactTextString(m) } func (*RequestBeginBlock) ProtoMessage() {} func (*RequestBeginBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_types_a177e47fab90f91d, []int{7} + return fileDescriptor_types_62f0c59aeb977f78, []int{7} } func (m *RequestBeginBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -909,7 +909,7 @@ func (m *RequestCheckTx) Reset() { *m = RequestCheckTx{} } func (m *RequestCheckTx) String() string { return proto.CompactTextString(m) } func (*RequestCheckTx) ProtoMessage() {} func (*RequestCheckTx) Descriptor() ([]byte, []int) { - return fileDescriptor_types_a177e47fab90f91d, []int{8} + return fileDescriptor_types_62f0c59aeb977f78, []int{8} } func (m *RequestCheckTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -956,7 +956,7 @@ func (m *RequestDeliverTx) Reset() { *m = RequestDeliverTx{} } func (m *RequestDeliverTx) String() string { return proto.CompactTextString(m) } func (*RequestDeliverTx) ProtoMessage() {} func (*RequestDeliverTx) Descriptor() ([]byte, []int) { - return fileDescriptor_types_a177e47fab90f91d, []int{9} + return fileDescriptor_types_62f0c59aeb977f78, []int{9} } func (m *RequestDeliverTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1003,7 +1003,7 @@ func (m *RequestEndBlock) Reset() { *m = RequestEndBlock{} } func (m *RequestEndBlock) String() string { return proto.CompactTextString(m) } func (*RequestEndBlock) ProtoMessage() {} func (*RequestEndBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_types_a177e47fab90f91d, []int{10} + return fileDescriptor_types_62f0c59aeb977f78, []int{10} } func (m *RequestEndBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1049,7 +1049,7 @@ func (m *RequestCommit) Reset() { *m = RequestCommit{} } func (m *RequestCommit) String() string { return proto.CompactTextString(m) } func (*RequestCommit) ProtoMessage() {} func (*RequestCommit) Descriptor() ([]byte, []int) { - return fileDescriptor_types_a177e47fab90f91d, []int{11} + return fileDescriptor_types_62f0c59aeb977f78, []int{11} } func (m *RequestCommit) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1102,7 +1102,7 @@ func (m *Response) Reset() { *m = Response{} } func (m *Response) String() string { 
return proto.CompactTextString(m) } func (*Response) ProtoMessage() {} func (*Response) Descriptor() ([]byte, []int) { - return fileDescriptor_types_a177e47fab90f91d, []int{12} + return fileDescriptor_types_62f0c59aeb977f78, []int{12} } func (m *Response) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1555,7 +1555,7 @@ func (m *ResponseException) Reset() { *m = ResponseException{} } func (m *ResponseException) String() string { return proto.CompactTextString(m) } func (*ResponseException) ProtoMessage() {} func (*ResponseException) Descriptor() ([]byte, []int) { - return fileDescriptor_types_a177e47fab90f91d, []int{13} + return fileDescriptor_types_62f0c59aeb977f78, []int{13} } func (m *ResponseException) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1602,7 +1602,7 @@ func (m *ResponseEcho) Reset() { *m = ResponseEcho{} } func (m *ResponseEcho) String() string { return proto.CompactTextString(m) } func (*ResponseEcho) ProtoMessage() {} func (*ResponseEcho) Descriptor() ([]byte, []int) { - return fileDescriptor_types_a177e47fab90f91d, []int{14} + return fileDescriptor_types_62f0c59aeb977f78, []int{14} } func (m *ResponseEcho) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1648,7 +1648,7 @@ func (m *ResponseFlush) Reset() { *m = ResponseFlush{} } func (m *ResponseFlush) String() string { return proto.CompactTextString(m) } func (*ResponseFlush) ProtoMessage() {} func (*ResponseFlush) Descriptor() ([]byte, []int) { - return fileDescriptor_types_a177e47fab90f91d, []int{15} + return fileDescriptor_types_62f0c59aeb977f78, []int{15} } func (m *ResponseFlush) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1692,7 +1692,7 @@ func (m *ResponseInfo) Reset() { *m = ResponseInfo{} } func (m *ResponseInfo) String() string { return proto.CompactTextString(m) } func (*ResponseInfo) ProtoMessage() {} func (*ResponseInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_types_a177e47fab90f91d, []int{16} + return fileDescriptor_types_62f0c59aeb977f78, []int{16} } func (m *ResponseInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1771,7 +1771,7 @@ func (m *ResponseSetOption) Reset() { *m = ResponseSetOption{} } func (m *ResponseSetOption) String() string { return proto.CompactTextString(m) } func (*ResponseSetOption) ProtoMessage() {} func (*ResponseSetOption) Descriptor() ([]byte, []int) { - return fileDescriptor_types_a177e47fab90f91d, []int{17} + return fileDescriptor_types_62f0c59aeb977f78, []int{17} } func (m *ResponseSetOption) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1833,7 +1833,7 @@ func (m *ResponseInitChain) Reset() { *m = ResponseInitChain{} } func (m *ResponseInitChain) String() string { return proto.CompactTextString(m) } func (*ResponseInitChain) ProtoMessage() {} func (*ResponseInitChain) Descriptor() ([]byte, []int) { - return fileDescriptor_types_a177e47fab90f91d, []int{18} + return fileDescriptor_types_62f0c59aeb977f78, []int{18} } func (m *ResponseInitChain) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1896,7 +1896,7 @@ func (m *ResponseQuery) Reset() { *m = ResponseQuery{} } func (m *ResponseQuery) String() string { return proto.CompactTextString(m) } func (*ResponseQuery) ProtoMessage() {} func (*ResponseQuery) Descriptor() ([]byte, []int) { - return fileDescriptor_types_a177e47fab90f91d, []int{19} + return fileDescriptor_types_62f0c59aeb977f78, []int{19} } func (m *ResponseQuery) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1989,17 +1989,17 @@ func (m *ResponseQuery) GetCodespace() 
string { } type ResponseBeginBlock struct { - Tags []common.KVPair `protobuf:"bytes,1,rep,name=tags" json:"tags,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Events []Event `protobuf:"bytes,1,rep,name=events" json:"events,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *ResponseBeginBlock) Reset() { *m = ResponseBeginBlock{} } func (m *ResponseBeginBlock) String() string { return proto.CompactTextString(m) } func (*ResponseBeginBlock) ProtoMessage() {} func (*ResponseBeginBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_types_a177e47fab90f91d, []int{20} + return fileDescriptor_types_62f0c59aeb977f78, []int{20} } func (m *ResponseBeginBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2028,32 +2028,32 @@ func (m *ResponseBeginBlock) XXX_DiscardUnknown() { var xxx_messageInfo_ResponseBeginBlock proto.InternalMessageInfo -func (m *ResponseBeginBlock) GetTags() []common.KVPair { +func (m *ResponseBeginBlock) GetEvents() []Event { if m != nil { - return m.Tags + return m.Events } return nil } type ResponseCheckTx struct { - Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` - Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` - Log string `protobuf:"bytes,3,opt,name=log,proto3" json:"log,omitempty"` - Info string `protobuf:"bytes,4,opt,name=info,proto3" json:"info,omitempty"` - GasWanted int64 `protobuf:"varint,5,opt,name=gas_wanted,json=gasWanted,proto3" json:"gas_wanted,omitempty"` - GasUsed int64 `protobuf:"varint,6,opt,name=gas_used,json=gasUsed,proto3" json:"gas_used,omitempty"` - Tags []common.KVPair `protobuf:"bytes,7,rep,name=tags" json:"tags,omitempty"` - Codespace string `protobuf:"bytes,8,opt,name=codespace,proto3" json:"codespace,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + Log string `protobuf:"bytes,3,opt,name=log,proto3" json:"log,omitempty"` + Info string `protobuf:"bytes,4,opt,name=info,proto3" json:"info,omitempty"` + GasWanted int64 `protobuf:"varint,5,opt,name=gas_wanted,json=gasWanted,proto3" json:"gas_wanted,omitempty"` + GasUsed int64 `protobuf:"varint,6,opt,name=gas_used,json=gasUsed,proto3" json:"gas_used,omitempty"` + Events []Event `protobuf:"bytes,7,rep,name=events" json:"events,omitempty"` + Codespace string `protobuf:"bytes,8,opt,name=codespace,proto3" json:"codespace,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *ResponseCheckTx) Reset() { *m = ResponseCheckTx{} } func (m *ResponseCheckTx) String() string { return proto.CompactTextString(m) } func (*ResponseCheckTx) ProtoMessage() {} func (*ResponseCheckTx) Descriptor() ([]byte, []int) { - return fileDescriptor_types_a177e47fab90f91d, []int{21} + return fileDescriptor_types_62f0c59aeb977f78, []int{21} } func (m *ResponseCheckTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2124,9 +2124,9 @@ func (m *ResponseCheckTx) GetGasUsed() int64 { return 0 } -func (m *ResponseCheckTx) GetTags() []common.KVPair { +func (m *ResponseCheckTx) GetEvents() []Event { if m != nil { - return m.Tags + return m.Events } return nil } @@ -2139,24 +2139,24 @@ 
func (m *ResponseCheckTx) GetCodespace() string { } type ResponseDeliverTx struct { - Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` - Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` - Log string `protobuf:"bytes,3,opt,name=log,proto3" json:"log,omitempty"` - Info string `protobuf:"bytes,4,opt,name=info,proto3" json:"info,omitempty"` - GasWanted int64 `protobuf:"varint,5,opt,name=gas_wanted,json=gasWanted,proto3" json:"gas_wanted,omitempty"` - GasUsed int64 `protobuf:"varint,6,opt,name=gas_used,json=gasUsed,proto3" json:"gas_used,omitempty"` - Tags []common.KVPair `protobuf:"bytes,7,rep,name=tags" json:"tags,omitempty"` - Codespace string `protobuf:"bytes,8,opt,name=codespace,proto3" json:"codespace,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + Log string `protobuf:"bytes,3,opt,name=log,proto3" json:"log,omitempty"` + Info string `protobuf:"bytes,4,opt,name=info,proto3" json:"info,omitempty"` + GasWanted int64 `protobuf:"varint,5,opt,name=gas_wanted,json=gasWanted,proto3" json:"gas_wanted,omitempty"` + GasUsed int64 `protobuf:"varint,6,opt,name=gas_used,json=gasUsed,proto3" json:"gas_used,omitempty"` + Events []Event `protobuf:"bytes,7,rep,name=events" json:"events,omitempty"` + Codespace string `protobuf:"bytes,8,opt,name=codespace,proto3" json:"codespace,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *ResponseDeliverTx) Reset() { *m = ResponseDeliverTx{} } func (m *ResponseDeliverTx) String() string { return proto.CompactTextString(m) } func (*ResponseDeliverTx) ProtoMessage() {} func (*ResponseDeliverTx) Descriptor() ([]byte, []int) { - return fileDescriptor_types_a177e47fab90f91d, []int{22} + return fileDescriptor_types_62f0c59aeb977f78, []int{22} } func (m *ResponseDeliverTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2227,9 +2227,9 @@ func (m *ResponseDeliverTx) GetGasUsed() int64 { return 0 } -func (m *ResponseDeliverTx) GetTags() []common.KVPair { +func (m *ResponseDeliverTx) GetEvents() []Event { if m != nil { - return m.Tags + return m.Events } return nil } @@ -2244,7 +2244,7 @@ func (m *ResponseDeliverTx) GetCodespace() string { type ResponseEndBlock struct { ValidatorUpdates []ValidatorUpdate `protobuf:"bytes,1,rep,name=validator_updates,json=validatorUpdates" json:"validator_updates"` ConsensusParamUpdates *ConsensusParams `protobuf:"bytes,2,opt,name=consensus_param_updates,json=consensusParamUpdates" json:"consensus_param_updates,omitempty"` - Tags []common.KVPair `protobuf:"bytes,3,rep,name=tags" json:"tags,omitempty"` + Events []Event `protobuf:"bytes,3,rep,name=events" json:"events,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -2254,7 +2254,7 @@ func (m *ResponseEndBlock) Reset() { *m = ResponseEndBlock{} } func (m *ResponseEndBlock) String() string { return proto.CompactTextString(m) } func (*ResponseEndBlock) ProtoMessage() {} func (*ResponseEndBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_types_a177e47fab90f91d, []int{23} + return fileDescriptor_types_62f0c59aeb977f78, []int{23} } func (m *ResponseEndBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2297,9 +2297,9 @@ func 
(m *ResponseEndBlock) GetConsensusParamUpdates() *ConsensusParams { return nil } -func (m *ResponseEndBlock) GetTags() []common.KVPair { +func (m *ResponseEndBlock) GetEvents() []Event { if m != nil { - return m.Tags + return m.Events } return nil } @@ -2316,7 +2316,7 @@ func (m *ResponseCommit) Reset() { *m = ResponseCommit{} } func (m *ResponseCommit) String() string { return proto.CompactTextString(m) } func (*ResponseCommit) ProtoMessage() {} func (*ResponseCommit) Descriptor() ([]byte, []int) { - return fileDescriptor_types_a177e47fab90f91d, []int{24} + return fileDescriptor_types_62f0c59aeb977f78, []int{24} } func (m *ResponseCommit) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2367,7 +2367,7 @@ func (m *ConsensusParams) Reset() { *m = ConsensusParams{} } func (m *ConsensusParams) String() string { return proto.CompactTextString(m) } func (*ConsensusParams) ProtoMessage() {} func (*ConsensusParams) Descriptor() ([]byte, []int) { - return fileDescriptor_types_a177e47fab90f91d, []int{25} + return fileDescriptor_types_62f0c59aeb977f78, []int{25} } func (m *ConsensusParams) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2432,7 +2432,7 @@ func (m *BlockParams) Reset() { *m = BlockParams{} } func (m *BlockParams) String() string { return proto.CompactTextString(m) } func (*BlockParams) ProtoMessage() {} func (*BlockParams) Descriptor() ([]byte, []int) { - return fileDescriptor_types_a177e47fab90f91d, []int{26} + return fileDescriptor_types_62f0c59aeb977f78, []int{26} } func (m *BlockParams) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2488,7 +2488,7 @@ func (m *EvidenceParams) Reset() { *m = EvidenceParams{} } func (m *EvidenceParams) String() string { return proto.CompactTextString(m) } func (*EvidenceParams) ProtoMessage() {} func (*EvidenceParams) Descriptor() ([]byte, []int) { - return fileDescriptor_types_a177e47fab90f91d, []int{27} + return fileDescriptor_types_62f0c59aeb977f78, []int{27} } func (m *EvidenceParams) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2536,7 +2536,7 @@ func (m *ValidatorParams) Reset() { *m = ValidatorParams{} } func (m *ValidatorParams) String() string { return proto.CompactTextString(m) } func (*ValidatorParams) ProtoMessage() {} func (*ValidatorParams) Descriptor() ([]byte, []int) { - return fileDescriptor_types_a177e47fab90f91d, []int{28} + return fileDescriptor_types_62f0c59aeb977f78, []int{28} } func (m *ValidatorParams) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2584,7 +2584,7 @@ func (m *LastCommitInfo) Reset() { *m = LastCommitInfo{} } func (m *LastCommitInfo) String() string { return proto.CompactTextString(m) } func (*LastCommitInfo) ProtoMessage() {} func (*LastCommitInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_types_a177e47fab90f91d, []int{29} + return fileDescriptor_types_62f0c59aeb977f78, []int{29} } func (m *LastCommitInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2627,6 +2627,61 @@ func (m *LastCommitInfo) GetVotes() []VoteInfo { return nil } +type Event struct { + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Attributes []common.KVPair `protobuf:"bytes,2,rep,name=attributes" json:"attributes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Event) Reset() { *m = Event{} } +func (m *Event) String() string { return proto.CompactTextString(m) } +func (*Event) ProtoMessage() {} +func (*Event) Descriptor() ([]byte, []int) { + 
return fileDescriptor_types_62f0c59aeb977f78, []int{30} +} +func (m *Event) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Event.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Event) XXX_Merge(src proto.Message) { + xxx_messageInfo_Event.Merge(dst, src) +} +func (m *Event) XXX_Size() int { + return m.Size() +} +func (m *Event) XXX_DiscardUnknown() { + xxx_messageInfo_Event.DiscardUnknown(m) +} + +var xxx_messageInfo_Event proto.InternalMessageInfo + +func (m *Event) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *Event) GetAttributes() []common.KVPair { + if m != nil { + return m.Attributes + } + return nil +} + type Header struct { // basic block info Version Version `protobuf:"bytes,1,opt,name=version" json:"version"` @@ -2658,7 +2713,7 @@ func (m *Header) Reset() { *m = Header{} } func (m *Header) String() string { return proto.CompactTextString(m) } func (*Header) ProtoMessage() {} func (*Header) Descriptor() ([]byte, []int) { - return fileDescriptor_types_a177e47fab90f91d, []int{30} + return fileDescriptor_types_62f0c59aeb977f78, []int{31} } func (m *Header) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2811,7 +2866,7 @@ func (m *Version) Reset() { *m = Version{} } func (m *Version) String() string { return proto.CompactTextString(m) } func (*Version) ProtoMessage() {} func (*Version) Descriptor() ([]byte, []int) { - return fileDescriptor_types_a177e47fab90f91d, []int{31} + return fileDescriptor_types_62f0c59aeb977f78, []int{32} } func (m *Version) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2866,7 +2921,7 @@ func (m *BlockID) Reset() { *m = BlockID{} } func (m *BlockID) String() string { return proto.CompactTextString(m) } func (*BlockID) ProtoMessage() {} func (*BlockID) Descriptor() ([]byte, []int) { - return fileDescriptor_types_a177e47fab90f91d, []int{32} + return fileDescriptor_types_62f0c59aeb977f78, []int{33} } func (m *BlockID) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2921,7 +2976,7 @@ func (m *PartSetHeader) Reset() { *m = PartSetHeader{} } func (m *PartSetHeader) String() string { return proto.CompactTextString(m) } func (*PartSetHeader) ProtoMessage() {} func (*PartSetHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_types_a177e47fab90f91d, []int{33} + return fileDescriptor_types_62f0c59aeb977f78, []int{34} } func (m *PartSetHeader) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2978,7 +3033,7 @@ func (m *Validator) Reset() { *m = Validator{} } func (m *Validator) String() string { return proto.CompactTextString(m) } func (*Validator) ProtoMessage() {} func (*Validator) Descriptor() ([]byte, []int) { - return fileDescriptor_types_a177e47fab90f91d, []int{34} + return fileDescriptor_types_62f0c59aeb977f78, []int{35} } func (m *Validator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3034,7 +3089,7 @@ func (m *ValidatorUpdate) Reset() { *m = ValidatorUpdate{} } func (m *ValidatorUpdate) String() string { return proto.CompactTextString(m) } func (*ValidatorUpdate) ProtoMessage() {} func (*ValidatorUpdate) Descriptor() ([]byte, []int) { - return fileDescriptor_types_a177e47fab90f91d, []int{35} + return fileDescriptor_types_62f0c59aeb977f78, []int{36} } func (m *ValidatorUpdate) XXX_Unmarshal(b []byte) error { return 
m.Unmarshal(b) @@ -3090,7 +3145,7 @@ func (m *VoteInfo) Reset() { *m = VoteInfo{} } func (m *VoteInfo) String() string { return proto.CompactTextString(m) } func (*VoteInfo) ProtoMessage() {} func (*VoteInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_types_a177e47fab90f91d, []int{36} + return fileDescriptor_types_62f0c59aeb977f78, []int{37} } func (m *VoteInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3145,7 +3200,7 @@ func (m *PubKey) Reset() { *m = PubKey{} } func (m *PubKey) String() string { return proto.CompactTextString(m) } func (*PubKey) ProtoMessage() {} func (*PubKey) Descriptor() ([]byte, []int) { - return fileDescriptor_types_a177e47fab90f91d, []int{37} + return fileDescriptor_types_62f0c59aeb977f78, []int{38} } func (m *PubKey) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3203,7 +3258,7 @@ func (m *Evidence) Reset() { *m = Evidence{} } func (m *Evidence) String() string { return proto.CompactTextString(m) } func (*Evidence) ProtoMessage() {} func (*Evidence) Descriptor() ([]byte, []int) { - return fileDescriptor_types_a177e47fab90f91d, []int{38} + return fileDescriptor_types_62f0c59aeb977f78, []int{39} } func (m *Evidence) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3328,6 +3383,8 @@ func init() { golang_proto.RegisterType((*ValidatorParams)(nil), "types.ValidatorParams") proto.RegisterType((*LastCommitInfo)(nil), "types.LastCommitInfo") golang_proto.RegisterType((*LastCommitInfo)(nil), "types.LastCommitInfo") + proto.RegisterType((*Event)(nil), "types.Event") + golang_proto.RegisterType((*Event)(nil), "types.Event") proto.RegisterType((*Header)(nil), "types.Header") golang_proto.RegisterType((*Header)(nil), "types.Header") proto.RegisterType((*Version)(nil), "types.Version") @@ -4560,11 +4617,11 @@ func (this *ResponseBeginBlock) Equal(that interface{}) bool { } else if this == nil { return false } - if len(this.Tags) != len(that1.Tags) { + if len(this.Events) != len(that1.Events) { return false } - for i := range this.Tags { - if !this.Tags[i].Equal(&that1.Tags[i]) { + for i := range this.Events { + if !this.Events[i].Equal(&that1.Events[i]) { return false } } @@ -4610,11 +4667,11 @@ func (this *ResponseCheckTx) Equal(that interface{}) bool { if this.GasUsed != that1.GasUsed { return false } - if len(this.Tags) != len(that1.Tags) { + if len(this.Events) != len(that1.Events) { return false } - for i := range this.Tags { - if !this.Tags[i].Equal(&that1.Tags[i]) { + for i := range this.Events { + if !this.Events[i].Equal(&that1.Events[i]) { return false } } @@ -4663,11 +4720,11 @@ func (this *ResponseDeliverTx) Equal(that interface{}) bool { if this.GasUsed != that1.GasUsed { return false } - if len(this.Tags) != len(that1.Tags) { + if len(this.Events) != len(that1.Events) { return false } - for i := range this.Tags { - if !this.Tags[i].Equal(&that1.Tags[i]) { + for i := range this.Events { + if !this.Events[i].Equal(&that1.Events[i]) { return false } } @@ -4709,11 +4766,11 @@ func (this *ResponseEndBlock) Equal(that interface{}) bool { if !this.ConsensusParamUpdates.Equal(that1.ConsensusParamUpdates) { return false } - if len(this.Tags) != len(that1.Tags) { + if len(this.Events) != len(that1.Events) { return false } - for i := range this.Tags { - if !this.Tags[i].Equal(&that1.Tags[i]) { + for i := range this.Events { + if !this.Events[i].Equal(&that1.Events[i]) { return false } } @@ -4906,6 +4963,41 @@ func (this *LastCommitInfo) Equal(that interface{}) bool { } return true } +func (this *Event) Equal(that interface{}) 
bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Event) + if !ok { + that2, ok := that.(Event) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Type != that1.Type { + return false + } + if len(this.Attributes) != len(that1.Attributes) { + return false + } + for i := range this.Attributes { + if !this.Attributes[i].Equal(&that1.Attributes[i]) { + return false + } + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} func (this *Header) Equal(that interface{}) bool { if that == nil { return this == nil @@ -6691,8 +6783,8 @@ func (m *ResponseBeginBlock) MarshalTo(dAtA []byte) (int, error) { _ = i var l int _ = l - if len(m.Tags) > 0 { - for _, msg := range m.Tags { + if len(m.Events) > 0 { + for _, msg := range m.Events { dAtA[i] = 0xa i++ i = encodeVarintTypes(dAtA, i, uint64(msg.Size())) @@ -6757,8 +6849,8 @@ func (m *ResponseCheckTx) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintTypes(dAtA, i, uint64(m.GasUsed)) } - if len(m.Tags) > 0 { - for _, msg := range m.Tags { + if len(m.Events) > 0 { + for _, msg := range m.Events { dAtA[i] = 0x3a i++ i = encodeVarintTypes(dAtA, i, uint64(msg.Size())) @@ -6829,8 +6921,8 @@ func (m *ResponseDeliverTx) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintTypes(dAtA, i, uint64(m.GasUsed)) } - if len(m.Tags) > 0 { - for _, msg := range m.Tags { + if len(m.Events) > 0 { + for _, msg := range m.Events { dAtA[i] = 0x3a i++ i = encodeVarintTypes(dAtA, i, uint64(msg.Size())) @@ -6890,8 +6982,8 @@ func (m *ResponseEndBlock) MarshalTo(dAtA []byte) (int, error) { } i += n32 } - if len(m.Tags) > 0 { - for _, msg := range m.Tags { + if len(m.Events) > 0 { + for _, msg := range m.Events { dAtA[i] = 0x1a i++ i = encodeVarintTypes(dAtA, i, uint64(msg.Size())) @@ -7117,6 +7209,45 @@ func (m *LastCommitInfo) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *Event) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Event) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Type) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + } + if len(m.Attributes) > 0 { + for _, msg := range m.Attributes { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + func (m *Header) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -7988,10 +8119,10 @@ func NewPopulatedResponseBeginBlock(r randyTypes, easy bool) *ResponseBeginBlock this := &ResponseBeginBlock{} if r.Intn(10) != 0 { v18 := r.Intn(5) - this.Tags = make([]common.KVPair, v18) + this.Events = make([]Event, v18) for i := 0; i < v18; i++ { - v19 := common.NewPopulatedKVPair(r, easy) - this.Tags[i] = *v19 + v19 := NewPopulatedEvent(r, easy) + this.Events[i] = *v19 } } if !easy && r.Intn(10) != 0 { @@ -8020,10 +8151,10 @@ func NewPopulatedResponseCheckTx(r randyTypes, easy bool) *ResponseCheckTx { } if r.Intn(10) != 0 { v21 := r.Intn(5) - this.Tags = make([]common.KVPair, v21) + this.Events = make([]Event, v21) for i := 0; i < v21; i++ 
{ - v22 := common.NewPopulatedKVPair(r, easy) - this.Tags[i] = *v22 + v22 := NewPopulatedEvent(r, easy) + this.Events[i] = *v22 } } this.Codespace = string(randStringTypes(r)) @@ -8053,10 +8184,10 @@ func NewPopulatedResponseDeliverTx(r randyTypes, easy bool) *ResponseDeliverTx { } if r.Intn(10) != 0 { v24 := r.Intn(5) - this.Tags = make([]common.KVPair, v24) + this.Events = make([]Event, v24) for i := 0; i < v24; i++ { - v25 := common.NewPopulatedKVPair(r, easy) - this.Tags[i] = *v25 + v25 := NewPopulatedEvent(r, easy) + this.Events[i] = *v25 } } this.Codespace = string(randStringTypes(r)) @@ -8081,10 +8212,10 @@ func NewPopulatedResponseEndBlock(r randyTypes, easy bool) *ResponseEndBlock { } if r.Intn(10) != 0 { v28 := r.Intn(5) - this.Tags = make([]common.KVPair, v28) + this.Events = make([]Event, v28) for i := 0; i < v28; i++ { - v29 := common.NewPopulatedKVPair(r, easy) - this.Tags[i] = *v29 + v29 := NewPopulatedEvent(r, easy) + this.Events[i] = *v29 } } if !easy && r.Intn(10) != 0 { @@ -8184,17 +8315,34 @@ func NewPopulatedLastCommitInfo(r randyTypes, easy bool) *LastCommitInfo { return this } +func NewPopulatedEvent(r randyTypes, easy bool) *Event { + this := &Event{} + this.Type = string(randStringTypes(r)) + if r.Intn(10) != 0 { + v34 := r.Intn(5) + this.Attributes = make([]common.KVPair, v34) + for i := 0; i < v34; i++ { + v35 := common.NewPopulatedKVPair(r, easy) + this.Attributes[i] = *v35 + } + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedTypes(r, 3) + } + return this +} + func NewPopulatedHeader(r randyTypes, easy bool) *Header { this := &Header{} - v34 := NewPopulatedVersion(r, easy) - this.Version = *v34 + v36 := NewPopulatedVersion(r, easy) + this.Version = *v36 this.ChainID = string(randStringTypes(r)) this.Height = int64(r.Int63()) if r.Intn(2) == 0 { this.Height *= -1 } - v35 := github_com_gogo_protobuf_types.NewPopulatedStdTime(r, easy) - this.Time = *v35 + v37 := github_com_gogo_protobuf_types.NewPopulatedStdTime(r, easy) + this.Time = *v37 this.NumTxs = int64(r.Int63()) if r.Intn(2) == 0 { this.NumTxs *= -1 @@ -8203,51 +8351,51 @@ func NewPopulatedHeader(r randyTypes, easy bool) *Header { if r.Intn(2) == 0 { this.TotalTxs *= -1 } - v36 := NewPopulatedBlockID(r, easy) - this.LastBlockId = *v36 - v37 := r.Intn(100) - this.LastCommitHash = make([]byte, v37) - for i := 0; i < v37; i++ { + v38 := NewPopulatedBlockID(r, easy) + this.LastBlockId = *v38 + v39 := r.Intn(100) + this.LastCommitHash = make([]byte, v39) + for i := 0; i < v39; i++ { this.LastCommitHash[i] = byte(r.Intn(256)) } - v38 := r.Intn(100) - this.DataHash = make([]byte, v38) - for i := 0; i < v38; i++ { + v40 := r.Intn(100) + this.DataHash = make([]byte, v40) + for i := 0; i < v40; i++ { this.DataHash[i] = byte(r.Intn(256)) } - v39 := r.Intn(100) - this.ValidatorsHash = make([]byte, v39) - for i := 0; i < v39; i++ { + v41 := r.Intn(100) + this.ValidatorsHash = make([]byte, v41) + for i := 0; i < v41; i++ { this.ValidatorsHash[i] = byte(r.Intn(256)) } - v40 := r.Intn(100) - this.NextValidatorsHash = make([]byte, v40) - for i := 0; i < v40; i++ { + v42 := r.Intn(100) + this.NextValidatorsHash = make([]byte, v42) + for i := 0; i < v42; i++ { this.NextValidatorsHash[i] = byte(r.Intn(256)) } - v41 := r.Intn(100) - this.ConsensusHash = make([]byte, v41) - for i := 0; i < v41; i++ { + v43 := r.Intn(100) + this.ConsensusHash = make([]byte, v43) + for i := 0; i < v43; i++ { this.ConsensusHash[i] = byte(r.Intn(256)) } - v42 := r.Intn(100) - this.AppHash = make([]byte, v42) - for i := 
0; i < v42; i++ { + v44 := r.Intn(100) + this.AppHash = make([]byte, v44) + for i := 0; i < v44; i++ { this.AppHash[i] = byte(r.Intn(256)) } - v43 := r.Intn(100) - this.LastResultsHash = make([]byte, v43) - for i := 0; i < v43; i++ { + v45 := r.Intn(100) + this.LastResultsHash = make([]byte, v45) + for i := 0; i < v45; i++ { this.LastResultsHash[i] = byte(r.Intn(256)) } - v44 := r.Intn(100) - this.EvidenceHash = make([]byte, v44) - for i := 0; i < v44; i++ { + v46 := r.Intn(100) + this.EvidenceHash = make([]byte, v46) + for i := 0; i < v46; i++ { this.EvidenceHash[i] = byte(r.Intn(256)) } - v45 := r.Intn(100) - this.ProposerAddress = make([]byte, v45) - for i := 0; i < v45; i++ { + v47 := r.Intn(100) + this.ProposerAddress = make([]byte, v47) + for i := 0; i < v47; i++ { this.ProposerAddress[i] = byte(r.Intn(256)) } if !easy && r.Intn(10) != 0 { @@ -8268,13 +8416,13 @@ func NewPopulatedVersion(r randyTypes, easy bool) *Version { func NewPopulatedBlockID(r randyTypes, easy bool) *BlockID { this := &BlockID{} - v46 := r.Intn(100) - this.Hash = make([]byte, v46) - for i := 0; i < v46; i++ { + v48 := r.Intn(100) + this.Hash = make([]byte, v48) + for i := 0; i < v48; i++ { this.Hash[i] = byte(r.Intn(256)) } - v47 := NewPopulatedPartSetHeader(r, easy) - this.PartsHeader = *v47 + v49 := NewPopulatedPartSetHeader(r, easy) + this.PartsHeader = *v49 if !easy && r.Intn(10) != 0 { this.XXX_unrecognized = randUnrecognizedTypes(r, 3) } @@ -8287,9 +8435,9 @@ func NewPopulatedPartSetHeader(r randyTypes, easy bool) *PartSetHeader { if r.Intn(2) == 0 { this.Total *= -1 } - v48 := r.Intn(100) - this.Hash = make([]byte, v48) - for i := 0; i < v48; i++ { + v50 := r.Intn(100) + this.Hash = make([]byte, v50) + for i := 0; i < v50; i++ { this.Hash[i] = byte(r.Intn(256)) } if !easy && r.Intn(10) != 0 { @@ -8300,9 +8448,9 @@ func NewPopulatedPartSetHeader(r randyTypes, easy bool) *PartSetHeader { func NewPopulatedValidator(r randyTypes, easy bool) *Validator { this := &Validator{} - v49 := r.Intn(100) - this.Address = make([]byte, v49) - for i := 0; i < v49; i++ { + v51 := r.Intn(100) + this.Address = make([]byte, v51) + for i := 0; i < v51; i++ { this.Address[i] = byte(r.Intn(256)) } this.Power = int64(r.Int63()) @@ -8317,8 +8465,8 @@ func NewPopulatedValidator(r randyTypes, easy bool) *Validator { func NewPopulatedValidatorUpdate(r randyTypes, easy bool) *ValidatorUpdate { this := &ValidatorUpdate{} - v50 := NewPopulatedPubKey(r, easy) - this.PubKey = *v50 + v52 := NewPopulatedPubKey(r, easy) + this.PubKey = *v52 this.Power = int64(r.Int63()) if r.Intn(2) == 0 { this.Power *= -1 @@ -8331,8 +8479,8 @@ func NewPopulatedValidatorUpdate(r randyTypes, easy bool) *ValidatorUpdate { func NewPopulatedVoteInfo(r randyTypes, easy bool) *VoteInfo { this := &VoteInfo{} - v51 := NewPopulatedValidator(r, easy) - this.Validator = *v51 + v53 := NewPopulatedValidator(r, easy) + this.Validator = *v53 this.SignedLastBlock = bool(bool(r.Intn(2) == 0)) if !easy && r.Intn(10) != 0 { this.XXX_unrecognized = randUnrecognizedTypes(r, 3) @@ -8343,9 +8491,9 @@ func NewPopulatedVoteInfo(r randyTypes, easy bool) *VoteInfo { func NewPopulatedPubKey(r randyTypes, easy bool) *PubKey { this := &PubKey{} this.Type = string(randStringTypes(r)) - v52 := r.Intn(100) - this.Data = make([]byte, v52) - for i := 0; i < v52; i++ { + v54 := r.Intn(100) + this.Data = make([]byte, v54) + for i := 0; i < v54; i++ { this.Data[i] = byte(r.Intn(256)) } if !easy && r.Intn(10) != 0 { @@ -8357,14 +8505,14 @@ func NewPopulatedPubKey(r randyTypes, easy bool) *PubKey 
{ func NewPopulatedEvidence(r randyTypes, easy bool) *Evidence { this := &Evidence{} this.Type = string(randStringTypes(r)) - v53 := NewPopulatedValidator(r, easy) - this.Validator = *v53 + v55 := NewPopulatedValidator(r, easy) + this.Validator = *v55 this.Height = int64(r.Int63()) if r.Intn(2) == 0 { this.Height *= -1 } - v54 := github_com_gogo_protobuf_types.NewPopulatedStdTime(r, easy) - this.Time = *v54 + v56 := github_com_gogo_protobuf_types.NewPopulatedStdTime(r, easy) + this.Time = *v56 this.TotalVotingPower = int64(r.Int63()) if r.Intn(2) == 0 { this.TotalVotingPower *= -1 @@ -8394,9 +8542,9 @@ func randUTF8RuneTypes(r randyTypes) rune { return rune(ru + 61) } func randStringTypes(r randyTypes) string { - v55 := r.Intn(100) - tmps := make([]rune, v55) - for i := 0; i < v55; i++ { + v57 := r.Intn(100) + tmps := make([]rune, v57) + for i := 0; i < v57; i++ { tmps[i] = randUTF8RuneTypes(r) } return string(tmps) @@ -8418,11 +8566,11 @@ func randFieldTypes(dAtA []byte, r randyTypes, fieldNumber int, wire int) []byte switch wire { case 0: dAtA = encodeVarintPopulateTypes(dAtA, uint64(key)) - v56 := r.Int63() + v58 := r.Int63() if r.Intn(2) == 0 { - v56 *= -1 + v58 *= -1 } - dAtA = encodeVarintPopulateTypes(dAtA, uint64(v56)) + dAtA = encodeVarintPopulateTypes(dAtA, uint64(v58)) case 1: dAtA = encodeVarintPopulateTypes(dAtA, uint64(key)) dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) @@ -9136,8 +9284,8 @@ func (m *ResponseBeginBlock) Size() (n int) { } var l int _ = l - if len(m.Tags) > 0 { - for _, e := range m.Tags { + if len(m.Events) > 0 { + for _, e := range m.Events { l = e.Size() n += 1 + l + sovTypes(uint64(l)) } @@ -9175,8 +9323,8 @@ func (m *ResponseCheckTx) Size() (n int) { if m.GasUsed != 0 { n += 1 + sovTypes(uint64(m.GasUsed)) } - if len(m.Tags) > 0 { - for _, e := range m.Tags { + if len(m.Events) > 0 { + for _, e := range m.Events { l = e.Size() n += 1 + l + sovTypes(uint64(l)) } @@ -9218,8 +9366,8 @@ func (m *ResponseDeliverTx) Size() (n int) { if m.GasUsed != 0 { n += 1 + sovTypes(uint64(m.GasUsed)) } - if len(m.Tags) > 0 { - for _, e := range m.Tags { + if len(m.Events) > 0 { + for _, e := range m.Events { l = e.Size() n += 1 + l + sovTypes(uint64(l)) } @@ -9250,8 +9398,8 @@ func (m *ResponseEndBlock) Size() (n int) { l = m.ConsensusParamUpdates.Size() n += 1 + l + sovTypes(uint64(l)) } - if len(m.Tags) > 0 { - for _, e := range m.Tags { + if len(m.Events) > 0 { + for _, e := range m.Events { l = e.Size() n += 1 + l + sovTypes(uint64(l)) } @@ -9374,6 +9522,28 @@ func (m *LastCommitInfo) Size() (n int) { return n } +func (m *Event) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if len(m.Attributes) > 0 { + for _, e := range m.Attributes { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func (m *Header) Size() (n int) { if m == nil { return 0 @@ -12570,7 +12740,7 @@ func (m *ResponseBeginBlock) Unmarshal(dAtA []byte) error { switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12594,8 +12764,8 @@ func (m *ResponseBeginBlock) Unmarshal(dAtA []byte) error { if postIndex 
> l { return io.ErrUnexpectedEOF } - m.Tags = append(m.Tags, common.KVPair{}) - if err := m.Tags[len(m.Tags)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Events = append(m.Events, Event{}) + if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -12798,7 +12968,7 @@ func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error { } case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12822,8 +12992,8 @@ func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Tags = append(m.Tags, common.KVPair{}) - if err := m.Tags[len(m.Tags)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Events = append(m.Events, Event{}) + if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -13055,7 +13225,7 @@ func (m *ResponseDeliverTx) Unmarshal(dAtA []byte) error { } case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13079,8 +13249,8 @@ func (m *ResponseDeliverTx) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Tags = append(m.Tags, common.KVPair{}) - if err := m.Tags[len(m.Tags)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Events = append(m.Events, Event{}) + if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -13230,7 +13400,7 @@ func (m *ResponseEndBlock) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13254,8 +13424,8 @@ func (m *ResponseEndBlock) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Tags = append(m.Tags, common.KVPair{}) - if err := m.Tags[len(m.Tags)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Events = append(m.Events, Event{}) + if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -13853,6 +14023,117 @@ func (m *LastCommitInfo) Unmarshal(dAtA []byte) error { } return nil } +func (m *Event) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Event: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attributes = append(m.Attributes, common.KVPair{}) + if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *Header) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -15357,149 +15638,152 @@ var ( ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") ) -func init() { proto.RegisterFile("abci/types/types.proto", fileDescriptor_types_a177e47fab90f91d) } +func init() { proto.RegisterFile("abci/types/types.proto", fileDescriptor_types_62f0c59aeb977f78) } func init() { - golang_proto.RegisterFile("abci/types/types.proto", fileDescriptor_types_a177e47fab90f91d) + golang_proto.RegisterFile("abci/types/types.proto", fileDescriptor_types_62f0c59aeb977f78) } -var fileDescriptor_types_a177e47fab90f91d = []byte{ - // 2203 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0xcf, 0x73, 0x1c, 0x47, - 0xf5, 0xd7, 0xec, 0xef, 0x79, 0xab, 0xfd, 0xe1, 0xb6, 0x6c, 0xaf, 0xf7, 0x9b, 0xaf, 0xe4, 0x1a, - 0x43, 0x22, 0x11, 0x67, 0x95, 0x28, 0x98, 0x92, 0xe3, 0x40, 0x95, 0x56, 0x36, 0x48, 0x95, 0x00, - 0x62, 0x6c, 0x8b, 0x0b, 0x55, 0x53, 0xbd, 0x3b, 0xad, 0xdd, 0x29, 0xed, 0xce, 0x4c, 0x66, 0x7a, - 0x95, 0x15, 0x47, 0xce, 0x39, 0xe4, 0xc0, 0x9f, 0xc0, 0x81, 0x3f, 0x21, 0x47, 0x4e, 0x54, 0x8e, - 0x1c, 0x38, 0x1b, 0x10, 0xc5, 0x01, 0xae, 0x14, 0x55, 0x1c, 0xa9, 0x7e, 0xdd, 0xf3, 0x53, 0xb3, - 0x26, 0x0e, 0x9c, 0xb8, 0x48, 0xd3, 0xfd, 0x3e, 0xaf, 0x7f, 0xbc, 0x7d, 0xef, 0x7d, 0xde, 0x6b, - 0xb8, 0x4d, 0x47, 0x63, 0x67, 0x97, 0x5f, 0xfa, 0x2c, 0x94, 0x7f, 0x07, 0x7e, 0xe0, 0x71, 0x8f, - 0x54, 0x71, 0xd0, 0x7f, 0x67, 0xe2, 0xf0, 0xe9, 0x62, 0x34, 0x18, 0x7b, 0xf3, 0xdd, 0x89, 0x37, - 0xf1, 0x76, 0x51, 0x3a, 0x5a, 0x9c, 0xe1, 0x08, 0x07, 0xf8, 0x25, 0xb5, 0xfa, 0x8f, 0x53, 0x70, - 0xce, 0x5c, 0x9b, 0x05, 0x73, 0xc7, 0xe5, 0xe9, 0xcf, 0x71, 0x70, 0xe9, 0x73, 0x6f, 0x77, 0xce, - 0x82, 0xf3, 0x19, 0x53, 0xff, 0x94, 0xf2, 0xfe, 0xbf, 0x55, 0x9e, 0x39, 0xa3, 0x70, 0x77, 0xec, - 0xcd, 0xe7, 0x9e, 0x9b, 0x3e, 0x6c, 0x7f, 0x6b, 0xe2, 0x79, 0x93, 0x19, 0x4b, 0x0e, 0xc7, 0x9d, - 0x39, 0x0b, 0x39, 0x9d, 0xfb, 0x12, 0x60, 0xfc, 0xb6, 0x02, 0x75, 0x93, 0x7d, 0xb2, 0x60, 0x21, - 0x27, 0xdb, 0x50, 0x61, 0xe3, 0xa9, 0xd7, 0x2b, 0xdd, 0xd3, 0xb6, 0x9b, 
0x7b, 0x64, 0x20, 0x17, - 0x52, 0xd2, 0xa7, 0xe3, 0xa9, 0x77, 0xb4, 0x66, 0x22, 0x82, 0xbc, 0x0d, 0xd5, 0xb3, 0xd9, 0x22, - 0x9c, 0xf6, 0xca, 0x08, 0xbd, 0x99, 0x85, 0x7e, 0x5f, 0x88, 0x8e, 0xd6, 0x4c, 0x89, 0x11, 0xcb, - 0x3a, 0xee, 0x99, 0xd7, 0xab, 0x14, 0x2d, 0x7b, 0xec, 0x9e, 0xe1, 0xb2, 0x02, 0x41, 0xf6, 0x01, - 0x42, 0xc6, 0x2d, 0xcf, 0xe7, 0x8e, 0xe7, 0xf6, 0xaa, 0x88, 0xbf, 0x93, 0xc5, 0x3f, 0x63, 0xfc, - 0xc7, 0x28, 0x3e, 0x5a, 0x33, 0xf5, 0x30, 0x1a, 0x08, 0x4d, 0xc7, 0x75, 0xb8, 0x35, 0x9e, 0x52, - 0xc7, 0xed, 0xd5, 0x8a, 0x34, 0x8f, 0x5d, 0x87, 0x1f, 0x0a, 0xb1, 0xd0, 0x74, 0xa2, 0x81, 0xb8, - 0xca, 0x27, 0x0b, 0x16, 0x5c, 0xf6, 0xea, 0x45, 0x57, 0xf9, 0x89, 0x10, 0x89, 0xab, 0x20, 0x86, - 0x3c, 0x86, 0xe6, 0x88, 0x4d, 0x1c, 0xd7, 0x1a, 0xcd, 0xbc, 0xf1, 0x79, 0xaf, 0x81, 0x2a, 0xbd, - 0xac, 0xca, 0x50, 0x00, 0x86, 0x42, 0x7e, 0xb4, 0x66, 0xc2, 0x28, 0x1e, 0x91, 0x3d, 0x68, 0x8c, - 0xa7, 0x6c, 0x7c, 0x6e, 0xf1, 0x65, 0x4f, 0x47, 0xcd, 0x5b, 0x59, 0xcd, 0x43, 0x21, 0x7d, 0xbe, - 0x3c, 0x5a, 0x33, 0xeb, 0x63, 0xf9, 0x49, 0x1e, 0x82, 0xce, 0x5c, 0x5b, 0x6d, 0xd7, 0x44, 0xa5, - 0xdb, 0xb9, 0xdf, 0xc5, 0xb5, 0xa3, 0xcd, 0x1a, 0x4c, 0x7d, 0x93, 0x01, 0xd4, 0x84, 0x33, 0x38, - 0xbc, 0xb7, 0x8e, 0x3a, 0x1b, 0xb9, 0x8d, 0x50, 0x76, 0xb4, 0x66, 0x2a, 0x94, 0x30, 0x9f, 0xcd, - 0x66, 0xce, 0x05, 0x0b, 0xc4, 0xe1, 0x6e, 0x16, 0x99, 0xef, 0x89, 0x94, 0xe3, 0xf1, 0x74, 0x3b, - 0x1a, 0x0c, 0xeb, 0x50, 0xbd, 0xa0, 0xb3, 0x05, 0x33, 0xde, 0x82, 0x66, 0xca, 0x53, 0x48, 0x0f, - 0xea, 0x73, 0x16, 0x86, 0x74, 0xc2, 0x7a, 0xda, 0x3d, 0x6d, 0x5b, 0x37, 0xa3, 0xa1, 0xd1, 0x86, - 0xf5, 0xb4, 0x9f, 0x18, 0xf3, 0x58, 0x51, 0xf8, 0x82, 0x50, 0xbc, 0x60, 0x41, 0x28, 0x1c, 0x40, - 0x29, 0xaa, 0x21, 0xb9, 0x0f, 0x2d, 0xb4, 0x83, 0x15, 0xc9, 0x85, 0x9f, 0x56, 0xcc, 0x75, 0x9c, - 0x3c, 0x55, 0xa0, 0x2d, 0x68, 0xfa, 0x7b, 0x7e, 0x0c, 0x29, 0x23, 0x04, 0xfc, 0x3d, 0x5f, 0x01, - 0x8c, 0x0f, 0xa0, 0x9b, 0x77, 0x25, 0xd2, 0x85, 0xf2, 0x39, 0xbb, 0x54, 0xfb, 0x89, 0x4f, 0xb2, - 0xa1, 0xae, 0x85, 0x7b, 0xe8, 0xa6, 0xba, 0xe3, 0xe7, 0xa5, 0x58, 0x39, 0xf6, 0x26, 0xb2, 0x0f, - 0x15, 0x11, 0x54, 0xa8, 0xdd, 0xdc, 0xeb, 0x0f, 0x64, 0xc4, 0x0d, 0xa2, 0x88, 0x1b, 0x3c, 0x8f, - 0x22, 0x6e, 0xd8, 0xf8, 0xf2, 0xe5, 0xd6, 0xda, 0xe7, 0x7f, 0xd8, 0xd2, 0x4c, 0xd4, 0x20, 0x77, - 0x85, 0x43, 0x50, 0xc7, 0xb5, 0x1c, 0x5b, 0xed, 0x53, 0xc7, 0xf1, 0xb1, 0x4d, 0x0e, 0xa0, 0x3b, - 0xf6, 0xdc, 0x90, 0xb9, 0xe1, 0x22, 0xb4, 0x7c, 0x1a, 0xd0, 0x79, 0xa8, 0x62, 0x2d, 0xfa, 0xf9, - 0x0f, 0x23, 0xf1, 0x09, 0x4a, 0xcd, 0xce, 0x38, 0x3b, 0x41, 0x3e, 0x04, 0xb8, 0xa0, 0x33, 0xc7, - 0xa6, 0xdc, 0x0b, 0xc2, 0x5e, 0xe5, 0x5e, 0x39, 0xa5, 0x7c, 0x1a, 0x09, 0x5e, 0xf8, 0x36, 0xe5, - 0x6c, 0x58, 0x11, 0x27, 0x33, 0x53, 0x78, 0xf2, 0x26, 0x74, 0xa8, 0xef, 0x5b, 0x21, 0xa7, 0x9c, - 0x59, 0xa3, 0x4b, 0xce, 0x42, 0x8c, 0xc7, 0x75, 0xb3, 0x45, 0x7d, 0xff, 0x99, 0x98, 0x1d, 0x8a, - 0x49, 0xc3, 0x8e, 0x7f, 0x4d, 0x0c, 0x15, 0x42, 0xa0, 0x62, 0x53, 0x4e, 0xd1, 0x1a, 0xeb, 0x26, - 0x7e, 0x8b, 0x39, 0x9f, 0xf2, 0xa9, 0xba, 0x23, 0x7e, 0x93, 0xdb, 0x50, 0x9b, 0x32, 0x67, 0x32, - 0xe5, 0x78, 0xad, 0xb2, 0xa9, 0x46, 0xc2, 0xf0, 0x7e, 0xe0, 0x5d, 0x30, 0xcc, 0x16, 0x0d, 0x53, - 0x0e, 0x8c, 0xbf, 0x68, 0x70, 0xe3, 0x5a, 0x78, 0x89, 0x75, 0xa7, 0x34, 0x9c, 0x46, 0x7b, 0x89, - 0x6f, 0xf2, 0xb6, 0x58, 0x97, 0xda, 0x2c, 0x50, 0x59, 0xac, 0xa5, 0x6e, 0x7c, 0x84, 0x93, 0xea, - 0xa2, 0x0a, 0x42, 0x9e, 0x42, 0x77, 0x46, 0x43, 0x6e, 0xc9, 0x28, 0xb0, 0x30, 0x4b, 0x95, 0x33, - 0x91, 0xf9, 0x31, 0x8d, 0xa2, 0x45, 0x38, 0xa7, 0x52, 0x6f, 0xcf, 0x32, 0xb3, 0xe4, 0x08, 0x36, - 
0x46, 0x97, 0x3f, 0xa7, 0x2e, 0x77, 0x5c, 0x66, 0x5d, 0xb3, 0x79, 0x47, 0x2d, 0xf5, 0xf4, 0xc2, - 0xb1, 0x99, 0x3b, 0x8e, 0x8c, 0x7d, 0x33, 0x56, 0x89, 0x7f, 0x8c, 0xd0, 0xb8, 0x07, 0xed, 0x6c, - 0x2e, 0x20, 0x6d, 0x28, 0xf1, 0xa5, 0xba, 0x61, 0x89, 0x2f, 0x0d, 0x23, 0xf6, 0xc0, 0x38, 0x20, - 0xaf, 0x61, 0x76, 0xa0, 0x93, 0x4b, 0x0e, 0x29, 0x73, 0x6b, 0x69, 0x73, 0x1b, 0x1d, 0x68, 0x65, - 0x72, 0x82, 0xf1, 0x59, 0x15, 0x1a, 0x26, 0x0b, 0x7d, 0xe1, 0x4c, 0x64, 0x1f, 0x74, 0xb6, 0x1c, - 0x33, 0x99, 0x8e, 0xb5, 0x5c, 0xb2, 0x93, 0x98, 0xa7, 0x91, 0x5c, 0xa4, 0x85, 0x18, 0x4c, 0x76, - 0x32, 0x54, 0x72, 0x33, 0xaf, 0x94, 0xe6, 0x92, 0x07, 0x59, 0x2e, 0xd9, 0xc8, 0x61, 0x73, 0x64, - 0xb2, 0x93, 0x21, 0x93, 0xfc, 0xc2, 0x19, 0x36, 0x79, 0x54, 0xc0, 0x26, 0xf9, 0xe3, 0xaf, 0xa0, - 0x93, 0x47, 0x05, 0x74, 0xd2, 0xbb, 0xb6, 0x57, 0x21, 0x9f, 0x3c, 0xc8, 0xf2, 0x49, 0xfe, 0x3a, - 0x39, 0x42, 0xf9, 0xb0, 0x88, 0x50, 0xee, 0xe6, 0x74, 0x56, 0x32, 0xca, 0xfb, 0xd7, 0x18, 0xe5, - 0x76, 0x4e, 0xb5, 0x80, 0x52, 0x1e, 0x65, 0x72, 0x3d, 0x14, 0xde, 0xad, 0x38, 0xd9, 0x93, 0xef, - 0x5c, 0x67, 0xa3, 0x3b, 0xf9, 0x9f, 0xb6, 0x88, 0x8e, 0x76, 0x73, 0x74, 0x74, 0x2b, 0x7f, 0xca, - 0x1c, 0x1f, 0x25, 0xac, 0xb2, 0x23, 0xe2, 0x3e, 0xe7, 0x69, 0x22, 0x47, 0xb0, 0x20, 0xf0, 0x02, - 0x95, 0xb0, 0xe5, 0xc0, 0xd8, 0x16, 0x99, 0x28, 0xf1, 0xaf, 0x57, 0x30, 0x10, 0x3a, 0x7d, 0xca, +var fileDescriptor_types_62f0c59aeb977f78 = []byte{ + // 2241 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x58, 0x4b, 0x73, 0x1c, 0x49, + 0xf1, 0x57, 0xcf, 0xbb, 0x73, 0x34, 0x0f, 0x97, 0x65, 0x7b, 0x3c, 0x7f, 0xff, 0x25, 0x47, 0x1b, + 0x76, 0x25, 0xd6, 0x3b, 0xda, 0xd5, 0x62, 0x42, 0xc6, 0xcb, 0x46, 0x68, 0x6c, 0x83, 0x14, 0x6b, + 0x40, 0xb4, 0x6d, 0x71, 0x21, 0xa2, 0xa3, 0x66, 0xba, 0x3c, 0xd3, 0xe1, 0x99, 0xee, 0xde, 0xee, + 0x9a, 0xd9, 0x11, 0x47, 0xce, 0x7b, 0xd8, 0x03, 0x1f, 0x81, 0x03, 0x1f, 0x61, 0x8f, 0x9c, 0x88, + 0x3d, 0x72, 0xe0, 0x6c, 0x40, 0x04, 0x17, 0x22, 0x38, 0x03, 0x37, 0xa2, 0xb2, 0xaa, 0x9f, 0xea, + 0x31, 0xbb, 0x86, 0x1b, 0x17, 0xa9, 0xab, 0xf2, 0x97, 0xf5, 0xc8, 0xc9, 0xcc, 0x5f, 0x66, 0xc1, + 0x75, 0x3a, 0x1a, 0x3b, 0xfb, 0xfc, 0xdc, 0x67, 0xa1, 0xfc, 0x3b, 0xf0, 0x03, 0x8f, 0x7b, 0xa4, + 0x8a, 0x83, 0xfe, 0xbb, 0x13, 0x87, 0x4f, 0x17, 0xa3, 0xc1, 0xd8, 0x9b, 0xef, 0x4f, 0xbc, 0x89, + 0xb7, 0x8f, 0xd2, 0xd1, 0xe2, 0x05, 0x8e, 0x70, 0x80, 0x5f, 0x52, 0xab, 0xff, 0x20, 0x05, 0xe7, + 0xcc, 0xb5, 0x59, 0x30, 0x77, 0x5c, 0x9e, 0xfe, 0x1c, 0x07, 0xe7, 0x3e, 0xf7, 0xf6, 0xe7, 0x2c, + 0x78, 0x39, 0x63, 0xea, 0x9f, 0x52, 0x3e, 0xfc, 0xb7, 0xca, 0x33, 0x67, 0x14, 0xee, 0x8f, 0xbd, + 0xf9, 0xdc, 0x73, 0xd3, 0x87, 0xed, 0xef, 0x4c, 0x3c, 0x6f, 0x32, 0x63, 0xc9, 0xe1, 0xb8, 0x33, + 0x67, 0x21, 0xa7, 0x73, 0x5f, 0x02, 0x8c, 0xdf, 0x56, 0xa0, 0x6e, 0xb2, 0x4f, 0x16, 0x2c, 0xe4, + 0x64, 0x17, 0x2a, 0x6c, 0x3c, 0xf5, 0x7a, 0xa5, 0xdb, 0xda, 0x6e, 0xf3, 0x80, 0x0c, 0xe4, 0x42, + 0x4a, 0xfa, 0x78, 0x3c, 0xf5, 0x8e, 0x37, 0x4c, 0x44, 0x90, 0x77, 0xa0, 0xfa, 0x62, 0xb6, 0x08, + 0xa7, 0xbd, 0x32, 0x42, 0xaf, 0x66, 0xa1, 0xdf, 0x17, 0xa2, 0xe3, 0x0d, 0x53, 0x62, 0xc4, 0xb2, + 0x8e, 0xfb, 0xc2, 0xeb, 0x55, 0x8a, 0x96, 0x3d, 0x71, 0x5f, 0xe0, 0xb2, 0x02, 0x41, 0x0e, 0x01, + 0x42, 0xc6, 0x2d, 0xcf, 0xe7, 0x8e, 0xe7, 0xf6, 0xaa, 0x88, 0xbf, 0x91, 0xc5, 0x3f, 0x65, 0xfc, + 0xc7, 0x28, 0x3e, 0xde, 0x30, 0xf5, 0x30, 0x1a, 0x08, 0x4d, 0xc7, 0x75, 0xb8, 0x35, 0x9e, 0x52, + 0xc7, 0xed, 0xd5, 0x8a, 0x34, 0x4f, 0x5c, 0x87, 0x3f, 0x14, 0x62, 0xa1, 0xe9, 0x44, 0x03, 0x71, + 0x95, 0x4f, 0x16, 
0x2c, 0x38, 0xef, 0xd5, 0x8b, 0xae, 0xf2, 0x13, 0x21, 0x12, 0x57, 0x41, 0x0c, + 0x79, 0x00, 0xcd, 0x11, 0x9b, 0x38, 0xae, 0x35, 0x9a, 0x79, 0xe3, 0x97, 0xbd, 0x06, 0xaa, 0xf4, + 0xb2, 0x2a, 0x43, 0x01, 0x18, 0x0a, 0xf9, 0xf1, 0x86, 0x09, 0xa3, 0x78, 0x44, 0x0e, 0xa0, 0x31, + 0x9e, 0xb2, 0xf1, 0x4b, 0x8b, 0xaf, 0x7a, 0x3a, 0x6a, 0x5e, 0xcb, 0x6a, 0x3e, 0x14, 0xd2, 0x67, + 0xab, 0xe3, 0x0d, 0xb3, 0x3e, 0x96, 0x9f, 0xe4, 0x1e, 0xe8, 0xcc, 0xb5, 0xd5, 0x76, 0x4d, 0x54, + 0xba, 0x9e, 0xfb, 0x5d, 0x5c, 0x3b, 0xda, 0xac, 0xc1, 0xd4, 0x37, 0x19, 0x40, 0x4d, 0x38, 0x83, + 0xc3, 0x7b, 0x9b, 0xa8, 0xb3, 0x95, 0xdb, 0x08, 0x65, 0xc7, 0x1b, 0xa6, 0x42, 0x09, 0xf3, 0xd9, + 0x6c, 0xe6, 0x2c, 0x59, 0x20, 0x0e, 0x77, 0xb5, 0xc8, 0x7c, 0x8f, 0xa4, 0x1c, 0x8f, 0xa7, 0xdb, + 0xd1, 0x60, 0x58, 0x87, 0xea, 0x92, 0xce, 0x16, 0xcc, 0x78, 0x1b, 0x9a, 0x29, 0x4f, 0x21, 0x3d, + 0xa8, 0xcf, 0x59, 0x18, 0xd2, 0x09, 0xeb, 0x69, 0xb7, 0xb5, 0x5d, 0xdd, 0x8c, 0x86, 0x46, 0x1b, + 0x36, 0xd3, 0x7e, 0x62, 0xcc, 0x63, 0x45, 0xe1, 0x0b, 0x42, 0x71, 0xc9, 0x82, 0x50, 0x38, 0x80, + 0x52, 0x54, 0x43, 0x72, 0x07, 0x5a, 0x68, 0x07, 0x2b, 0x92, 0x0b, 0x3f, 0xad, 0x98, 0x9b, 0x38, + 0x79, 0xa6, 0x40, 0x3b, 0xd0, 0xf4, 0x0f, 0xfc, 0x18, 0x52, 0x46, 0x08, 0xf8, 0x07, 0xbe, 0x02, + 0x18, 0xdf, 0x85, 0x6e, 0xde, 0x95, 0x48, 0x17, 0xca, 0x2f, 0xd9, 0xb9, 0xda, 0x4f, 0x7c, 0x92, + 0x2d, 0x75, 0x2d, 0xdc, 0x43, 0x37, 0xd5, 0x1d, 0x3f, 0x2f, 0xc5, 0xca, 0xb1, 0x37, 0x91, 0x43, + 0xa8, 0x88, 0xa0, 0x42, 0xed, 0xe6, 0x41, 0x7f, 0x20, 0x23, 0x6e, 0x10, 0x45, 0xdc, 0xe0, 0x59, + 0x14, 0x71, 0xc3, 0xc6, 0x97, 0xaf, 0x76, 0x36, 0x3e, 0xff, 0xc3, 0x8e, 0x66, 0xa2, 0x06, 0xb9, + 0x29, 0x1c, 0x82, 0x3a, 0xae, 0xe5, 0xd8, 0x6a, 0x9f, 0x3a, 0x8e, 0x4f, 0x6c, 0x72, 0x04, 0xdd, + 0xb1, 0xe7, 0x86, 0xcc, 0x0d, 0x17, 0xa1, 0xe5, 0xd3, 0x80, 0xce, 0x43, 0x15, 0x6b, 0xd1, 0xcf, + 0xff, 0x30, 0x12, 0x9f, 0xa2, 0xd4, 0xec, 0x8c, 0xb3, 0x13, 0xe4, 0x43, 0x80, 0x25, 0x9d, 0x39, + 0x36, 0xe5, 0x5e, 0x10, 0xf6, 0x2a, 0xb7, 0xcb, 0x29, 0xe5, 0xb3, 0x48, 0xf0, 0xdc, 0xb7, 0x29, + 0x67, 0xc3, 0x8a, 0x38, 0x99, 0x99, 0xc2, 0x93, 0xb7, 0xa0, 0x43, 0x7d, 0xdf, 0x0a, 0x39, 0xe5, + 0xcc, 0x1a, 0x9d, 0x73, 0x16, 0x62, 0x3c, 0x6e, 0x9a, 0x2d, 0xea, 0xfb, 0x4f, 0xc5, 0xec, 0x50, + 0x4c, 0x1a, 0x76, 0xfc, 0x6b, 0x62, 0xa8, 0x10, 0x02, 0x15, 0x9b, 0x72, 0x8a, 0xd6, 0xd8, 0x34, + 0xf1, 0x5b, 0xcc, 0xf9, 0x94, 0x4f, 0xd5, 0x1d, 0xf1, 0x9b, 0x5c, 0x87, 0xda, 0x94, 0x39, 0x93, + 0x29, 0xc7, 0x6b, 0x95, 0x4d, 0x35, 0x12, 0x86, 0xf7, 0x03, 0x6f, 0xc9, 0x30, 0x5b, 0x34, 0x4c, + 0x39, 0x30, 0xfe, 0xa2, 0xc1, 0x95, 0x4b, 0xe1, 0x25, 0xd6, 0x9d, 0xd2, 0x70, 0x1a, 0xed, 0x25, + 0xbe, 0xc9, 0x3b, 0x62, 0x5d, 0x6a, 0xb3, 0x40, 0x65, 0xb1, 0x96, 0xba, 0xf1, 0x31, 0x4e, 0xaa, + 0x8b, 0x2a, 0x08, 0x79, 0x0c, 0xdd, 0x19, 0x0d, 0xb9, 0x25, 0xa3, 0xc0, 0xc2, 0x2c, 0x55, 0xce, + 0x44, 0xe6, 0x13, 0x1a, 0x45, 0x8b, 0x70, 0x4e, 0xa5, 0xde, 0x9e, 0x65, 0x66, 0xc9, 0x31, 0x6c, + 0x8d, 0xce, 0x7f, 0x4e, 0x5d, 0xee, 0xb8, 0xcc, 0xba, 0x64, 0xf3, 0x8e, 0x5a, 0xea, 0xf1, 0xd2, + 0xb1, 0x99, 0x3b, 0x8e, 0x8c, 0x7d, 0x35, 0x56, 0x89, 0x7f, 0x8c, 0xd0, 0xb8, 0x0d, 0xed, 0x6c, + 0x2e, 0x20, 0x6d, 0x28, 0xf1, 0x95, 0xba, 0x61, 0x89, 0xaf, 0x0c, 0x23, 0xf6, 0xc0, 0x38, 0x20, + 0x2f, 0x61, 0xf6, 0xa0, 0x93, 0x4b, 0x0e, 0x29, 0x73, 0x6b, 0x69, 0x73, 0x1b, 0x1d, 0x68, 0x65, + 0x72, 0x82, 0xf1, 0x59, 0x15, 0x1a, 0x26, 0x0b, 0x7d, 0xe1, 0x4c, 0xe4, 0x10, 0x74, 0xb6, 0x1a, + 0x33, 0x99, 0x8e, 0xb5, 0x5c, 0xb2, 0x93, 0x98, 0xc7, 0x91, 0x5c, 0xa4, 0x85, 0x18, 0x4c, 0xf6, + 0x32, 0x54, 0x72, 0x35, 0xaf, 0x94, 0xe6, 
0x92, 0xbb, 0x59, 0x2e, 0xd9, 0xca, 0x61, 0x73, 0x64, + 0xb2, 0x97, 0x21, 0x93, 0xfc, 0xc2, 0x19, 0x36, 0xb9, 0x5f, 0xc0, 0x26, 0xf9, 0xe3, 0xaf, 0xa1, + 0x93, 0xfb, 0x05, 0x74, 0xd2, 0xbb, 0xb4, 0x57, 0x21, 0x9f, 0xdc, 0xcd, 0xf2, 0x49, 0xfe, 0x3a, + 0x39, 0x42, 0xf9, 0xb0, 0x88, 0x50, 0x6e, 0xe6, 0x74, 0xd6, 0x32, 0xca, 0x07, 0x97, 0x18, 0xe5, + 0x7a, 0x4e, 0xb5, 0x80, 0x52, 0xee, 0x67, 0x72, 0x3d, 0x14, 0xde, 0xad, 0x38, 0xd9, 0x93, 0xef, + 0x5c, 0x66, 0xa3, 0x1b, 0xf9, 0x9f, 0xb6, 0x88, 0x8e, 0xf6, 0x73, 0x74, 0x74, 0x2d, 0x7f, 0xca, + 0x1c, 0x1f, 0x25, 0xac, 0xb2, 0x27, 0xe2, 0x3e, 0xe7, 0x69, 0x22, 0x47, 0xb0, 0x20, 0xf0, 0x02, + 0x95, 0xb0, 0xe5, 0xc0, 0xd8, 0x15, 0x99, 0x28, 0xf1, 0xaf, 0xd7, 0x30, 0x10, 0x3a, 0x7d, 0xca, 0xbb, 0x8c, 0x2f, 0xb4, 0x44, 0x17, 0x23, 0x3a, 0x9d, 0xc5, 0x74, 0x95, 0xc5, 0x52, 0xc4, 0x54, - 0xca, 0x12, 0xd3, 0x16, 0x34, 0x45, 0xae, 0xcc, 0x71, 0x0e, 0xf5, 0x23, 0xce, 0x21, 0xdf, 0x82, - 0x1b, 0x98, 0x67, 0x24, 0x7d, 0xa9, 0x40, 0xac, 0x60, 0x20, 0x76, 0x84, 0x40, 0x5a, 0x4c, 0x26, - 0xc0, 0x77, 0xe0, 0x66, 0x0a, 0x2b, 0xd6, 0xc5, 0x1c, 0x27, 0x93, 0x6f, 0x37, 0x46, 0x1f, 0xf8, - 0xfe, 0x11, 0x0d, 0xa7, 0xc6, 0x0f, 0x13, 0x03, 0x25, 0x7c, 0x46, 0xa0, 0x32, 0xf6, 0x6c, 0x79, + 0xca, 0x12, 0xd3, 0x0e, 0x34, 0x45, 0xae, 0xcc, 0x71, 0x0e, 0xf5, 0x23, 0xce, 0x21, 0xdf, 0x82, + 0x2b, 0x98, 0x67, 0x24, 0x7d, 0xa9, 0x40, 0xac, 0x60, 0x20, 0x76, 0x84, 0x40, 0x5a, 0x4c, 0x26, + 0xc0, 0x77, 0xe1, 0x6a, 0x0a, 0x2b, 0xd6, 0xc5, 0x1c, 0x27, 0x93, 0x6f, 0x37, 0x46, 0x1f, 0xf9, + 0xfe, 0x31, 0x0d, 0xa7, 0xc6, 0x0f, 0x13, 0x03, 0x25, 0x7c, 0x46, 0xa0, 0x32, 0xf6, 0x6c, 0x79, 0xef, 0x96, 0x89, 0xdf, 0x82, 0xe3, 0x66, 0xde, 0x04, 0x0f, 0xa7, 0x9b, 0xe2, 0x53, 0xa0, 0xe2, 0x50, 0xd2, 0x65, 0xcc, 0x18, 0xbf, 0xd4, 0x92, 0xf5, 0x12, 0x8a, 0x2b, 0x62, 0x23, 0xed, 0x3f, - 0x61, 0xa3, 0xd2, 0xeb, 0xb1, 0x91, 0x71, 0xa5, 0x25, 0x3f, 0x59, 0xcc, 0x33, 0x5f, 0xef, 0x8a, - 0xc2, 0x7b, 0x1c, 0xd7, 0x66, 0x4b, 0x34, 0x69, 0xd9, 0x94, 0x83, 0xa8, 0x04, 0xa8, 0xa1, 0x99, - 0xb3, 0x25, 0x40, 0x1d, 0xe7, 0xe4, 0x80, 0xdc, 0x47, 0x7e, 0xf2, 0xce, 0x54, 0xa8, 0xb6, 0x06, - 0xaa, 0x50, 0x3f, 0x11, 0x93, 0xa6, 0x94, 0xa5, 0xb2, 0xad, 0x9e, 0x21, 0xb7, 0x37, 0x40, 0x17, - 0x07, 0x0d, 0x7d, 0x3a, 0x66, 0x18, 0x79, 0xba, 0x99, 0x4c, 0x18, 0x27, 0x40, 0xae, 0x47, 0x3c, - 0xf9, 0x00, 0x2a, 0x9c, 0x4e, 0x84, 0xbd, 0x85, 0xc9, 0xda, 0x03, 0x59, 0xe4, 0x0f, 0x3e, 0x3a, - 0x3d, 0xa1, 0x4e, 0x30, 0xbc, 0x2d, 0x4c, 0xf5, 0xb7, 0x97, 0x5b, 0x6d, 0x81, 0x79, 0xe0, 0xcd, - 0x1d, 0xce, 0xe6, 0x3e, 0xbf, 0x34, 0x51, 0xc7, 0xf8, 0xbb, 0x26, 0x98, 0x20, 0x93, 0x09, 0x0a, - 0x0d, 0x17, 0xb9, 0x7b, 0x29, 0x45, 0xda, 0x5f, 0xcd, 0x98, 0xff, 0x0f, 0x30, 0xa1, 0xa1, 0xf5, - 0x29, 0x75, 0x39, 0xb3, 0x95, 0x45, 0xf5, 0x09, 0x0d, 0x7f, 0x8a, 0x13, 0xa2, 0xc2, 0x11, 0xe2, - 0x45, 0xc8, 0x6c, 0x34, 0x6d, 0xd9, 0xac, 0x4f, 0x68, 0xf8, 0x22, 0x64, 0x76, 0x7c, 0xaf, 0xfa, - 0xeb, 0xdf, 0x2b, 0x6b, 0xc7, 0x46, 0xde, 0x8e, 0xff, 0x48, 0xf9, 0x70, 0x42, 0x92, 0xff, 0xfb, - 0xf7, 0xfe, 0xab, 0x26, 0x6a, 0x83, 0x6c, 0x1a, 0x26, 0xc7, 0x70, 0x23, 0x8e, 0x23, 0x6b, 0x81, - 0xf1, 0x15, 0xf9, 0xd2, 0xab, 0xc3, 0xaf, 0x7b, 0x91, 0x9d, 0x0e, 0xc9, 0x8f, 0xe0, 0x4e, 0x2e, - 0x0b, 0xc4, 0x0b, 0x96, 0x5e, 0x99, 0x0c, 0x6e, 0x65, 0x93, 0x41, 0xb4, 0x5e, 0x64, 0x89, 0xf2, - 0xd7, 0xf0, 0xec, 0x6f, 0x88, 0x42, 0x29, 0x4d, 0x1e, 0x45, 0xbf, 0xa5, 0xf1, 0x2b, 0x0d, 0x3a, - 0xb9, 0xc3, 0x90, 0x6d, 0xa8, 0x4a, 0xfe, 0xd2, 0x32, 0xed, 0x28, 0x5a, 0x4b, 0x9d, 0x57, 0x02, - 0xc8, 0x7b, 0xd0, 0x60, 0xaa, 0x66, 0x53, 0x17, 0xbc, 0x95, 0x2b, 0xe5, 
0x14, 0x3e, 0x86, 0x91, - 0x6f, 0x83, 0x1e, 0x9b, 0x2d, 0x57, 0xaf, 0xc7, 0x56, 0x56, 0x4a, 0x09, 0xd0, 0x38, 0x84, 0x66, - 0x6a, 0x7b, 0xf2, 0x7f, 0xa0, 0xcf, 0xe9, 0x52, 0x15, 0xdd, 0xb2, 0x5c, 0x6b, 0xcc, 0xe9, 0x12, - 0xeb, 0x6d, 0x72, 0x07, 0xea, 0x42, 0x38, 0xa1, 0xd2, 0xe8, 0x65, 0xb3, 0x36, 0xa7, 0xcb, 0x1f, - 0xd0, 0xd0, 0xd8, 0x81, 0x76, 0xf6, 0x58, 0x11, 0x34, 0x22, 0x40, 0x09, 0x3d, 0x98, 0x30, 0xe3, - 0x21, 0x74, 0x72, 0xa7, 0x21, 0x06, 0xb4, 0xfc, 0xc5, 0xc8, 0x3a, 0x67, 0x97, 0x16, 0x1e, 0x17, - 0x5d, 0x44, 0x37, 0x9b, 0xfe, 0x62, 0xf4, 0x11, 0xbb, 0x7c, 0x2e, 0xa6, 0x8c, 0x67, 0xd0, 0xce, - 0x96, 0xc3, 0x22, 0x45, 0x06, 0xde, 0xc2, 0xb5, 0x71, 0xfd, 0xaa, 0x29, 0x07, 0xa2, 0xa3, 0xbe, - 0xf0, 0xa4, 0x57, 0xa4, 0xeb, 0xdf, 0x53, 0x8f, 0xb3, 0x54, 0x11, 0x2d, 0x31, 0xc6, 0x2f, 0xaa, - 0x50, 0x93, 0xb5, 0x39, 0x19, 0x64, 0x3b, 0x3f, 0xe1, 0x12, 0x4a, 0x53, 0xce, 0x2a, 0xc5, 0x98, - 0x76, 0xdf, 0xcc, 0xb7, 0x4f, 0xc3, 0xe6, 0xd5, 0xcb, 0xad, 0x3a, 0x52, 0xd6, 0xf1, 0x93, 0xa4, - 0x97, 0x5a, 0xd5, 0x6a, 0x44, 0x8d, 0x5b, 0xe5, 0xb5, 0x1b, 0xb7, 0x3b, 0x50, 0x77, 0x17, 0x73, - 0x8b, 0x2f, 0x43, 0x15, 0xfa, 0x35, 0x77, 0x31, 0x7f, 0xbe, 0xc4, 0x9f, 0x8e, 0x7b, 0x9c, 0xce, - 0x50, 0x24, 0x03, 0xbf, 0x81, 0x13, 0x42, 0xb8, 0x0f, 0xad, 0x14, 0xb3, 0x3b, 0xb6, 0xaa, 0x10, - 0xdb, 0x69, 0x0f, 0x3c, 0x7e, 0xa2, 0x6e, 0xd9, 0x8c, 0x99, 0xfe, 0xd8, 0x26, 0xdb, 0xd9, 0x3e, - 0x05, 0x0b, 0x82, 0x06, 0xfa, 0x79, 0xaa, 0x15, 0x11, 0xe5, 0x80, 0x38, 0x80, 0xf0, 0x7c, 0x09, - 0xd1, 0x11, 0xd2, 0x10, 0x13, 0x28, 0x7c, 0x0b, 0x3a, 0x09, 0xa7, 0x4a, 0x08, 0xc8, 0x55, 0x92, - 0x69, 0x04, 0xbe, 0x0b, 0x1b, 0x2e, 0x5b, 0x72, 0x2b, 0x8f, 0x6e, 0x22, 0x9a, 0x08, 0xd9, 0x69, - 0x56, 0xe3, 0x9b, 0xd0, 0x4e, 0x72, 0x03, 0x62, 0xd7, 0x65, 0xb7, 0x18, 0xcf, 0x22, 0xec, 0x2e, - 0x34, 0xe2, 0x8a, 0xa6, 0x85, 0x80, 0x3a, 0x95, 0x85, 0x4c, 0x5c, 0x23, 0x05, 0x2c, 0x5c, 0xcc, - 0xb8, 0x5a, 0xa4, 0x8d, 0x18, 0xac, 0x91, 0x4c, 0x39, 0x8f, 0xd8, 0xfb, 0xd0, 0x8a, 0x42, 0x4e, - 0xe2, 0x3a, 0x88, 0x5b, 0x8f, 0x26, 0x11, 0xb4, 0x03, 0x5d, 0x3f, 0xf0, 0x7c, 0x2f, 0x64, 0x81, - 0x45, 0x6d, 0x3b, 0x60, 0x61, 0xd8, 0xeb, 0xca, 0xf5, 0xa2, 0xf9, 0x03, 0x39, 0x6d, 0xbc, 0x07, - 0xf5, 0xa8, 0x54, 0xdb, 0x80, 0xea, 0x30, 0x4e, 0x0f, 0x15, 0x53, 0x0e, 0x04, 0x29, 0x1c, 0xf8, - 0xbe, 0x7a, 0x70, 0x10, 0x9f, 0xc6, 0xcf, 0xa0, 0xae, 0x7e, 0xb0, 0xc2, 0x36, 0xf4, 0xbb, 0xb0, - 0xee, 0xd3, 0x40, 0x5c, 0x23, 0xdd, 0x8c, 0x46, 0xcd, 0xc0, 0x09, 0x0d, 0xf8, 0x33, 0xc6, 0x33, - 0x3d, 0x69, 0x13, 0xf1, 0x72, 0xca, 0x78, 0x04, 0xad, 0x0c, 0x46, 0x1c, 0x0b, 0xfd, 0x28, 0x8a, - 0x34, 0x1c, 0xc4, 0x3b, 0x97, 0x92, 0x9d, 0x8d, 0xc7, 0xa0, 0xc7, 0xbf, 0x8d, 0xa8, 0x59, 0xa3, - 0xab, 0x6b, 0xca, 0xdc, 0x72, 0x88, 0x7d, 0xb6, 0xf7, 0x29, 0x0b, 0x54, 0x4c, 0xc8, 0x81, 0xf1, - 0x22, 0x95, 0x19, 0x64, 0x9a, 0x26, 0x0f, 0xa0, 0xae, 0x32, 0x83, 0x8a, 0xca, 0xa8, 0xa3, 0x3e, - 0xc1, 0xd4, 0x10, 0x75, 0xd4, 0x32, 0x51, 0x24, 0xcb, 0x96, 0xd2, 0xcb, 0xce, 0xa0, 0x11, 0x45, - 0x7f, 0x36, 0x45, 0xca, 0x15, 0xbb, 0xf9, 0x14, 0xa9, 0x16, 0x4d, 0x80, 0xc2, 0x3b, 0x42, 0x67, - 0xe2, 0x32, 0xdb, 0x4a, 0x42, 0x08, 0xf7, 0x68, 0x98, 0x1d, 0x29, 0xf8, 0x38, 0x8a, 0x17, 0xe3, - 0x5d, 0xa8, 0xc9, 0xb3, 0x09, 0xfb, 0x88, 0x95, 0xa3, 0x32, 0x5e, 0x7c, 0x17, 0xf2, 0xc4, 0xef, - 0x35, 0x68, 0x44, 0xc9, 0xb3, 0x50, 0x29, 0x73, 0xe8, 0xd2, 0x57, 0x3d, 0xf4, 0x7f, 0x3f, 0xf1, - 0x3c, 0x00, 0x22, 0xf3, 0xcb, 0x85, 0xc7, 0x1d, 0x77, 0x62, 0x49, 0x5b, 0xcb, 0x1c, 0xd4, 0x45, - 0xc9, 0x29, 0x0a, 0x4e, 0xc4, 0xfc, 0xde, 0x67, 0x55, 0xe8, 0x1c, 0x0c, 0x0f, 0x8f, 0x0f, 0x7c, - 
0x7f, 0xe6, 0x8c, 0x29, 0xb6, 0x06, 0xbb, 0x50, 0xc1, 0xee, 0xa8, 0xe0, 0x75, 0xb7, 0x5f, 0xd4, - 0xa6, 0x93, 0x3d, 0xa8, 0x62, 0x93, 0x44, 0x8a, 0x1e, 0x79, 0xfb, 0x85, 0xdd, 0xba, 0xd8, 0x44, - 0xb6, 0x51, 0xd7, 0xdf, 0x7a, 0xfb, 0x45, 0x2d, 0x3b, 0xf9, 0x1e, 0xe8, 0x49, 0xf7, 0xb2, 0xea, - 0xc5, 0xb7, 0xbf, 0xb2, 0x79, 0x17, 0xfa, 0x49, 0xa5, 0xb7, 0xea, 0xe1, 0xb2, 0xbf, 0xb2, 0xcb, - 0x25, 0xfb, 0x50, 0x8f, 0xea, 0xe3, 0xe2, 0x37, 0xd9, 0xfe, 0x8a, 0xc6, 0x5a, 0x98, 0x47, 0x36, - 0x24, 0x45, 0x0f, 0xc7, 0xfd, 0xc2, 0xee, 0x9f, 0x3c, 0x84, 0x9a, 0x2a, 0x5a, 0x0a, 0xdf, 0x65, - 0xfb, 0xc5, 0xed, 0xb1, 0xb8, 0x64, 0xd2, 0x92, 0xad, 0x7a, 0xdc, 0xee, 0xaf, 0x7c, 0xa6, 0x20, - 0x07, 0x00, 0xa9, 0xbe, 0x62, 0xe5, 0xab, 0x75, 0x7f, 0xf5, 0xf3, 0x03, 0x79, 0x0c, 0x8d, 0xe4, - 0x49, 0xa9, 0xf8, 0x1d, 0xba, 0xbf, 0xea, 0x45, 0x60, 0xf8, 0xc6, 0x3f, 0xff, 0xb4, 0xa9, 0xfd, - 0xfa, 0x6a, 0x53, 0xfb, 0xe2, 0x6a, 0x53, 0xfb, 0xf2, 0x6a, 0x53, 0xfb, 0xdd, 0xd5, 0xa6, 0xf6, - 0xc7, 0xab, 0x4d, 0xed, 0x37, 0x7f, 0xde, 0xd4, 0x46, 0x35, 0x74, 0xff, 0xf7, 0xff, 0x15, 0x00, - 0x00, 0xff, 0xff, 0x38, 0x2d, 0x52, 0x86, 0x77, 0x19, 0x00, 0x00, + 0x61, 0xa3, 0xd2, 0xd7, 0x63, 0x23, 0xe3, 0x42, 0x4b, 0x7e, 0xb2, 0x98, 0x67, 0xde, 0xec, 0x8a, + 0xc2, 0x7b, 0x1c, 0xd7, 0x66, 0x2b, 0x34, 0x69, 0xd9, 0x94, 0x83, 0xa8, 0x04, 0xa8, 0xa1, 0x99, + 0xb3, 0x25, 0x40, 0x1d, 0xe7, 0xe4, 0x80, 0xdc, 0x41, 0x7e, 0xf2, 0x5e, 0xa8, 0x50, 0x6d, 0x0d, + 0x54, 0xa1, 0x7e, 0x2a, 0x26, 0x4d, 0x29, 0x4b, 0x65, 0x5b, 0x3d, 0x43, 0x6e, 0xb7, 0x40, 0x17, + 0x07, 0x0d, 0x7d, 0x3a, 0x66, 0x18, 0x79, 0xba, 0x99, 0x4c, 0x18, 0xcf, 0x80, 0x5c, 0x8e, 0x78, + 0xf2, 0x11, 0xd4, 0xd8, 0x92, 0xb9, 0x5c, 0x58, 0x5c, 0x18, 0x6d, 0x33, 0xa6, 0x13, 0xe6, 0xf2, + 0x61, 0x4f, 0x98, 0xea, 0xaf, 0xaf, 0x76, 0xba, 0x12, 0x73, 0xd7, 0x9b, 0x3b, 0x9c, 0xcd, 0x7d, + 0x7e, 0x6e, 0x2a, 0x2d, 0xe3, 0xef, 0x9a, 0x60, 0x83, 0x4c, 0x36, 0x28, 0x34, 0x5e, 0xe4, 0xf2, + 0xa5, 0x14, 0x71, 0x7f, 0x35, 0x83, 0xfe, 0x3f, 0xc0, 0x84, 0x86, 0xd6, 0xa7, 0xd4, 0xe5, 0xcc, + 0x56, 0x56, 0xd5, 0x27, 0x34, 0xfc, 0x29, 0x4e, 0x88, 0x2a, 0x47, 0x88, 0x17, 0x21, 0xb3, 0xd1, + 0xbc, 0x65, 0xb3, 0x3e, 0xa1, 0xe1, 0xf3, 0x90, 0xd9, 0xa9, 0xbb, 0xd5, 0xdf, 0xe4, 0x6e, 0x59, + 0x7b, 0x36, 0xf2, 0xf6, 0xfc, 0x67, 0xca, 0x97, 0x13, 0xb2, 0xfc, 0xdf, 0xb8, 0xfb, 0xdf, 0x34, + 0x51, 0x27, 0x64, 0x53, 0x32, 0x39, 0x81, 0x2b, 0x71, 0x4c, 0x59, 0x0b, 0x8c, 0xb5, 0xc8, 0xab, + 0x5e, 0x1f, 0x8a, 0xdd, 0x65, 0x76, 0x3a, 0x24, 0x3f, 0x82, 0x1b, 0xb9, 0x8c, 0x10, 0x2f, 0x58, + 0x7a, 0x6d, 0x62, 0xb8, 0x96, 0x4d, 0x0c, 0xd1, 0x7a, 0x89, 0x35, 0xca, 0x6f, 0xe4, 0xe5, 0xdf, + 0x10, 0x85, 0x53, 0x9a, 0x4c, 0x8a, 0x7e, 0x53, 0xe3, 0x57, 0x1a, 0x74, 0x72, 0x07, 0x22, 0xbb, + 0x50, 0x95, 0x7c, 0xa6, 0x65, 0xda, 0x53, 0xb4, 0x98, 0x3a, 0xb3, 0x04, 0x90, 0xf7, 0xa1, 0xc1, + 0x54, 0x0d, 0xa7, 0x2e, 0x79, 0x2d, 0x57, 0xda, 0x29, 0x7c, 0x0c, 0x23, 0xdf, 0x06, 0x3d, 0x36, + 0x5d, 0xae, 0x7e, 0x8f, 0x2d, 0xad, 0x94, 0x12, 0xa0, 0xf1, 0x10, 0x9a, 0xa9, 0xed, 0xc9, 0xff, + 0x81, 0x3e, 0xa7, 0x2b, 0x55, 0x84, 0xcb, 0xf2, 0xad, 0x31, 0xa7, 0x2b, 0xac, 0xbf, 0xc9, 0x0d, + 0xa8, 0x0b, 0xe1, 0x84, 0x4a, 0xc3, 0x97, 0xcd, 0xda, 0x9c, 0xae, 0x7e, 0x40, 0x43, 0x63, 0x0f, + 0xda, 0xd9, 0x63, 0x45, 0xd0, 0x88, 0x10, 0x25, 0xf4, 0x68, 0xc2, 0x8c, 0x7b, 0xd0, 0xc9, 0x9d, + 0x86, 0x18, 0xd0, 0xf2, 0x17, 0x23, 0xeb, 0x25, 0x3b, 0xb7, 0xf0, 0xb8, 0xe8, 0x26, 0xba, 0xd9, + 0xf4, 0x17, 0xa3, 0x8f, 0xd9, 0xf9, 0x33, 0x31, 0x65, 0x3c, 0x85, 0x76, 0xb6, 0x3c, 0x16, 0x29, + 0x33, 0xf0, 0x16, 0xae, 0x8d, 0xeb, 0x57, 0x4d, 0x39, 
0x10, 0x1d, 0xf6, 0xd2, 0x93, 0x9e, 0x91, + 0xae, 0x87, 0xcf, 0x3c, 0xce, 0x52, 0x45, 0xb5, 0xc4, 0x18, 0x0e, 0x54, 0xf1, 0x37, 0x17, 0xbf, + 0x9f, 0xc0, 0x45, 0x14, 0x2c, 0xbe, 0xc9, 0x13, 0x00, 0xca, 0x79, 0xe0, 0x8c, 0x16, 0xc9, 0x72, + 0xed, 0x81, 0x7c, 0xf6, 0x18, 0x7c, 0x7c, 0x76, 0x4a, 0x9d, 0x60, 0x78, 0x4b, 0xf9, 0xca, 0x56, + 0x82, 0x4c, 0xf9, 0x4b, 0x4a, 0xdf, 0xf8, 0x45, 0x15, 0x6a, 0xb2, 0x2d, 0x20, 0x83, 0x6c, 0xd3, + 0x29, 0x56, 0x55, 0x87, 0x94, 0xb3, 0xea, 0x8c, 0x31, 0xe3, 0xbf, 0x95, 0xef, 0xdc, 0x86, 0xcd, + 0x8b, 0x57, 0x3b, 0x75, 0x64, 0xcb, 0x93, 0x47, 0x49, 0x1b, 0xb7, 0xae, 0xcb, 0x89, 0x7a, 0xc6, + 0xca, 0xd7, 0xee, 0x19, 0x6f, 0x40, 0xdd, 0x5d, 0xcc, 0x2d, 0xbe, 0x0a, 0x55, 0xb6, 0xa9, 0xb9, + 0x8b, 0xf9, 0xb3, 0x15, 0x7a, 0x09, 0xf7, 0x38, 0x9d, 0xa1, 0x48, 0xe6, 0x9a, 0x06, 0x4e, 0x08, + 0xe1, 0x21, 0xb4, 0x52, 0x45, 0x85, 0x63, 0xab, 0xe2, 0xb4, 0x9d, 0x76, 0xf6, 0x93, 0x47, 0xea, + 0x96, 0xcd, 0xb8, 0xc8, 0x38, 0xb1, 0xc9, 0x6e, 0xb6, 0x45, 0xc2, 0x5a, 0xa4, 0x81, 0x21, 0x95, + 0xea, 0x82, 0x44, 0x25, 0x22, 0x0e, 0x20, 0x82, 0x4c, 0x42, 0x74, 0x84, 0x34, 0xc4, 0x04, 0x0a, + 0xdf, 0x86, 0x4e, 0x42, 0xe7, 0x12, 0x02, 0x72, 0x95, 0x64, 0x1a, 0x81, 0xef, 0xc1, 0x96, 0xcb, + 0x56, 0xdc, 0xca, 0xa3, 0x9b, 0x88, 0x26, 0x42, 0x76, 0x96, 0xd5, 0xf8, 0x26, 0xb4, 0x93, 0x54, + 0x84, 0xd8, 0x4d, 0xd9, 0xa8, 0xc6, 0xb3, 0x08, 0xbb, 0x09, 0x8d, 0xb8, 0x98, 0x6a, 0x21, 0xa0, + 0x4e, 0x65, 0x0d, 0x15, 0x97, 0x67, 0x01, 0x0b, 0x17, 0x33, 0xae, 0x16, 0x69, 0x23, 0x06, 0xcb, + 0x33, 0x53, 0xce, 0x23, 0xf6, 0x0e, 0xb4, 0xa2, 0xe8, 0x96, 0xb8, 0x0e, 0xe2, 0x36, 0xa3, 0x49, + 0x04, 0xed, 0x41, 0xd7, 0x0f, 0x3c, 0xdf, 0x0b, 0x59, 0x60, 0x51, 0xdb, 0x0e, 0x58, 0x18, 0xf6, + 0xba, 0x72, 0xbd, 0x68, 0xfe, 0x48, 0x4e, 0x1b, 0xef, 0x43, 0x3d, 0xaa, 0x12, 0xb7, 0xa0, 0x3a, + 0x8c, 0x33, 0x51, 0xc5, 0x94, 0x03, 0xc1, 0x43, 0x47, 0xbe, 0xaf, 0xde, 0x3a, 0xc4, 0xa7, 0xf1, + 0x33, 0xa8, 0xab, 0x1f, 0xac, 0xb0, 0x03, 0xfe, 0x1e, 0x6c, 0xfa, 0x34, 0x10, 0xd7, 0x48, 0xf7, + 0xc1, 0x51, 0x1f, 0x72, 0x4a, 0x03, 0xfe, 0x94, 0xf1, 0x4c, 0x3b, 0xdc, 0x44, 0xbc, 0x9c, 0x32, + 0xee, 0x43, 0x2b, 0x83, 0x11, 0xc7, 0x42, 0x3f, 0x8a, 0x82, 0x1a, 0x07, 0xf1, 0xce, 0xa5, 0x64, + 0x67, 0xe3, 0x01, 0xe8, 0xf1, 0x6f, 0x23, 0xca, 0xe5, 0xe8, 0xea, 0x9a, 0x32, 0xb7, 0x1c, 0x62, + 0x8b, 0xef, 0x7d, 0xca, 0x02, 0x15, 0x13, 0x72, 0x60, 0x3c, 0x4f, 0x25, 0x21, 0xc9, 0x0a, 0xe4, + 0x2e, 0xd4, 0x55, 0x12, 0x52, 0x51, 0x19, 0x35, 0xf3, 0xa7, 0x98, 0x85, 0xa2, 0x66, 0x5e, 0xe6, + 0xa4, 0x64, 0xd9, 0x52, 0x7a, 0xd9, 0x19, 0x34, 0xa2, 0x44, 0x93, 0xcd, 0xc6, 0x72, 0xc5, 0x6e, + 0x3e, 0x1b, 0xab, 0x45, 0x13, 0xa0, 0xf0, 0x8e, 0xd0, 0x99, 0xb8, 0xcc, 0xb6, 0x92, 0x10, 0xc2, + 0x3d, 0x1a, 0x66, 0x47, 0x0a, 0x9e, 0x44, 0xf1, 0x62, 0xbc, 0x07, 0x35, 0x79, 0xb6, 0xc2, 0xf4, + 0x55, 0x44, 0x49, 0xbf, 0xd7, 0xa0, 0x11, 0xe5, 0xe9, 0x42, 0xa5, 0xcc, 0xa1, 0x4b, 0x5f, 0xf5, + 0xd0, 0xff, 0xfd, 0xc4, 0x73, 0x17, 0x88, 0xcc, 0x2f, 0x4b, 0x8f, 0x3b, 0xee, 0xc4, 0x92, 0xb6, + 0x96, 0x39, 0xa8, 0x8b, 0x92, 0x33, 0x14, 0x9c, 0x8a, 0xf9, 0x83, 0xcf, 0xaa, 0xd0, 0x39, 0x1a, + 0x3e, 0x3c, 0x39, 0xf2, 0xfd, 0x99, 0x33, 0xa6, 0xd8, 0x95, 0xec, 0x43, 0x05, 0x1b, 0xb3, 0x82, + 0x87, 0xe5, 0x7e, 0xd1, 0x0b, 0x01, 0x39, 0x80, 0x2a, 0xf6, 0x67, 0xa4, 0xe8, 0x7d, 0xb9, 0x5f, + 0xf8, 0x50, 0x20, 0x36, 0x91, 0x1d, 0xdc, 0xe5, 0x67, 0xe6, 0x7e, 0xd1, 0x6b, 0x01, 0xf9, 0x08, + 0xf4, 0xa4, 0x71, 0x5a, 0xf7, 0xd8, 0xdc, 0x5f, 0xfb, 0x6e, 0x20, 0xf4, 0x93, 0xe2, 0x72, 0xdd, + 0x9b, 0x69, 0x7f, 0x6d, 0x83, 0x4d, 0x0e, 0xa1, 0x1e, 0x95, 0xe5, 0xc5, 0xcf, 
0xc1, 0xfd, 0x35, + 0x3d, 0xbd, 0x30, 0x8f, 0xec, 0x85, 0x8a, 0xde, 0xac, 0xfb, 0x85, 0x0f, 0x0f, 0xe4, 0x1e, 0xd4, + 0x54, 0x7d, 0x54, 0xf8, 0x24, 0xdc, 0x2f, 0xee, 0xcc, 0xc5, 0x25, 0x93, 0x6e, 0x70, 0xdd, 0xbb, + 0x7a, 0x7f, 0xed, 0x0b, 0x09, 0x39, 0x02, 0x48, 0xb5, 0x34, 0x6b, 0x1f, 0xcc, 0xfb, 0xeb, 0x5f, + 0x3e, 0xc8, 0x03, 0x68, 0x24, 0xaf, 0x59, 0xc5, 0x4f, 0xe0, 0xfd, 0x75, 0x8f, 0x11, 0xc3, 0x5b, + 0xff, 0xf8, 0xd3, 0xb6, 0xf6, 0xeb, 0x8b, 0x6d, 0xed, 0x8b, 0x8b, 0x6d, 0xed, 0xcb, 0x8b, 0x6d, + 0xed, 0x77, 0x17, 0xdb, 0xda, 0x1f, 0x2f, 0xb6, 0xb5, 0xdf, 0xfc, 0x79, 0x5b, 0x1b, 0xd5, 0xd0, + 0xfd, 0x3f, 0xf8, 0x57, 0x00, 0x00, 0x00, 0xff, 0xff, 0x3e, 0x33, 0xc0, 0x3b, 0xf2, 0x19, 0x00, + 0x00, } diff --git a/abci/types/types.proto b/abci/types/types.proto index 8eeecb39..8a2da5b4 100644 --- a/abci/types/types.proto +++ b/abci/types/types.proto @@ -165,7 +165,7 @@ message ResponseQuery { } message ResponseBeginBlock { - repeated common.KVPair tags = 1 [(gogoproto.nullable)=false, (gogoproto.jsontag)="tags,omitempty"]; + repeated Event events = 1 [(gogoproto.nullable)=false, (gogoproto.jsontag)="events,omitempty"]; } message ResponseCheckTx { @@ -175,7 +175,7 @@ message ResponseCheckTx { string info = 4; // nondeterministic int64 gas_wanted = 5; int64 gas_used = 6; - repeated common.KVPair tags = 7 [(gogoproto.nullable)=false, (gogoproto.jsontag)="tags,omitempty"]; + repeated Event events = 7 [(gogoproto.nullable)=false, (gogoproto.jsontag)="events,omitempty"]; string codespace = 8; } @@ -186,14 +186,14 @@ message ResponseDeliverTx { string info = 4; // nondeterministic int64 gas_wanted = 5; int64 gas_used = 6; - repeated common.KVPair tags = 7 [(gogoproto.nullable)=false, (gogoproto.jsontag)="tags,omitempty"]; + repeated Event events = 7 [(gogoproto.nullable)=false, (gogoproto.jsontag)="events,omitempty"]; string codespace = 8; } message ResponseEndBlock { repeated ValidatorUpdate validator_updates = 1 [(gogoproto.nullable)=false]; ConsensusParams consensus_param_updates = 2; - repeated common.KVPair tags = 3 [(gogoproto.nullable)=false, (gogoproto.jsontag)="tags,omitempty"]; + repeated Event events = 3 [(gogoproto.nullable)=false, (gogoproto.jsontag)="events,omitempty"]; } message ResponseCommit { @@ -212,7 +212,7 @@ message ConsensusParams { ValidatorParams validator = 3; } -// BlockParams contains limits on the block size and timestamp. +// BlockParams contains limits on the block size. 
message BlockParams { // Note: must be greater than 0 int64 max_bytes = 1; @@ -236,6 +236,11 @@ message LastCommitInfo { repeated VoteInfo votes = 2 [(gogoproto.nullable)=false]; } +message Event { + string type = 1; + repeated common.KVPair attributes = 2 [(gogoproto.nullable)=false, (gogoproto.jsontag)="attributes,omitempty"]; +} + //---------------------------------------- // Blockchain Types diff --git a/abci/types/typespb_test.go b/abci/types/typespb_test.go index a4c0a3f8..52356153 100644 --- a/abci/types/typespb_test.go +++ b/abci/types/typespb_test.go @@ -1703,6 +1703,62 @@ func TestLastCommitInfoMarshalTo(t *testing.T) { } } +func TestEventProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedEvent(popr, false) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &Event{} + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) + } +} + +func TestEventMarshalTo(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedEvent(popr, false) + size := p.Size() + dAtA := make([]byte, size) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + _, err := p.MarshalTo(dAtA) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &Event{} + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + func TestHeaderProto(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) @@ -2747,6 +2803,24 @@ func TestLastCommitInfoJSON(t *testing.T) { t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) } } +func TestEventJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedEvent(popr, true) + marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &Event{} + err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} func TestHeaderJSON(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) @@ -3749,6 +3823,34 @@ func TestLastCommitInfoProtoCompactText(t *testing.T) { } } +func TestEventProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedEvent(popr, true) + dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p) + msg := &Event{} + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, 
msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestEventProtoCompactText(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedEvent(popr, true) + dAtA := github_com_gogo_protobuf_proto.CompactTextString(p) + msg := &Event{} + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + func TestHeaderProtoText(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) @@ -4661,6 +4763,28 @@ func TestLastCommitInfoSize(t *testing.T) { } } +func TestEventSize(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedEvent(popr, true) + size2 := github_com_gogo_protobuf_proto.Size(p) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + size := p.Size() + if len(dAtA) != size { + t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) + } + if size2 != size { + t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) + } + size3 := github_com_gogo_protobuf_proto.Size(p) + if size3 != size { + t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) + } +} + func TestHeaderSize(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) diff --git a/appveyor.yml b/appveyor.yml index 1ddf8fdd..4aa8c2ab 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -7,7 +7,6 @@ clone_folder: c:\go\path\src\github.com\tendermint\tendermint before_build: - cmd: set GOPATH=%GOROOT%\path - cmd: set PATH=%GOPATH%\bin;%PATH% -- cmd: make get_vendor_deps build_script: - cmd: make test test: off diff --git a/behaviour/peer_behaviour.go b/behaviour/peer_behaviour.go new file mode 100644 index 00000000..36630f46 --- /dev/null +++ b/behaviour/peer_behaviour.go @@ -0,0 +1,49 @@ +package behaviour + +import ( + "github.com/tendermint/tendermint/p2p" +) + +// PeerBehaviour is a struct describing a behaviour a peer performed. +// `peerID` identifies the peer and reason characterizes the specific +// behaviour performed by the peer. +type PeerBehaviour struct { + peerID p2p.ID + reason interface{} +} + +type badMessage struct { + explanation string +} + +// BadMessage returns a badMessage PeerBehaviour. +func BadMessage(peerID p2p.ID, explanation string) PeerBehaviour { + return PeerBehaviour{peerID: peerID, reason: badMessage{explanation}} +} + +type messageOutOfOrder struct { + explanation string +} + +// MessageOutOfOrder returns a messageOutOfOrder PeerBehaviour. +func MessageOutOfOrder(peerID p2p.ID, explanation string) PeerBehaviour { + return PeerBehaviour{peerID: peerID, reason: messageOutOfOrder{explanation}} +} + +type consensusVote struct { + explanation string +} + +// ConsensusVote returns a consensusVote PeerBehaviour. +func ConsensusVote(peerID p2p.ID, explanation string) PeerBehaviour { + return PeerBehaviour{peerID: peerID, reason: consensusVote{explanation}} +} + +type blockPart struct { + explanation string +} + +// BlockPart returns a blockPart PeerBehaviour.
+func BlockPart(peerID p2p.ID, explanation string) PeerBehaviour { + return PeerBehaviour{peerID: peerID, reason: blockPart{explanation}} +} diff --git a/behaviour/reporter.go b/behaviour/reporter.go new file mode 100644 index 00000000..f8a0693b --- /dev/null +++ b/behaviour/reporter.go @@ -0,0 +1,84 @@ +package behaviour + +import ( + "errors" + "sync" + + "github.com/tendermint/tendermint/p2p" +) + +// Reporter provides an interface for reactors to report the behaviour +// of peers synchronously to other components. +type Reporter interface { + Report(behaviour PeerBehaviour) error +} + +// SwitchReporter reports peer behaviour to an internal Switch. +type SwitchReporter struct { + sw *p2p.Switch +} + +// NewSwitchReporter returns a new SwitchReporter instance which wraps the Switch. +func NewSwitchReporter(sw *p2p.Switch) *SwitchReporter { + return &SwitchReporter{ + sw: sw, + } +} + +// Report reports the behaviour of a peer to the Switch. +func (spbr *SwitchReporter) Report(behaviour PeerBehaviour) error { + peer := spbr.sw.Peers().Get(behaviour.peerID) + if peer == nil { + return errors.New("peer not found") + } + + switch reason := behaviour.reason.(type) { + case consensusVote, blockPart: + spbr.sw.MarkPeerAsGood(peer) + case badMessage: + spbr.sw.StopPeerForError(peer, reason.explanation) + case messageOutOfOrder: + spbr.sw.StopPeerForError(peer, reason.explanation) + default: + return errors.New("unknown reason reported") + } + + return nil +} + +// MockReporter is a concrete implementation of the Reporter +// interface used in reactor tests to ensure reactors report the correct +// behaviour in manufactured scenarios. +type MockReporter struct { + mtx sync.RWMutex + pb map[p2p.ID][]PeerBehaviour +} + +// NewMockReporter returns a Reporter which records all reported +// behaviours in memory. +func NewMockReporter() *MockReporter { + return &MockReporter{ + pb: map[p2p.ID][]PeerBehaviour{}, + } +} + +// Report stores the PeerBehaviour produced by the peer identified by peerID. +// It always returns nil, satisfying the Reporter interface. +func (mpbr *MockReporter) Report(behaviour PeerBehaviour) error { + mpbr.mtx.Lock() + defer mpbr.mtx.Unlock() + mpbr.pb[behaviour.peerID] = append(mpbr.pb[behaviour.peerID], behaviour) + return nil +} + +// GetBehaviours returns all behaviours reported on the peer identified by peerID. +func (mpbr *MockReporter) GetBehaviours(peerID p2p.ID) []PeerBehaviour { + mpbr.mtx.RLock() + defer mpbr.mtx.RUnlock() + if items, ok := mpbr.pb[peerID]; ok { + result := make([]PeerBehaviour, len(items)) + copy(result, items) + + return result + } else { + return []PeerBehaviour{} + } +} diff --git a/behaviour/reporter_test.go b/behaviour/reporter_test.go new file mode 100644 index 00000000..eae94e7b --- /dev/null +++ b/behaviour/reporter_test.go @@ -0,0 +1,186 @@ +package behaviour_test + +import ( + "sync" + "testing" + + bh "github.com/tendermint/tendermint/behaviour" + "github.com/tendermint/tendermint/p2p" +) +
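// A minimal usage sketch of the reporter API (sketch only: the switch sw and
// peerID are assumed to exist; bh is the import alias used in this file):
//
//	reporter := bh.NewSwitchReporter(sw)
//	err := reporter.Report(bh.BadMessage(peerID, "malformed vote"))
//	// badMessage and messageOutOfOrder stop the peer via StopPeerForError,
//	// while consensusVote and blockPart mark it as good; err is non-nil only
//	// when the peer is not known to the switch.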
+// TestMockReporter tests the MockReporter's ability to store reported +// peer behaviour in memory indexed by the peerID. +func TestMockReporter(t *testing.T) { + var peerID p2p.ID = "MockPeer" + pr := bh.NewMockReporter() + + behaviours := pr.GetBehaviours(peerID) + if len(behaviours) != 0 { + t.Error("Expected to have no behaviours reported") + } + + badMessage := bh.BadMessage(peerID, "bad message") + pr.Report(badMessage) + behaviours = pr.GetBehaviours(peerID) + if len(behaviours) != 1 { + t.Error("Expected the peer to have one reported behaviour") + } + + if behaviours[0] != badMessage { + t.Error("Expected Bad Message to have been reported") + } +} + +type scriptItem struct { + peerID p2p.ID + behaviour bh.PeerBehaviour +} + +// equalBehaviours returns true if a and b contain the same PeerBehaviours with +// the same frequencies, and false otherwise. +func equalBehaviours(a []bh.PeerBehaviour, b []bh.PeerBehaviour) bool { + aHistogram := map[bh.PeerBehaviour]int{} + bHistogram := map[bh.PeerBehaviour]int{} + + for _, behaviour := range a { + aHistogram[behaviour]++ + } + + for _, behaviour := range b { + bHistogram[behaviour]++ + } + + if len(aHistogram) != len(bHistogram) { + return false + } + + for _, behaviour := range a { + if aHistogram[behaviour] != bHistogram[behaviour] { + return false + } + } + + for _, behaviour := range b { + if bHistogram[behaviour] != aHistogram[behaviour] { + return false + } + } + + return true +} + +// TestEqualPeerBehaviours tests that equalBehaviours compares two slices of +// peer behaviours by the behaviours they contain and the frequencies with +// which those behaviours occur. +func TestEqualPeerBehaviours(t *testing.T) { + var ( + peerID p2p.ID = "MockPeer" + consensusVote = bh.ConsensusVote(peerID, "voted") + blockPart = bh.BlockPart(peerID, "blocked") + equals = []struct { + left []bh.PeerBehaviour + right []bh.PeerBehaviour + }{ + // Empty sets + {[]bh.PeerBehaviour{}, []bh.PeerBehaviour{}}, + // Single behaviours + {[]bh.PeerBehaviour{consensusVote}, []bh.PeerBehaviour{consensusVote}}, + // Equal Frequencies + {[]bh.PeerBehaviour{consensusVote, consensusVote}, + []bh.PeerBehaviour{consensusVote, consensusVote}}, + // Equal frequencies different orders + {[]bh.PeerBehaviour{consensusVote, blockPart}, + []bh.PeerBehaviour{blockPart, consensusVote}}, + } + unequals = []struct { + left []bh.PeerBehaviour + right []bh.PeerBehaviour + }{ + // Comparing empty sets to non empty sets + {[]bh.PeerBehaviour{}, []bh.PeerBehaviour{consensusVote}}, + // Different behaviours + {[]bh.PeerBehaviour{consensusVote}, []bh.PeerBehaviour{blockPart}}, + // Same behaviour with different frequencies + {[]bh.PeerBehaviour{consensusVote}, + []bh.PeerBehaviour{consensusVote, consensusVote}}, + } + ) + + for _, test := range equals { + if !equalBehaviours(test.left, test.right) { + t.Errorf("Expected %#v and %#v to be equal", test.left, test.right) + } + } + + for _, test := range unequals { + if equalBehaviours(test.left, test.right) { + t.Errorf("Expected %#v and %#v to be unequal", test.left, test.right) + } + } +} + +// TestMockPeerBehaviourReporterConcurrency constructs a scenario in which +// multiple goroutines use the same MockReporter instance. This reproduces +// the conditions under which MockReporter will be used in tests of a +// Reactor's `Receive` method, to ensure thread safety.
+func TestMockPeerBehaviourReporterConcurrency(t *testing.T) { + var ( + behaviourScript = []struct { + peerID p2p.ID + behaviours []bh.PeerBehaviour + }{ + {"1", []bh.PeerBehaviour{bh.ConsensusVote("1", "")}}, + {"2", []bh.PeerBehaviour{bh.ConsensusVote("2", ""), bh.ConsensusVote("2", ""), bh.ConsensusVote("2", "")}}, + {"3", []bh.PeerBehaviour{bh.BlockPart("3", ""), bh.ConsensusVote("3", ""), bh.BlockPart("3", ""), bh.ConsensusVote("3", "")}}, + {"4", []bh.PeerBehaviour{bh.ConsensusVote("4", ""), bh.ConsensusVote("4", ""), bh.ConsensusVote("4", ""), bh.ConsensusVote("4", "")}}, + {"5", []bh.PeerBehaviour{bh.BlockPart("5", ""), bh.ConsensusVote("5", ""), bh.BlockPart("5", ""), bh.ConsensusVote("5", "")}}, + } + ) + + var receiveWg sync.WaitGroup + pr := bh.NewMockReporter() + scriptItems := make(chan scriptItem) + done := make(chan int) + numConsumers := 3 + for i := 0; i < numConsumers; i++ { + receiveWg.Add(1) + go func() { + defer receiveWg.Done() + for { + select { + case pb := <-scriptItems: + pr.Report(pb.behaviour) + case <-done: + return + } + } + }() + } + + var sendingWg sync.WaitGroup + sendingWg.Add(1) + go func() { + defer sendingWg.Done() + for _, item := range behaviourScript { + for _, reason := range item.behaviours { + scriptItems <- scriptItem{item.peerID, reason} + } + } + }() + + sendingWg.Wait() + + for i := 0; i < numConsumers; i++ { + done <- 1 + } + + receiveWg.Wait() + + for _, items := range behaviourScript { + reported := pr.GetBehaviours(items.peerID) + if !equalBehaviours(reported, items.behaviours) { + t.Errorf("Expected peer %s behaviours to match\nExpected: %#v\nGot: %#v\n", + items.peerID, items.behaviours, reported) + } + } +} diff --git a/blockchain/reactor_test.go b/blockchain/reactor_test.go index 388d26ca..2f527ee9 100644 --- a/blockchain/reactor_test.go +++ b/blockchain/reactor_test.go @@ -15,6 +15,7 @@ import ( cmn "github.com/tendermint/tendermint/libs/common" dbm "github.com/tendermint/tendermint/libs/db" "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/mock" "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" @@ -95,7 +96,7 @@ func newBlockchainReactor(logger log.Logger, genDoc *types.GenesisDoc, privVals fastSync := true db := dbm.NewMemDB() blockExec := sm.NewBlockExecutor(db, log.TestingLogger(), proxyApp.Consensus(), - sm.MockMempool{}, sm.MockEvidencePool{}) + mock.Mempool{}, sm.MockEvidencePool{}) sm.SaveState(db, state) // let's add some blocks in @@ -292,6 +293,10 @@ func (app *testApp) EndBlock(req abci.RequestEndBlock) abci.ResponseEndBlock { return abci.ResponseEndBlock{} } +func (app *testApp) DeliverTx(tx []byte) abci.ResponseDeliverTx { + return abci.ResponseDeliverTx{Events: []abci.Event{}} +} + func (app *testApp) CheckTx(tx []byte) abci.ResponseCheckTx { return abci.ResponseCheckTx{} }
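For reference, a sketch of how an application might populate the new `Events` field that replaces `tags` (the event type and attribute key/value below are illustrative; `cmn` is the libs/common alias already imported in these tests):

	func (app *testApp) DeliverTx(tx []byte) abci.ResponseDeliverTx {
		return abci.ResponseDeliverTx{
			Events: []abci.Event{{
				Type: "transfer", // illustrative event type
				Attributes: []cmn.KVPair{
					{Key: []byte("sender"), Value: []byte("addr1")}, // illustrative attribute
				},
			}},
		}
	}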
diff --git a/blockchainexp/reactor_test.go b/blockchainexp/reactor_test.go index 6a3e8b2f..bf376a7f 100644 --- a/blockchainexp/reactor_test.go +++ b/blockchainexp/reactor_test.go @@ -14,6 +14,7 @@ import ( cmn "github.com/tendermint/tendermint/libs/common" dbm "github.com/tendermint/tendermint/libs/db" "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/mock" "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" @@ -93,8 +94,10 @@ func newBlockchainReactor(logger log.Logger, genDoc *types.GenesisDoc, privVals // NOTE we have to create and commit the blocks first because // pool.height is determined from the store. fastSync := true - blockExec := sm.NewBlockExecutor(dbm.NewMemDB(), log.TestingLogger(), proxyApp.Consensus(), - sm.MockMempool{}, sm.MockEvidencePool{}) + db := dbm.NewMemDB() + blockExec := sm.NewBlockExecutor(db, log.TestingLogger(), proxyApp.Consensus(), + mock.Mempool{}, sm.MockEvidencePool{}) + sm.SaveState(db, state) // let's add some blocks in for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ { diff --git a/cmd/tendermint/commands/reset_priv_validator.go b/cmd/tendermint/commands/reset_priv_validator.go index 055a76c5..4e8bde8b 100644 --- a/cmd/tendermint/commands/reset_priv_validator.go +++ b/cmd/tendermint/commands/reset_priv_validator.go @@ -18,6 +18,12 @@ var ResetAllCmd = &cobra.Command{ Run: resetAll, } +var keepAddrBook bool + +func init() { + ResetAllCmd.Flags().BoolVar(&keepAddrBook, "keep-addr-book", false, "Keep the address book intact") +} + // ResetPrivValidatorCmd resets the private validator files. var ResetPrivValidatorCmd = &cobra.Command{ Use: "unsafe_reset_priv_validator", @@ -41,7 +47,11 @@ func resetPrivValidator(cmd *cobra.Command, args []string) { // ResetAll removes address book files plus all data, and resets the privValidator data. // Exported so other CLI tools can use it. func ResetAll(dbDir, addrBookFile, privValKeyFile, privValStateFile string, logger log.Logger) { - removeAddrBook(addrBookFile, logger) + if keepAddrBook { + logger.Info("The address book remains intact") + } else { + removeAddrBook(addrBookFile, logger) + } if err := os.RemoveAll(dbDir); err == nil { logger.Info("Removed all blockchain history", "dir", dbDir) } else { diff --git a/cmd/tendermint/commands/testnet.go b/cmd/tendermint/commands/testnet.go index e34b8d30..f1dd6f16 100644 --- a/cmd/tendermint/commands/testnet.go +++ b/cmd/tendermint/commands/testnet.go @@ -8,6 +8,7 @@ import ( "strings" "github.com/spf13/cobra" + "github.com/spf13/viper" cfg "github.com/tendermint/tendermint/config" cmn "github.com/tendermint/tendermint/libs/common" @@ -20,13 +21,17 @@ import ( var ( nValidators int nNonValidators int + configFile string outputDir string nodeDirPrefix string populatePersistentPeers bool hostnamePrefix string + hostnameSuffix string startingIPAddress string + hostnames []string p2pPort int + randomMonikers bool ) const ( @@ -36,6 +41,8 @@ func init() { TestnetFilesCmd.Flags().IntVar(&nValidators, "v", 4, "Number of validators to initialize the testnet with") + TestnetFilesCmd.Flags().StringVar(&configFile, "config", "", + "Config file to use (note some options may be overwritten)") TestnetFilesCmd.Flags().IntVar(&nNonValidators, "n", 0, "Number of non-validators to initialize the testnet with") TestnetFilesCmd.Flags().StringVar(&outputDir, "o", "./mytestnet", @@ -46,11 +53,17 @@ func init() { TestnetFilesCmd.Flags().BoolVar(&populatePersistentPeers, "populate-persistent-peers", true, "Update config of each node with the list of persistent peers built using either hostname-prefix or starting-ip-address") TestnetFilesCmd.Flags().StringVar(&hostnamePrefix, "hostname-prefix", "node", - "Hostname prefix (node results in persistent peers list ID0@node0:26656, ID1@node1:26656, ...)") + "Hostname prefix (\"node\" results in persistent peers list ID0@node0:26656, ID1@node1:26656, ...)") + TestnetFilesCmd.Flags().StringVar(&hostnameSuffix, "hostname-suffix", "", + "Hostname suffix (\".xyz.com\" results in persistent peers list ID0@node0.xyz.com:26656, ID1@node1.xyz.com:26656, ...)")
TestnetFilesCmd.Flags().StringVar(&startingIPAddress, "starting-ip-address", "", - "Starting IP address (192.168.0.1 results in persistent peers list ID0@192.168.0.1:26656, ID1@192.168.0.2:26656, ...)") + "Starting IP address (\"192.168.0.1\" results in persistent peers list ID0@192.168.0.1:26656, ID1@192.168.0.2:26656, ...)") + TestnetFilesCmd.Flags().StringArrayVar(&hostnames, "hostname", []string{}, + "Manually override all hostnames of validators and non-validators (use --hostname multiple times for multiple hosts)") TestnetFilesCmd.Flags().IntVar(&p2pPort, "p2p-port", 26656, "P2P Port") + TestnetFilesCmd.Flags().BoolVar(&randomMonikers, "random-monikers", false, + "Randomize the moniker for each generated node") } // TestnetFilesCmd allows initialisation of files for a Tendermint testnet. @@ -72,7 +85,29 @@ Example: } func testnetFiles(cmd *cobra.Command, args []string) error { + if len(hostnames) > 0 && len(hostnames) != (nValidators+nNonValidators) { + return fmt.Errorf( + "testnet needs precisely %d hostnames (number of validators plus non-validators) if --hostname parameter is used", + nValidators+nNonValidators, + ) + } + config := cfg.DefaultConfig() + + // overwrite default config if set and valid + if configFile != "" { + viper.SetConfigFile(configFile) + if err := viper.ReadInConfig(); err != nil { + return err + } + if err := viper.Unmarshal(config); err != nil { + return err + } + if err := config.ValidateBasic(); err != nil { + return err + } + } + genVals := make([]types.GenesisValidator, nValidators) for i := 0; i < nValidators; i++ { @@ -162,6 +197,7 @@ func testnetFiles(cmd *cobra.Command, args []string) error { if populatePersistentPeers { config.P2P.PersistentPeers = persistentPeers } + config.Moniker = moniker(i) cfg.WriteConfigFile(filepath.Join(nodeDir, "config", "config.toml"), config) } @@ -171,21 +207,23 @@ } func hostnameOrIP(i int) string { - if startingIPAddress != "" { - ip := net.ParseIP(startingIPAddress) - ip = ip.To4() - if ip == nil { - fmt.Printf("%v: non ipv4 address\n", startingIPAddress) - os.Exit(1) - } - - for j := 0; j < i; j++ { - ip[3]++ - } - return ip.String() + if len(hostnames) > 0 && i < len(hostnames) { + return hostnames[i] + } + if startingIPAddress == "" { + return fmt.Sprintf("%s%d%s", hostnamePrefix, i, hostnameSuffix) + } + ip := net.ParseIP(startingIPAddress) + ip = ip.To4() + if ip == nil { + fmt.Printf("%v: not an IPv4 address\n", startingIPAddress) + os.Exit(1) } - return fmt.Sprintf("%s%d", hostnamePrefix, i) + for j := 0; j < i; j++ { + ip[3]++ + } + return ip.String() } func persistentPeersString(config *cfg.Config) (string, error) { @@ -201,3 +239,20 @@ } return strings.Join(persistentPeers, ","), nil } + +func moniker(i int) string { + if randomMonikers { + return randomMoniker() + } + if len(hostnames) > 0 && i < len(hostnames) { + return hostnames[i] + } + if startingIPAddress == "" { + return fmt.Sprintf("%s%d%s", hostnamePrefix, i, hostnameSuffix) + } + return randomMoniker() +} + +func randomMoniker() string { + return cmn.HexBytes(cmn.RandBytes(8)).String() +}
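A plausible invocation exercising the new testnet flags (the hostnames are illustrative; when --hostname is used it must be given exactly v+n times):

	tendermint testnet --v 2 --n 1 \
	  --hostname a.example.com --hostname b.example.com --hostname c.example.com \
	  --random-monikers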
diff --git a/config/config.go b/config/config.go index 8c5a83dc..921171df 100644 --- a/config/config.go +++ b/config/config.go @@ -159,7 +159,18 @@ type BaseConfig struct { // and verifying their commits FastSync bool `mapstructure:"fast_sync"` - // Database backend: leveldb | memdb | cleveldb + // Database backend: goleveldb | cleveldb | boltdb + // * goleveldb (github.com/syndtr/goleveldb - most popular implementation) + // - pure go + // - stable + // * cleveldb (uses levigo wrapper) + // - fast + // - requires gcc + // - use cleveldb build tag (go build -tags cleveldb) + // * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt) + // - EXPERIMENTAL + // - may be faster in some use cases (random reads - indexer) + // - use boltdb build tag (go build -tags boltdb) DBBackend string `mapstructure:"db_backend"` // Database directory @@ -213,7 +224,7 @@ func DefaultBaseConfig() BaseConfig { ProfListenAddress: "", FastSync: true, FilterPeers: false, - DBBackend: "leveldb", + DBBackend: "goleveldb", DBPath: "data", } } diff --git a/config/toml.go b/config/toml.go index f78d680e..ee6a25ac 100644 --- a/config/toml.go +++ b/config/toml.go @@ -28,13 +28,13 @@ func init() { // and panics if it fails. func EnsureRoot(rootDir string) { if err := cmn.EnsureDir(rootDir, DefaultDirPerm); err != nil { - cmn.PanicSanity(err.Error()) + panic(err.Error()) } if err := cmn.EnsureDir(filepath.Join(rootDir, defaultConfigDir), DefaultDirPerm); err != nil { - cmn.PanicSanity(err.Error()) + panic(err.Error()) } if err := cmn.EnsureDir(filepath.Join(rootDir, defaultDataDir), DefaultDirPerm); err != nil { - cmn.PanicSanity(err.Error()) + panic(err.Error()) } configFilePath := filepath.Join(rootDir, defaultConfigFilePath) @@ -82,7 +82,18 @@ moniker = "{{ .BaseConfig.Moniker }}" # and verifying their commits fast_sync = {{ .BaseConfig.FastSync }} -# Database backend: leveldb | memdb | cleveldb +# Database backend: goleveldb | cleveldb | boltdb +# * goleveldb (github.com/syndtr/goleveldb - most popular implementation) +# - pure go +# - stable +# * cleveldb (uses levigo wrapper) +# - fast +# - requires gcc +# - use cleveldb build tag (go build -tags cleveldb) +# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt) +# - EXPERIMENTAL +# - may be faster in some use cases (random reads - indexer) +# - use boltdb build tag (go build -tags boltdb) db_backend = "{{ .BaseConfig.DBBackend }}" # Database directory diff --git a/consensus/byzantine_test.go b/consensus/byzantine_test.go index 0c1b8856..c2eb114d 100644 --- a/consensus/byzantine_test.go +++ b/consensus/byzantine_test.go @@ -10,6 +10,7 @@ import ( "github.com/stretchr/testify/require" cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/p2p" + sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" ) @@ -69,7 +70,7 @@ func TestByzantine(t *testing.T) { blocksSubs[i], err = eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock) require.NoError(t, err) - conR := NewConsensusReactor(css[i], true) // so we dont start the consensus states + conR := NewConsensusReactor(css[i], true) // so we don't start the consensus states conR.SetLogger(logger.With("validator", i)) conR.SetEventBus(eventBus) @@ -81,6 +82,7 @@ func TestByzantine(t *testing.T) { } reactors[i] = conRI + sm.SaveState(css[i].blockExec.DB(), css[i].state) // save the validator info for height 1 } defer func() { @@ -268,3 +270,4 @@ func (br *ByzantineReactor) RemovePeer(peer p2p.Peer, reason interface{}) { func (br *ByzantineReactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) { br.reactor.Receive(chID, peer, msgBytes) } +func (br *ByzantineReactor) InitPeer(peer p2p.Peer) p2p.Peer { return peer } diff --git a/consensus/common_test.go b/consensus/common_test.go index 1346868a..91fa831d 100644 --- a/consensus/common_test.go +++
b/consensus/common_test.go @@ -14,6 +14,8 @@ import ( "github.com/go-kit/kit/log/term" + "path" + abcicli "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/counter" "github.com/tendermint/tendermint/abci/example/kvstore" @@ -119,6 +121,24 @@ func incrementRound(vss ...*validatorStub) { } } +type ValidatorStubsByAddress []*validatorStub + +func (vss ValidatorStubsByAddress) Len() int { + return len(vss) +} + +func (vss ValidatorStubsByAddress) Less(i, j int) bool { + return bytes.Compare(vss[i].GetPubKey().Address(), vss[j].GetPubKey().Address()) == -1 +} + +func (vss ValidatorStubsByAddress) Swap(i, j int) { + it := vss[i] + vss[i] = vss[j] + vss[i].Index = i + vss[j] = it + vss[j].Index = j +} + //------------------------------------------------------------------------------- // Functions for transitioning the consensus state @@ -228,7 +248,7 @@ func validatePrevoteAndPrecommit(t *testing.T, cs *ConsensusState, thisRound, lo } func subscribeToVoter(cs *ConsensusState, addr []byte) <-chan tmpubsub.Message { - votesSub, err := cs.eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryVote) + votesSub, err := cs.eventBus.SubscribeUnbuffered(context.Background(), testSubscriber, types.EventQueryVote) if err != nil { panic(fmt.Sprintf("failed to subscribe %s to %v", testSubscriber, types.EventQueryVote)) } @@ -268,7 +288,7 @@ func newConsensusStateWithConfigAndBlockStore(thisConfig *cfg.Config, state sm.S proxyAppConnCon := abcicli.NewLocalClient(mtx, app) // Make Mempool - mempool := mempl.NewMempool(thisConfig.Mempool, proxyAppConnMem, 0) + mempool := mempl.NewCListMempool(thisConfig.Mempool, proxyAppConnMem, 0) mempool.SetLogger(log.TestingLogger().With("module", "mempool")) if thisConfig.Consensus.WaitForTxs() { mempool.EnableTxsAvailable() @@ -278,7 +298,8 @@ func newConsensusStateWithConfigAndBlockStore(thisConfig *cfg.Config, state sm.S evpool := sm.MockEvidencePool{} // Make ConsensusState - stateDB := dbm.NewMemDB() + stateDB := blockDB + sm.SaveState(stateDB, state) // save the validator info for height 1 blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyAppConnCon, mempool, evpool) cs := NewConsensusState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool) cs.SetLogger(log.TestingLogger().With("module", "consensus")) @@ -351,7 +372,7 @@ func ensureNoNewUnlock(unlockCh <-chan tmpubsub.Message) { } func ensureNoNewTimeout(stepCh <-chan tmpubsub.Message, timeout int64) { - timeoutDuration := time.Duration(timeout*5) * time.Nanosecond + timeoutDuration := time.Duration(timeout*10) * time.Nanosecond ensureNoNewEvent( stepCh, timeoutDuration, @@ -398,7 +419,7 @@ func ensureNewRound(roundCh <-chan tmpubsub.Message, height int64, round int) { } func ensureNewTimeout(timeoutCh <-chan tmpubsub.Message, height int64, round int, timeout int64) { - timeoutDuration := time.Duration(timeout*5) * time.Nanosecond + timeoutDuration := time.Duration(timeout*10) * time.Nanosecond ensureNewEvent(timeoutCh, height, round, timeoutDuration, "Timeout expired while waiting for NewTimeout event") } @@ -564,7 +585,7 @@ func randConsensusNet(nValidators int, testName string, tickerFunc func() Timeou vals := types.TM2PB.ValidatorUpdates(state.Validators) app.InitChain(abci.RequestInitChain{Validators: vals}) - css[i] = newConsensusStateWithConfig(thisConfig, state, privVals[i], app) + css[i] = newConsensusStateWithConfigAndBlockStore(thisConfig, state, privVals[i], app, stateDB) css[i].SetTimeoutTicker(tickerFunc())
css[i].SetLogger(logger.With("validator", i, "module", "consensus")) } @@ -576,12 +597,11 @@ func randConsensusNet(nValidators int, testName string, tickerFunc func() Timeou } // nPeers = nValidators + nNotValidator -func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerFunc func() TimeoutTicker, - appFunc func() abci.Application) ([]*ConsensusState, cleanupFunc) { - +func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerFunc func() TimeoutTicker, appFunc func(string) abci.Application) ([]*ConsensusState, *types.GenesisDoc, *cfg.Config, cleanupFunc) { genDoc, privVals := randGenesisDoc(nValidators, false, testMinPower) css := make([]*ConsensusState, nPeers) logger := consensusLogger() + var peer0Config *cfg.Config configRootDirs := make([]string, 0, nPeers) for i := 0; i < nPeers; i++ { stateDB := dbm.NewMemDB() // each state needs its own db @@ -589,6 +609,9 @@ func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerF thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i)) configRootDirs = append(configRootDirs, thisConfig.RootDir) ensureDir(filepath.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal + if i == 0 { + peer0Config = thisConfig + } var privVal types.PrivValidator if i < nValidators { privVal = privVals[i] @@ -605,15 +628,19 @@ func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerF privVal = privval.GenFilePV(tempKeyFile.Name(), tempStateFile.Name()) } - app := appFunc() + app := appFunc(path.Join(config.DBDir(), fmt.Sprintf("%s_%d", testName, i))) vals := types.TM2PB.ValidatorUpdates(state.Validators) + if _, ok := app.(*kvstore.PersistentKVStoreApplication); ok { + state.Version.Consensus.App = kvstore.ProtocolVersion //simulate handshake, receive app version. 
If we don't do this, the replay test will fail + } app.InitChain(abci.RequestInitChain{Validators: vals}) + // sm.SaveState(stateDB, state) // height 1's validator info was already saved by LoadStateFromDBOrGenesisDoc above css[i] = newConsensusStateWithConfig(thisConfig, state, privVal, app) css[i].SetTimeoutTicker(tickerFunc()) css[i].SetLogger(logger.With("validator", i, "module", "consensus")) } - return css, func() { + return css, genDoc, peer0Config, func() { for _, dir := range configRootDirs { os.RemoveAll(dir) } @@ -719,3 +746,7 @@ func newPersistentKVStore() abci.Application { } return kvstore.NewPersistentKVStoreApplication(dir) } + +func newPersistentKVStoreWithPath(dbDir string) abci.Application { + return kvstore.NewPersistentKVStoreApplication(dbDir) +} diff --git a/consensus/mempool_test.go b/consensus/mempool_test.go index e7669b17..af15a1fe 100644 --- a/consensus/mempool_test.go +++ b/consensus/mempool_test.go @@ -11,13 +11,15 @@ import ( "github.com/tendermint/tendermint/abci/example/code" abci "github.com/tendermint/tendermint/abci/types" + dbm "github.com/tendermint/tendermint/libs/db" + mempl "github.com/tendermint/tendermint/mempool" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" ) // for testing -func assertMempool(txn txNotifier) sm.Mempool { - return txn.(sm.Mempool) +func assertMempool(txn txNotifier) mempl.Mempool { + return txn.(mempl.Mempool) } func TestMempoolNoProgressUntilTxsAvailable(t *testing.T) { @@ -106,7 +108,9 @@ func deliverTxsRange(cs *ConsensusState, start, end int) { func TestMempoolTxConcurrentWithCommit(t *testing.T) { state, privVals := randGenesisState(1, false, 10) - cs := newConsensusState(state, privVals[0], NewCounterApplication()) + blockDB := dbm.NewMemDB() + cs := newConsensusStateWithConfigAndBlockStore(config, state, privVals[0], NewCounterApplication(), blockDB) + sm.SaveState(blockDB, state) height, round := cs.Height, cs.Round newBlockCh := subscribe(cs.eventBus, types.EventQueryNewBlock) @@ -129,7 +133,9 @@ func TestMempoolRmBadTx(t *testing.T) { state, privVals := randGenesisState(1, false, 10) app := NewCounterApplication() - cs := newConsensusState(state, privVals[0], app) + blockDB := dbm.NewMemDB() + cs := newConsensusStateWithConfigAndBlockStore(config, state, privVals[0], app, blockDB) + sm.SaveState(blockDB, state) // increment the counter by 1 txBytes := make([]byte, 8) diff --git a/consensus/reactor.go b/consensus/reactor.go index 604e54b4..36e948f6 100644 --- a/consensus/reactor.go +++ b/consensus/reactor.go @@ -155,16 +155,24 @@ func (conR *ConsensusReactor) GetChannels() []*p2p.ChannelDescriptor { } } -// AddPeer implements Reactor +// InitPeer implements Reactor by creating a state for the peer. +func (conR *ConsensusReactor) InitPeer(peer p2p.Peer) p2p.Peer { + peerState := NewPeerState(peer).SetLogger(conR.Logger) + peer.Set(types.PeerStateKey, peerState) + return peer +} + +// AddPeer implements Reactor by spawning multiple gossiping goroutines for the +// peer. func (conR *ConsensusReactor) AddPeer(peer p2p.Peer) { if !conR.IsRunning() { return } - // Create peerState for peer - peerState := NewPeerState(peer).SetLogger(conR.Logger) - peer.Set(types.PeerStateKey, peerState) - + peerState, ok := peer.Get(types.PeerStateKey).(*PeerState) + if !ok { + panic(fmt.Sprintf("peer %v has no state", peer)) + } // Begin routines for this peer.
go conR.gossipDataRoutine(peer, peerState) go conR.gossipVotesRoutine(peer, peerState) @@ -177,7 +185,7 @@ } } -// RemovePeer implements Reactor +// RemovePeer is a noop. func (conR *ConsensusReactor) RemovePeer(peer p2p.Peer, reason interface{}) { if !conR.IsRunning() { return @@ -491,7 +499,7 @@ OUTER_LOOP: if prs.ProposalBlockParts == nil { blockMeta := conR.conS.blockStore.LoadBlockMeta(prs.Height) if blockMeta == nil { - cmn.PanicCrisis(fmt.Sprintf("Failed to load block %d when blockStore is at %d", + panic(fmt.Sprintf("Failed to load block %d when blockStore is at %d", prs.Height, conR.conS.blockStore.Height())) } ps.InitProposalBlockParts(blockMeta.BlockID.PartsHeader) @@ -1110,7 +1118,7 @@ func (ps *PeerState) ensureCatchupCommitRound(height int64, round int, numValida NOTE: This is wrong, 'round' could change. e.g. if orig round is not the same as block LastCommit round. if ps.CatchupCommitRound != -1 && ps.CatchupCommitRound != round { - cmn.PanicSanity(fmt.Sprintf("Conflicting CatchupCommitRound. Height: %v, Orig: %v, New: %v", height, ps.CatchupCommitRound, round)) + panic(fmt.Sprintf("Conflicting CatchupCommitRound. Height: %v, Orig: %v, New: %v", height, ps.CatchupCommitRound, round)) } */ if ps.PRS.CatchupCommitRound == round { diff --git a/consensus/reactor_test.go b/consensus/reactor_test.go index fba6570a..cf6cccea 100644 --- a/consensus/reactor_test.go +++ b/consensus/reactor_test.go @@ -22,6 +22,7 @@ import ( "github.com/tendermint/tendermint/libs/log" mempl "github.com/tendermint/tendermint/mempool" "github.com/tendermint/tendermint/p2p" + "github.com/tendermint/tendermint/p2p/mock" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/tmstore" "github.com/tendermint/tendermint/types" @@ -51,6 +52,10 @@ func startConsensusNet(t *testing.T, css []*ConsensusState, N int) ( blocksSub, err := eventBuses[i].Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock) require.NoError(t, err) blocksSubs = append(blocksSubs, blocksSub) + + if css[i].state.LastBlockHeight == 0 { // simulate handling InitChain in the handshake + sm.SaveState(css[i].blockExec.DB(), css[i].state) + } } // make connected switches and start all reactors p2p.MakeConnectedSwitches(config.P2P, N, func(i int, s *p2p.Switch) *p2p.Switch { @@ -136,7 +141,7 @@ func TestReactorWithEvidence(t *testing.T) { proxyAppConnCon := abcicli.NewLocalClient(mtx, app) // Make Mempool - mempool := mempl.NewMempool(thisConfig.Mempool, proxyAppConnMem, 0) + mempool := mempl.NewCListMempool(thisConfig.Mempool, proxyAppConnMem, 0) mempool.SetLogger(log.TestingLogger().With("module", "mempool")) if thisConfig.Consensus.WaitForTxs() { mempool.EnableTxsAvailable() @@ -239,6 +244,49 @@ func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) { }, css) } +func TestReactorReceiveDoesNotPanicIfAddPeerHasntBeenCalledYet(t *testing.T) { + N := 1 + css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter) + defer cleanup() + reactors, _, eventBuses := startConsensusNet(t, css, N) + defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses) + + var ( + reactor = reactors[0] + peer = mock.NewPeer(nil) + msg = cdc.MustMarshalBinaryBare(&HasVoteMessage{Height: 1, Round: 1, Index: 1, Type: types.PrevoteType}) + ) + + reactor.InitPeer(peer) + + // simulate switch calling Receive before AddPeer + assert.NotPanics(t, func() { + reactor.Receive(StateChannel, peer, msg) + reactor.AddPeer(peer) + }) +} +
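// Taken together, this test and the one below pin down the new peer
// lifecycle introduced above: InitPeer must run before Receive or AddPeer,
// while AddPeer may legitimately run after the first Receive. A sketch of
// the call order the switch is assumed to follow:
//
//	peer = reactor.InitPeer(peer)            // allocates the PeerState
//	reactor.Receive(StateChannel, peer, msg) // safe any time after InitPeer
//	reactor.AddPeer(peer)                    // spawns gossip routines; panics if InitPeer was skipped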
+func TestReactorReceivePanicsIfInitPeerHasntBeenCalledYet(t *testing.T) { + N := 1 + css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter) + defer cleanup() + reactors, _, eventBuses := startConsensusNet(t, css, N) + defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses) + + var ( + reactor = reactors[0] + peer = mock.NewPeer(nil) + msg = cdc.MustMarshalBinaryBare(&HasVoteMessage{Height: 1, Round: 1, Index: 1, Type: types.PrevoteType}) + ) + + // InitPeer is deliberately not called here + + // simulate switch calling Receive before InitPeer + assert.Panics(t, func() { + reactor.Receive(StateChannel, peer, msg) + }) +} + // Test we record stats about votes and block parts from other peers. func TestReactorRecordsVotesAndBlockParts(t *testing.T) { N := 4 @@ -329,7 +377,8 @@ func TestReactorVotingPowerChange(t *testing.T) { func TestReactorValidatorSetChanges(t *testing.T) { nPeers := 7 nVals := 4 - css, cleanup := randConsensusNetWithPeers(nVals, nPeers, "consensus_val_set_changes_test", newMockTickerFunc(true), newPersistentKVStore) + css, _, _, cleanup := randConsensusNetWithPeers(nVals, nPeers, "consensus_val_set_changes_test", newMockTickerFunc(true), newPersistentKVStoreWithPath) + defer cleanup() logger := log.TestingLogger() diff --git a/consensus/replay.go b/consensus/replay.go index e47d4892..794f870d 100644 --- a/consensus/replay.go +++ b/consensus/replay.go @@ -13,10 +13,10 @@ import ( abci "github.com/tendermint/tendermint/abci/types" //auto "github.com/tendermint/tendermint/libs/autofile" - cmn "github.com/tendermint/tendermint/libs/common" + dbm "github.com/tendermint/tendermint/libs/db" "github.com/tendermint/tendermint/libs/log" - + "github.com/tendermint/tendermint/mock" "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" @@ -230,6 +230,7 @@ func (h *Handshaker) SetEventBus(eventBus types.BlockEventPublisher) { h.eventBus = eventBus } +// NBlocks returns the number of blocks applied to the state. func (h *Handshaker) NBlocks() int { return h.nBlocks } @@ -257,13 +258,15 @@ func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error { ) // Set AppVersion on the state. - h.initialState.Version.Consensus.App = version.Protocol(res.AppVersion) - sm.SaveState(h.stateDB, h.initialState) + if h.initialState.Version.Consensus.App != version.Protocol(res.AppVersion) { + h.initialState.Version.Consensus.App = version.Protocol(res.AppVersion) + sm.SaveState(h.stateDB, h.initialState) + } // Replay blocks up to the latest in the blockstore. _, err = h.ReplayBlocks(h.initialState, appHash, blockHeight, proxyApp) if err != nil { - return fmt.Errorf("Error on replay: %v", err) + return fmt.Errorf("error on replay: %v", err) } h.logger.Info("Completed ABCI Handshake - Tendermint and App are synced", @@ -274,7 +277,8 @@ func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error { return nil } -// Replay all blocks since appBlockHeight and ensure the result matches the current state. +// ReplayBlocks replays all blocks since appBlockHeight and ensures the result +// matches the current state. // Returns the final AppHash or an error. func (h *Handshaker) ReplayBlocks( state sm.State, @@ -319,7 +323,7 @@ func (h *Handshaker) ReplayBlocks( } else { // If validator set is not set in genesis and still empty after InitChain, exit.
if len(h.genDoc.Validators) == 0 { - return nil, fmt.Errorf("Validator set is nil in genesis and still empty after InitChain") + return nil, fmt.Errorf("validator set is nil in genesis and still empty after InitChain") } } @@ -332,7 +336,8 @@ func (h *Handshaker) ReplayBlocks( // First handle edge cases and constraints on the storeBlockHeight. if storeBlockHeight == 0 { - return appHash, checkAppHash(state, appHash) + assertAppHashEqualsOneFromState(appHash, state) + return appHash, nil } else if storeBlockHeight < appBlockHeight { // the app should never be ahead of the store (but this is under app's control) @@ -340,11 +345,11 @@ } else if storeBlockHeight < stateBlockHeight { // the state should never be ahead of the store (this is under tendermint's control) - cmn.PanicSanity(fmt.Sprintf("StateBlockHeight (%d) > StoreBlockHeight (%d)", stateBlockHeight, storeBlockHeight)) + panic(fmt.Sprintf("StateBlockHeight (%d) > StoreBlockHeight (%d)", stateBlockHeight, storeBlockHeight)) } else if storeBlockHeight > stateBlockHeight+1 { // store should be at most one ahead of the state (this is under tendermint's control) - cmn.PanicSanity(fmt.Sprintf("StoreBlockHeight (%d) > StateBlockHeight + 1 (%d)", storeBlockHeight, stateBlockHeight+1)) + panic(fmt.Sprintf("StoreBlockHeight (%d) > StateBlockHeight + 1 (%d)", storeBlockHeight, stateBlockHeight+1)) } var err error @@ -359,7 +364,8 @@ } else if appBlockHeight == storeBlockHeight { // We're good! - return appHash, checkAppHash(state, appHash) + assertAppHashEqualsOneFromState(appHash, state) + return appHash, nil } } else if storeBlockHeight == stateBlockHeight+1 { @@ -380,7 +386,7 @@ return state.AppHash, err } else if appBlockHeight == storeBlockHeight { - // We ran Commit, but didn't save the state, so replayBlock with mock app + // We ran Commit, but didn't save the state, so replayBlock with mock app. abciResponses, err := sm.LoadABCIResponses(h.stateDB, storeBlockHeight) if err != nil { return nil, err @@ -393,8 +399,8 @@ } - cmn.PanicSanity("Should never happen") - return nil, nil + panic(fmt.Sprintf("uncovered case! appHeight: %d, storeHeight: %d, stateHeight: %d", + appBlockHeight, storeBlockHeight, stateBlockHeight)) } func (h *Handshaker) replayBlocks(state sm.State, proxyApp proxy.AppConns, appBlockHeight, storeBlockHeight int64, mutateState bool) ([]byte, error) { @@ -417,7 +423,12 @@ for i := appBlockHeight + 1; i <= finalBlock; i++ { h.logger.Info("Applying block", "height", i) block := h.store.LoadBlock(i) - appHash, err = sm.ExecCommitBlock(proxyApp.Consensus(), block, h.logger, state.LastValidators, h.stateDB) + // Extra check to ensure the app was not changed in a way it shouldn't have been. + if len(appHash) > 0 { + assertAppHashEqualsOneFromBlock(appHash, block) + } + + appHash, err = sm.ExecCommitBlock(proxyApp.Consensus(), block, h.logger, h.stateDB) if err != nil { return nil, err } @@ -434,7 +445,8 @@ appHash = state.AppHash } - return appHash, checkAppHash(state, appHash) + assertAppHashEqualsOneFromState(appHash, state) + return appHash, nil } // ApplyBlock on the proxyApp with the last block.
@@ -442,7 +454,7 @@ func (h *Handshaker) replayBlock(state sm.State, height int64, proxyApp proxy.Ap block := h.store.LoadBlock(height) meta := h.store.LoadBlockMeta(height) - blockExec := sm.NewBlockExecutor(h.stateDB, h.logger, proxyApp, sm.MockMempool{}, sm.MockEvidencePool{}) + blockExec := sm.NewBlockExecutor(h.stateDB, h.logger, proxyApp, mock.Mempool{}, sm.MockEvidencePool{}) blockExec.SetEventBus(h.eventBus) var err error @@ -456,11 +468,26 @@ return state, nil } -func checkAppHash(state sm.State, appHash []byte) error { - if !bytes.Equal(state.AppHash, appHash) { - panic(fmt.Errorf("Tendermint state.AppHash does not match AppHash after replay. Got %X, expected %X", appHash, state.AppHash).Error()) +func assertAppHashEqualsOneFromBlock(appHash []byte, block *types.Block) { + if !bytes.Equal(appHash, block.AppHash) { + panic(fmt.Sprintf(`block.AppHash does not match AppHash after replay. Got %X, expected %X. + +Block: %v +`, + appHash, block.AppHash, block)) + } +} + +func assertAppHashEqualsOneFromState(appHash []byte, state sm.State) { + if !bytes.Equal(appHash, state.AppHash) { + panic(fmt.Sprintf(`state.AppHash does not match AppHash after replay. Got +%X, expected %X. + +State: %v + +Did you reset Tendermint without resetting your application's data?`, + appHash, state.AppHash, state)) } - return nil } //-------------------------------------------------------------------------------- @@ -491,6 +518,9 @@ type mockProxyApp struct { func (mock *mockProxyApp) DeliverTx(tx []byte) abci.ResponseDeliverTx { r := mock.abciResponses.DeliverTx[mock.txCount] mock.txCount++ + if r == nil { // it can be nil because amino unmarshalling may turn an empty ResponseDeliverTx into nil + return abci.ResponseDeliverTx{} + } return *r } diff --git a/consensus/replay_file.go b/consensus/replay_file.go index 467aeb31..41fa8bf9 100644 --- a/consensus/replay_file.go +++ b/consensus/replay_file.go @@ -15,6 +15,7 @@ import ( cmn "github.com/tendermint/tendermint/libs/common" dbm "github.com/tendermint/tendermint/libs/db" "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/mock" "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/tmstore" @@ -312,7 +313,7 @@ func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusCo cmn.Exit(fmt.Sprintf("Error on handshake: %v", err)) } - mempool, evpool := sm.MockMempool{}, sm.MockEvidencePool{} + mempool, evpool := mock.Mempool{}, sm.MockEvidencePool{} blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool) consensusState := NewConsensusState(csConfig, state.Copy(), blockExec, diff --git a/consensus/replay_test.go b/consensus/replay_test.go index 86dca765..724dd056 100644 --- a/consensus/replay_test.go +++ b/consensus/replay_test.go @@ -7,7 +7,7 @@ import ( "io" "io/ioutil" "os" - "path" + "path/filepath" "runtime" "testing" "time" @@ -15,16 +15,21 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "sort" + "github.com/tendermint/tendermint/abci/example/kvstore" abci "github.com/tendermint/tendermint/abci/types" cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto" + cmn "github.com/tendermint/tendermint/libs/common" dbm "github.com/tendermint/tendermint/libs/db" "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/mock"
"github.com/tendermint/tendermint/privval" "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" + tmtime "github.com/tendermint/tendermint/types/time" "github.com/tendermint/tendermint/version" ) @@ -88,7 +93,7 @@ func startNewConsensusStateAndWaitForBlock(t *testing.T, consensusReplayConfig * } } -func sendTxs(cs *ConsensusState, ctx context.Context) { +func sendTxs(ctx context.Context, cs *ConsensusState) { for i := 0; i < 256; i++ { select { case <-ctx.Done(): @@ -113,7 +118,7 @@ func TestWALCrash(t *testing.T) { 1}, {"many non-empty blocks", func(stateDB dbm.DB, cs *ConsensusState, ctx context.Context) { - go sendTxs(cs, ctx) + go sendTxs(ctx, cs) }, 3}, } @@ -138,10 +143,10 @@ LOOP: // create consensus state from a clean slate logger := log.NewNopLogger() - stateDB := dbm.NewMemDB() + blockDB := dbm.NewMemDB() + stateDB := blockDB state, _ := sm.MakeGenesisStateFromFile(consensusReplayConfig.GenesisFile()) privValidator := loadPrivValidator(consensusReplayConfig) - blockDB := dbm.NewMemDB() cs := newConsensusStateWithConfigAndBlockStore(consensusReplayConfig, state, privValidator, kvstore.NewKVStoreApplication(), blockDB) cs.SetLogger(logger) @@ -260,15 +265,23 @@ func (w *crashingWAL) Stop() error { return w.next.Stop() } func (w *crashingWAL) Wait() { w.next.Wait() } //------------------------------------------------------------------------------------------ -// Handshake Tests +type testSim struct { + GenesisState sm.State + Config *cfg.Config + Chain []*types.Block + Commits []*types.Commit + CleanupFunc cleanupFunc +} const ( - NUM_BLOCKS = 6 + numBlocks = 6 ) var ( - mempool = sm.MockMempool{} + mempool = mock.Mempool{} evpool = sm.MockEvidencePool{} + + sim testSim ) //--------------------------------------- @@ -279,93 +292,356 @@ var ( // 2 - save block and committed but state is behind var modes = []uint{0, 1, 2} +// This is actually not a test, it's for storing validator change tx data for testHandshakeReplay +func TestSimulateValidatorsChange(t *testing.T) { + nPeers := 7 + nVals := 4 + css, genDoc, config, cleanup := randConsensusNetWithPeers(nVals, nPeers, "replay_test", newMockTickerFunc(true), newPersistentKVStoreWithPath) + sim.Config = config + sim.GenesisState, _ = sm.MakeGenesisState(genDoc) + sim.CleanupFunc = cleanup + + partSize := types.BlockPartSizeBytes + + newRoundCh := subscribe(css[0].eventBus, types.EventQueryNewRound) + proposalCh := subscribe(css[0].eventBus, types.EventQueryCompleteProposal) + + vss := make([]*validatorStub, nPeers) + for i := 0; i < nPeers; i++ { + vss[i] = NewValidatorStub(css[i].privValidator, i) + } + height, round := css[0].Height, css[0].Round + // start the machine + startTestRound(css[0], height, round) + incrementHeight(vss...) + ensureNewRound(newRoundCh, height, 0) + ensureNewProposal(proposalCh, height, round) + rs := css[0].GetRoundState() + signAddVotes(css[0], types.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vss[1:nVals]...) + ensureNewRound(newRoundCh, height+1, 0) + + //height 2 + height++ + incrementHeight(vss...) 
+ newValidatorPubKey1 := css[nVals].privValidator.GetPubKey() + valPubKey1ABCI := types.TM2PB.PubKey(newValidatorPubKey1) + newValidatorTx1 := kvstore.MakeValSetChangeTx(valPubKey1ABCI, testMinPower) + err := assertMempool(css[0].txNotifier).CheckTx(newValidatorTx1, nil) + assert.Nil(t, err) + propBlock, _ := css[0].createProposalBlock() //changeProposer(t, cs1, vs2) + propBlockParts := propBlock.MakePartSet(partSize) + blockID := types.BlockID{Hash: propBlock.Hash(), PartsHeader: propBlockParts.Header()} + proposal := types.NewProposal(vss[1].Height, round, -1, blockID) + if err := vss[1].SignProposal(config.ChainID(), proposal); err != nil { + t.Fatal("failed to sign bad proposal", err) + } + + // set the proposal block + if err := css[0].SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil { + t.Fatal(err) + } + ensureNewProposal(proposalCh, height, round) + rs = css[0].GetRoundState() + signAddVotes(css[0], types.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vss[1:nVals]...) + ensureNewRound(newRoundCh, height+1, 0) + + //height 3 + height++ + incrementHeight(vss...) + updateValidatorPubKey1 := css[nVals].privValidator.GetPubKey() + updatePubKey1ABCI := types.TM2PB.PubKey(updateValidatorPubKey1) + updateValidatorTx1 := kvstore.MakeValSetChangeTx(updatePubKey1ABCI, 25) + err = assertMempool(css[0].txNotifier).CheckTx(updateValidatorTx1, nil) + assert.Nil(t, err) + propBlock, _ = css[0].createProposalBlock() //changeProposer(t, cs1, vs2) + propBlockParts = propBlock.MakePartSet(partSize) + blockID = types.BlockID{Hash: propBlock.Hash(), PartsHeader: propBlockParts.Header()} + proposal = types.NewProposal(vss[2].Height, round, -1, blockID) + if err := vss[2].SignProposal(config.ChainID(), proposal); err != nil { + t.Fatal("failed to sign bad proposal", err) + } + + // set the proposal block + if err := css[0].SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil { + t.Fatal(err) + } + ensureNewProposal(proposalCh, height, round) + rs = css[0].GetRoundState() + signAddVotes(css[0], types.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vss[1:nVals]...) + ensureNewRound(newRoundCh, height+1, 0) + + //height 4 + height++ + incrementHeight(vss...) 
+ newValidatorPubKey2 := css[nVals+1].privValidator.GetPubKey() + newVal2ABCI := types.TM2PB.PubKey(newValidatorPubKey2) + newValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, testMinPower) + err = assertMempool(css[0].txNotifier).CheckTx(newValidatorTx2, nil) + assert.Nil(t, err) + newValidatorPubKey3 := css[nVals+2].privValidator.GetPubKey() + newVal3ABCI := types.TM2PB.PubKey(newValidatorPubKey3) + newValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, testMinPower) + err = assertMempool(css[0].txNotifier).CheckTx(newValidatorTx3, nil) + assert.Nil(t, err) + propBlock, _ = css[0].createProposalBlock() //changeProposer(t, cs1, vs2) + propBlockParts = propBlock.MakePartSet(partSize) + blockID = types.BlockID{Hash: propBlock.Hash(), PartsHeader: propBlockParts.Header()} + newVss := make([]*validatorStub, nVals+1) + copy(newVss, vss[:nVals+1]) + sort.Sort(ValidatorStubsByAddress(newVss)) + selfIndex := 0 + for i, vs := range newVss { + if vs.GetPubKey().Equals(css[0].privValidator.GetPubKey()) { + selfIndex = i + break + } + } + + proposal = types.NewProposal(vss[3].Height, round, -1, blockID) + if err := vss[3].SignProposal(config.ChainID(), proposal); err != nil { + t.Fatal("failed to sign bad proposal", err) + } + + // set the proposal block + if err := css[0].SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil { + t.Fatal(err) + } + ensureNewProposal(proposalCh, height, round) + + removeValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, 0) + err = assertMempool(css[0].txNotifier).CheckTx(removeValidatorTx2, nil) + assert.Nil(t, err) + + rs = css[0].GetRoundState() + for i := 0; i < nVals+1; i++ { + if i == selfIndex { + continue + } + signAddVotes(css[0], types.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), newVss[i]) + } + + ensureNewRound(newRoundCh, height+1, 0) + + //height 5 + height++ + incrementHeight(vss...) + ensureNewProposal(proposalCh, height, round) + rs = css[0].GetRoundState() + for i := 0; i < nVals+1; i++ { + if i == selfIndex { + continue + } + signAddVotes(css[0], types.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), newVss[i]) + } + ensureNewRound(newRoundCh, height+1, 0) + + //height 6 + height++ + incrementHeight(vss...) 
+ removeValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, 0) + err = assertMempool(css[0].txNotifier).CheckTx(removeValidatorTx3, nil) + assert.Nil(t, err) + propBlock, _ = css[0].createProposalBlock() //changeProposer(t, cs1, vs2) + propBlockParts = propBlock.MakePartSet(partSize) + blockID = types.BlockID{Hash: propBlock.Hash(), PartsHeader: propBlockParts.Header()} + newVss = make([]*validatorStub, nVals+3) + copy(newVss, vss[:nVals+3]) + sort.Sort(ValidatorStubsByAddress(newVss)) + for i, vs := range newVss { + if vs.GetPubKey().Equals(css[0].privValidator.GetPubKey()) { + selfIndex = i + break + } + } + proposal = types.NewProposal(vss[1].Height, round, -1, blockID) + if err := vss[1].SignProposal(config.ChainID(), proposal); err != nil { + t.Fatal("failed to sign bad proposal", err) + } + + // set the proposal block + if err := css[0].SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil { + t.Fatal(err) + } + ensureNewProposal(proposalCh, height, round) + rs = css[0].GetRoundState() + for i := 0; i < nVals+3; i++ { + if i == selfIndex { + continue + } + signAddVotes(css[0], types.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), newVss[i]) + } + ensureNewRound(newRoundCh, height+1, 0) + + sim.Chain = make([]*types.Block, 0) + sim.Commits = make([]*types.Commit, 0) + for i := 1; i <= numBlocks; i++ { + sim.Chain = append(sim.Chain, css[0].blockStore.LoadBlock(int64(i))) + sim.Commits = append(sim.Commits, css[0].blockStore.LoadBlockCommit(int64(i))) + } +} + // Sync from scratch func TestHandshakeReplayAll(t *testing.T) { - for i, m := range modes { - config := ResetConfig(fmt.Sprintf("%s_%v", t.Name(), i)) - defer os.RemoveAll(config.RootDir) - testHandshakeReplay(t, config, 0, m) + for _, m := range modes { + testHandshakeReplay(t, config, 0, m, false) + } + for _, m := range modes { + testHandshakeReplay(t, config, 0, m, true) } } // Sync many, not from scratch func TestHandshakeReplaySome(t *testing.T) { - for i, m := range modes { - config := ResetConfig(fmt.Sprintf("%s_%v", t.Name(), i)) - defer os.RemoveAll(config.RootDir) - testHandshakeReplay(t, config, 1, m) + for _, m := range modes { + testHandshakeReplay(t, config, 1, m, false) + } + for _, m := range modes { + testHandshakeReplay(t, config, 1, m, true) } } // Sync from lagging by one func TestHandshakeReplayOne(t *testing.T) { - for i, m := range modes { - config := ResetConfig(fmt.Sprintf("%s_%v", t.Name(), i)) - defer os.RemoveAll(config.RootDir) - testHandshakeReplay(t, config, NUM_BLOCKS-1, m) + for _, m := range modes { + testHandshakeReplay(t, config, numBlocks-1, m, false) + } + for _, m := range modes { + testHandshakeReplay(t, config, numBlocks-1, m, true) } } // Sync from caught up func TestHandshakeReplayNone(t *testing.T) { - for i, m := range modes { - config := ResetConfig(fmt.Sprintf("%s_%v", t.Name(), i)) - defer os.RemoveAll(config.RootDir) - testHandshakeReplay(t, config, NUM_BLOCKS, m) + for _, m := range modes { + testHandshakeReplay(t, config, numBlocks, m, false) } + for _, m := range modes { + testHandshakeReplay(t, config, numBlocks, m, true) + } +} + +// Test mockProxyApp should not panic when app return ABCIResponses with some empty ResponseDeliverTx +func TestMockProxyApp(t *testing.T) { + sim.CleanupFunc() //clean the test env created in TestSimulateValidatorsChange + logger := log.TestingLogger() + var validTxs, invalidTxs = 0, 0 + txIndex := 0 + + assert.NotPanics(t, func() { + abciResWithEmptyDeliverTx := new(sm.ABCIResponses) + 
abciResWithEmptyDeliverTx.DeliverTx = make([]*abci.ResponseDeliverTx, 0)
+		abciResWithEmptyDeliverTx.DeliverTx = append(abciResWithEmptyDeliverTx.DeliverTx, &abci.ResponseDeliverTx{})
+
+		// called when saveABCIResponses:
+		bytes := cdc.MustMarshalBinaryBare(abciResWithEmptyDeliverTx)
+		loadedAbciRes := new(sm.ABCIResponses)
+
+		// this also happens in sm.LoadABCIResponses
+		err := cdc.UnmarshalBinaryBare(bytes, loadedAbciRes)
+		require.NoError(t, err)
+
+		mock := newMockProxyApp([]byte("mock_hash"), loadedAbciRes)
+
+		abciRes := new(sm.ABCIResponses)
+		abciRes.DeliverTx = make([]*abci.ResponseDeliverTx, len(loadedAbciRes.DeliverTx))
+		// Execute transactions and get hash.
+		proxyCb := func(req *abci.Request, res *abci.Response) {
+			switch r := res.Value.(type) {
+			case *abci.Response_DeliverTx:
+				// TODO: make use of res.Log
+				// TODO: make use of this info
+				// Blocks may include invalid txs.
+				txRes := r.DeliverTx
+				if txRes.Code == abci.CodeTypeOK {
+					validTxs++
+				} else {
+					logger.Debug("Invalid tx", "code", txRes.Code, "log", txRes.Log)
+					invalidTxs++
+				}
+				abciRes.DeliverTx[txIndex] = txRes
+				txIndex++
+			}
+		}
+		mock.SetResponseCallback(proxyCb)
+
+		someTx := []byte("tx")
+		mock.DeliverTxAsync(someTx)
+	})
+	assert.True(t, validTxs == 1)
+	assert.True(t, invalidTxs == 0)
+}
 
 func tempWALWithData(data []byte) string {
 	walFile, err := ioutil.TempFile("", "wal")
 	if err != nil {
-		panic(fmt.Errorf("failed to create temp WAL file: %v", err))
+		panic(fmt.Sprintf("failed to create temp WAL file: %v", err))
 	}
 	_, err = walFile.Write(data)
 	if err != nil {
-		panic(fmt.Errorf("failed to write to temp WAL file: %v", err))
+		panic(fmt.Sprintf("failed to write to temp WAL file: %v", err))
 	}
 	if err := walFile.Close(); err != nil {
-		panic(fmt.Errorf("failed to close temp WAL file: %v", err))
+		panic(fmt.Sprintf("failed to close temp WAL file: %v", err))
 	}
 	return walFile.Name()
 }
 
 // Make some blocks. Start a fresh app and apply nBlocks blocks. Then restart the app and sync it up with the remaining blocks
-func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uint) {
-	walBody, err := WALWithNBlocks(t, NUM_BLOCKS)
-	require.NoError(t, err)
-	walFile := tempWALWithData(walBody)
-	config.Consensus.SetWalFile(walFile)
+func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uint, testValidatorsChange bool) {
+	var chain []*types.Block
+	var commits []*types.Commit
+	var store *mockBlockStore
+	var stateDB dbm.DB
+	var genesisState sm.State
+	if testValidatorsChange {
+		testConfig := ResetConfig(fmt.Sprintf("%s_%v_m", t.Name(), mode))
+		defer os.RemoveAll(testConfig.RootDir)
+		stateDB = dbm.NewMemDB()
+		genesisState = sim.GenesisState
+		config = sim.Config
+		chain = sim.Chain
+		commits = sim.Commits
+		store = newMockBlockStore(config, genesisState.ConsensusParams)
+	} else { // test single node
+		testConfig := ResetConfig(fmt.Sprintf("%s_%v_s", t.Name(), mode))
+		defer os.RemoveAll(testConfig.RootDir)
+		walBody, err := WALWithNBlocks(t, numBlocks)
+		require.NoError(t, err)
+		walFile := tempWALWithData(walBody)
+		config.Consensus.SetWalFile(walFile)
 
-	privVal := privval.LoadFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile())
+		privVal := privval.LoadFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile())
 
-	wal, err := NewWAL(walFile)
-	require.NoError(t, err)
-	wal.SetLogger(log.TestingLogger())
-	err = wal.Start()
-	require.NoError(t, err)
-	defer wal.Stop()
+		wal, err := NewWAL(walFile)
+		require.NoError(t, err)
+		wal.SetLogger(log.TestingLogger())
+		err = wal.Start()
+		require.NoError(t, err)
+		defer wal.Stop()
 
-	chain, commits, err := makeBlockchainFromWAL(wal)
-	require.NoError(t, err)
-
-	stateDB, state, store := stateAndStore(config, privVal.GetPubKey(), kvstore.ProtocolVersion)
+		chain, commits, err = makeBlockchainFromWAL(wal)
+		require.NoError(t, err)
+		stateDB, genesisState, store = stateAndStore(config, privVal.GetPubKey(), kvstore.ProtocolVersion)
+	}
 	store.chain = chain
 	store.commits = commits
 
+	state := genesisState.Copy()
 	// run the chain through state.ApplyBlock to build up the tendermint state
-	state = buildTMStateFromChain(config, stateDB, state, chain, mode)
+	state = buildTMStateFromChain(config, stateDB, state, chain, nBlocks, mode)
 	latestAppHash := state.AppHash
 
 	// make a new client creator
-	kvstoreApp := kvstore.NewPersistentKVStoreApplication(path.Join(config.DBDir(), "2"))
+	kvstoreApp := kvstore.NewPersistentKVStoreApplication(filepath.Join(config.DBDir(), fmt.Sprintf("replay_test_%d_%d_a", nBlocks, mode)))
+
 	clientCreator2 := proxy.NewLocalClientCreator(kvstoreApp)
 	if nBlocks > 0 {
 		// run nBlocks against a new client to build up the app state.
 		// use a throwaway tendermint state
 		proxyApp := proxy.NewAppConns(clientCreator2)
-		stateDB, state, _ := stateAndStore(config, privVal.GetPubKey(), kvstore.ProtocolVersion)
-		buildAppStateFromChain(proxyApp, stateDB, state, chain, nBlocks, mode)
+		stateDB1 := dbm.NewMemDB()
+		sm.SaveState(stateDB1, genesisState)
+		buildAppStateFromChain(proxyApp, stateDB1, genesisState, chain, nBlocks, mode)
 	}
 
 	// now start the app using the handshake - it should sync
@@ -391,8 +667,8 @@ func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uin
 		t.Fatalf("Expected app hashes to match after handshake/replay.
got %X, expected %X", res.LastBlockAppHash, latestAppHash) } - expectedBlocksToSync := NUM_BLOCKS - nBlocks - if nBlocks == NUM_BLOCKS && mode > 0 { + expectedBlocksToSync := numBlocks - nBlocks + if nBlocks == numBlocks && mode > 0 { expectedBlocksToSync++ } else if nBlocks > 0 && mode == 1 { expectedBlocksToSync++ @@ -407,7 +683,7 @@ func applyBlock(stateDB dbm.DB, st sm.State, blk *types.Block, proxyApp proxy.Ap testPartSize := types.BlockPartSizeBytes blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool) - blkID := types.BlockID{blk.Hash(), blk.MakePartSet(testPartSize).Header()} + blkID := types.BlockID{Hash: blk.Hash(), PartsHeader: blk.MakePartSet(testPartSize).Header()} newState, err := blockExec.ApplyBlock(st, blkID, blk) if err != nil { panic(err) @@ -423,12 +699,14 @@ func buildAppStateFromChain(proxyApp proxy.AppConns, stateDB dbm.DB, } defer proxyApp.Stop() + state.Version.Consensus.App = kvstore.ProtocolVersion //simulate handshake, receive app version validators := types.TM2PB.ValidatorUpdates(state.Validators) if _, err := proxyApp.Consensus().InitChainSync(abci.RequestInitChain{ Validators: validators, }); err != nil { panic(err) } + sm.SaveState(stateDB, state) //save height 1's validatorsInfo switch mode { case 0: @@ -451,21 +729,23 @@ func buildAppStateFromChain(proxyApp proxy.AppConns, stateDB dbm.DB, } -func buildTMStateFromChain(config *cfg.Config, stateDB dbm.DB, state sm.State, chain []*types.Block, mode uint) sm.State { +func buildTMStateFromChain(config *cfg.Config, stateDB dbm.DB, state sm.State, chain []*types.Block, nBlocks int, mode uint) sm.State { // run the whole chain against this client to build up the tendermint state - clientCreator := proxy.NewLocalClientCreator(kvstore.NewPersistentKVStoreApplication(path.Join(config.DBDir(), "1"))) + clientCreator := proxy.NewLocalClientCreator(kvstore.NewPersistentKVStoreApplication(filepath.Join(config.DBDir(), fmt.Sprintf("replay_test_%d_%d_t", nBlocks, mode)))) proxyApp := proxy.NewAppConns(clientCreator) if err := proxyApp.Start(); err != nil { panic(err) } defer proxyApp.Stop() + state.Version.Consensus.App = kvstore.ProtocolVersion //simulate handshake, receive app version validators := types.TM2PB.ValidatorUpdates(state.Validators) if _, err := proxyApp.Consensus().InitChainSync(abci.RequestInitChain{ Validators: validators, }); err != nil { panic(err) } + sm.SaveState(stateDB, state) //save height 1's validatorsInfo switch mode { case 0: @@ -489,28 +769,162 @@ func buildTMStateFromChain(config *cfg.Config, stateDB dbm.DB, state sm.State, c return state } +func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) { + // 1. Initialize tendermint and commit 3 blocks with the following app hashes: + // - 0x01 + // - 0x02 + // - 0x03 + config := ResetConfig("handshake_test_") + defer os.RemoveAll(config.RootDir) + privVal := privval.LoadFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile()) + const appVersion = 0x0 + stateDB, state, store := stateAndStore(config, privVal.GetPubKey(), appVersion) + genDoc, _ := sm.MakeGenesisDocFromFile(config.GenesisFile()) + state.LastValidators = state.Validators.Copy() + // mode = 0 for committing all the blocks + blocks := makeBlocks(3, &state, privVal) + store.chain = blocks + + // 2. 
Tendermint must panic if app returns wrong hash for the first block + // - RANDOM HASH + // - 0x02 + // - 0x03 + { + app := &badApp{numBlocks: 3, allHashesAreWrong: true} + clientCreator := proxy.NewLocalClientCreator(app) + proxyApp := proxy.NewAppConns(clientCreator) + err := proxyApp.Start() + require.NoError(t, err) + defer proxyApp.Stop() + + assert.Panics(t, func() { + h := NewHandshaker(stateDB, state, store, genDoc) + h.Handshake(proxyApp) + }) + } + + // 3. Tendermint must panic if app returns wrong hash for the last block + // - 0x01 + // - 0x02 + // - RANDOM HASH + { + app := &badApp{numBlocks: 3, onlyLastHashIsWrong: true} + clientCreator := proxy.NewLocalClientCreator(app) + proxyApp := proxy.NewAppConns(clientCreator) + err := proxyApp.Start() + require.NoError(t, err) + defer proxyApp.Stop() + + assert.Panics(t, func() { + h := NewHandshaker(stateDB, state, store, genDoc) + h.Handshake(proxyApp) + }) + } +} + +func makeBlocks(n int, state *sm.State, privVal types.PrivValidator) []*types.Block { + blocks := make([]*types.Block, 0) + + var ( + prevBlock *types.Block + prevBlockMeta *types.BlockMeta + ) + + appHeight := byte(0x01) + for i := 0; i < n; i++ { + height := int64(i + 1) + + block, parts := makeBlock(*state, prevBlock, prevBlockMeta, privVal, height) + blocks = append(blocks, block) + + prevBlock = block + prevBlockMeta = types.NewBlockMeta(block, parts) + + // update state + state.AppHash = []byte{appHeight} + appHeight++ + state.LastBlockHeight = height + } + + return blocks +} + +func makeVote(header *types.Header, blockID types.BlockID, valset *types.ValidatorSet, privVal types.PrivValidator) *types.Vote { + addr := privVal.GetPubKey().Address() + idx, _ := valset.GetByAddress(addr) + vote := &types.Vote{ + ValidatorAddress: addr, + ValidatorIndex: idx, + Height: header.Height, + Round: 1, + Timestamp: tmtime.Now(), + Type: types.PrecommitType, + BlockID: blockID, + } + + privVal.SignVote(header.ChainID, vote) + + return vote +} + +func makeBlock(state sm.State, lastBlock *types.Block, lastBlockMeta *types.BlockMeta, + privVal types.PrivValidator, height int64) (*types.Block, *types.PartSet) { + + lastCommit := types.NewCommit(types.BlockID{}, nil) + if height > 1 { + vote := makeVote(&lastBlock.Header, lastBlockMeta.BlockID, state.Validators, privVal).CommitSig() + lastCommit = types.NewCommit(lastBlockMeta.BlockID, []*types.CommitSig{vote}) + } + + return state.MakeBlock(height, []types.Tx{}, lastCommit, nil, state.Validators.GetProposer().Address) +} + +type badApp struct { + abci.BaseApplication + numBlocks byte + height byte + allHashesAreWrong bool + onlyLastHashIsWrong bool +} + +func (app *badApp) Commit() abci.ResponseCommit { + app.height++ + if app.onlyLastHashIsWrong { + if app.height == app.numBlocks { + return abci.ResponseCommit{Data: cmn.RandBytes(8)} + } + return abci.ResponseCommit{Data: []byte{app.height}} + } else if app.allHashesAreWrong { + return abci.ResponseCommit{Data: cmn.RandBytes(8)} + } + + panic("either allHashesAreWrong or onlyLastHashIsWrong must be set") +} + //-------------------------- // utils for making blocks func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) { + var height int64 + // Search for height marker - gr, found, err := wal.SearchForEndHeight(0, &WALSearchOptions{}) + gr, found, err := wal.SearchForEndHeight(height, &WALSearchOptions{}) if err != nil { return nil, nil, err } if !found { - return nil, nil, fmt.Errorf("WAL does not contain height %d.", 1) + return nil, nil, fmt.Errorf("WAL 
does not contain height %d", height) } defer gr.Close() // nolint: errcheck // log.Notice("Build a blockchain by reading from the WAL") - var blocks []*types.Block - var commits []*types.Commit - - var thisBlockParts *types.PartSet - var thisBlockCommit *types.Commit - var height int64 + var ( + blocks []*types.Block + commits []*types.Commit + thisBlockParts *types.PartSet + thisBlockCommit *types.Commit + ) dec := NewWALDecoder(gr) for { @@ -602,7 +1016,8 @@ func stateAndStore(config *cfg.Config, pubKey crypto.PubKey, appVersion version. stateDB := dbm.NewMemDB() state, _ := sm.MakeGenesisStateFromFile(config.GenesisFile()) state.Version.Consensus.App = appVersion - store := NewMockBlockStore(config, state.ConsensusParams) + store := newMockBlockStore(config, state.ConsensusParams) + sm.SaveState(stateDB, state) return stateDB, state, store } @@ -617,7 +1032,7 @@ type mockBlockStore struct { } // TODO: NewBlockStore(db.NewMemDB) ... -func NewMockBlockStore(config *cfg.Config, params types.ConsensusParams) *mockBlockStore { +func newMockBlockStore(config *cfg.Config, params types.ConsensusParams) *mockBlockStore { return &mockBlockStore{config, params, nil, nil} } @@ -626,7 +1041,7 @@ func (bs *mockBlockStore) LoadBlock(height int64) *types.Block { return bs.chain func (bs *mockBlockStore) LoadBlockMeta(height int64) *types.BlockMeta { block := bs.chain[height-1] return &types.BlockMeta{ - BlockID: types.BlockID{block.Hash(), block.MakePartSet(types.BlockPartSizeBytes).Header()}, + BlockID: types.BlockID{Hash: block.Hash(), PartsHeader: block.MakePartSet(types.BlockPartSizeBytes).Header()}, Header: block.Header, } } @@ -640,15 +1055,16 @@ func (bs *mockBlockStore) LoadSeenCommit(height int64) *types.Commit { return bs.commits[height-1] } -//---------------------------------------- +//--------------------------------------- +// Test handshake/init chain -func TestInitChainUpdateValidators(t *testing.T) { +func TestHandshakeUpdatesValidators(t *testing.T) { val, _ := types.RandValidator(true, 10) vals := types.NewValidatorSet([]*types.Validator{val}) app := &initChainApp{vals: types.TM2PB.ValidatorUpdates(vals)} clientCreator := proxy.NewLocalClientCreator(app) - config := ResetConfig("proxy_test_") + config := ResetConfig("handshake_test_") defer os.RemoveAll(config.RootDir) privVal := privval.LoadFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile()) stateDB, state, store := stateAndStore(config, privVal.GetPubKey(), 0x0) diff --git a/consensus/state.go b/consensus/state.go index 74ec092f..1f6bad9a 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -484,18 +484,9 @@ func (cs *ConsensusState) reconstructLastCommit(state sm.State) { return } seenCommit := cs.blockStore.LoadSeenCommit(state.LastBlockHeight) - lastPrecommits := types.NewVoteSet(state.ChainID, state.LastBlockHeight, seenCommit.Round(), types.PrecommitType, state.LastValidators) - for _, precommit := range seenCommit.Precommits { - if precommit == nil { - continue - } - added, err := lastPrecommits.AddVote(seenCommit.ToVote(precommit)) - if !added || err != nil { - cmn.PanicCrisis(fmt.Sprintf("Failed to reconstruct LastCommit: %v", err)) - } - } + lastPrecommits := types.CommitToVoteSet(state.ChainID, seenCommit, state.LastValidators) if !lastPrecommits.HasTwoThirdsMajority() { - cmn.PanicSanity("Failed to reconstruct LastCommit: Does not have +2/3 maj") + panic("Failed to reconstruct LastCommit: Does not have +2/3 maj") } cs.LastCommit = lastPrecommits } @@ -504,13 +495,13 @@ func (cs *ConsensusState) 
reconstructLastCommit(state sm.State) { // The round becomes 0 and cs.Step becomes cstypes.RoundStepNewHeight. func (cs *ConsensusState) updateToState(state sm.State) { if cs.CommitRound > -1 && 0 < cs.Height && cs.Height != state.LastBlockHeight { - cmn.PanicSanity(fmt.Sprintf("updateToState() expected state height of %v but found %v", + panic(fmt.Sprintf("updateToState() expected state height of %v but found %v", cs.Height, state.LastBlockHeight)) } if !cs.state.IsEmpty() && cs.state.LastBlockHeight+1 != cs.Height { // This might happen when someone else is mutating cs.state. // Someone forgot to pass in state.Copy() somewhere?! - cmn.PanicSanity(fmt.Sprintf("Inconsistent cs.state.LastBlockHeight+1 %v vs cs.Height %v", + panic(fmt.Sprintf("Inconsistent cs.state.LastBlockHeight+1 %v vs cs.Height %v", cs.state.LastBlockHeight+1, cs.Height)) } @@ -530,7 +521,7 @@ func (cs *ConsensusState) updateToState(state sm.State) { lastPrecommits := (*types.VoteSet)(nil) if cs.CommitRound > -1 && cs.Votes != nil { if !cs.Votes.Precommits(cs.CommitRound).HasTwoThirdsMajority() { - cmn.PanicSanity("updateToState(state) called but last Precommit round didn't have +2/3") + panic("updateToState(state) called but last Precommit round didn't have +2/3") } lastPrecommits = cs.Votes.Precommits(cs.CommitRound) } @@ -1047,7 +1038,7 @@ func (cs *ConsensusState) enterPrevoteWait(height int64, round int) { return } if !cs.Votes.Prevotes(round).HasTwoThirdsAny() { - cmn.PanicSanity(fmt.Sprintf("enterPrevoteWait(%v/%v), but Prevotes does not have any +2/3 votes", height, round)) + panic(fmt.Sprintf("enterPrevoteWait(%v/%v), but Prevotes does not have any +2/3 votes", height, round)) } logger.Info(fmt.Sprintf("enterPrevoteWait(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) @@ -1103,7 +1094,7 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) { // the latest POLRound should be this round. polRound, _ := cs.Votes.POLInfo() if polRound < round { - cmn.PanicSanity(fmt.Sprintf("This POLRound should be %v but got %v", round, polRound)) + panic(fmt.Sprintf("This POLRound should be %v but got %v", round, polRound)) } // +2/3 prevoted nil. Unlock and precommit nil. @@ -1137,7 +1128,7 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) { logger.Info("enterPrecommit: +2/3 prevoted proposal block. Locking", "hash", blockID.Hash) // Validate the block. if err := cs.blockExec.ValidateBlock(cs.state, cs.ProposalBlock); err != nil { - cmn.PanicConsensus(fmt.Sprintf("enterPrecommit: +2/3 prevoted for an invalid block: %v", err)) + panic(fmt.Sprintf("enterPrecommit: +2/3 prevoted for an invalid block: %v", err)) } cs.LockedRound = round cs.LockedBlock = cs.ProposalBlock @@ -1175,7 +1166,7 @@ func (cs *ConsensusState) enterPrecommitWait(height int64, round int) { return } if !cs.Votes.Precommits(round).HasTwoThirdsAny() { - cmn.PanicSanity(fmt.Sprintf("enterPrecommitWait(%v/%v), but Precommits does not have any +2/3 votes", height, round)) + panic(fmt.Sprintf("enterPrecommitWait(%v/%v), but Precommits does not have any +2/3 votes", height, round)) } logger.Info(fmt.Sprintf("enterPrecommitWait(%v/%v). 
Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) @@ -1214,7 +1205,7 @@ func (cs *ConsensusState) enterCommit(height int64, commitRound int) { blockID, ok := cs.Votes.Precommits(commitRound).TwoThirdsMajority() if !ok { - cmn.PanicSanity("RunActionCommit() expects +2/3 precommits") + panic("RunActionCommit() expects +2/3 precommits") } // The Locked* fields no longer matter. @@ -1247,7 +1238,7 @@ func (cs *ConsensusState) tryFinalizeCommit(height int64) { logger := cs.Logger.With("height", height) if cs.Height != height { - cmn.PanicSanity(fmt.Sprintf("tryFinalizeCommit() cs.Height: %v vs height: %v", cs.Height, height)) + panic(fmt.Sprintf("tryFinalizeCommit() cs.Height: %v vs height: %v", cs.Height, height)) } blockID, ok := cs.Votes.Precommits(cs.CommitRound).TwoThirdsMajority() @@ -1277,16 +1268,16 @@ func (cs *ConsensusState) finalizeCommit(height int64) { block, blockParts := cs.ProposalBlock, cs.ProposalBlockParts if !ok { - cmn.PanicSanity(fmt.Sprintf("Cannot finalizeCommit, commit does not have two thirds majority")) + panic(fmt.Sprintf("Cannot finalizeCommit, commit does not have two thirds majority")) } if !blockParts.HasHeader(blockID.PartsHeader) { - cmn.PanicSanity(fmt.Sprintf("Expected ProposalBlockParts header to be commit header")) + panic(fmt.Sprintf("Expected ProposalBlockParts header to be commit header")) } if !block.HashesTo(blockID.Hash) { - cmn.PanicSanity(fmt.Sprintf("Cannot finalizeCommit, ProposalBlock does not hash to commit hash")) + panic(fmt.Sprintf("Cannot finalizeCommit, ProposalBlock does not hash to commit hash")) } if err := cs.blockExec.ValidateBlock(cs.state, block); err != nil { - cmn.PanicConsensus(fmt.Sprintf("+2/3 committed an invalid block: %v", err)) + panic(fmt.Sprintf("+2/3 committed an invalid block: %v", err)) } cs.Logger.Info(fmt.Sprintf("Finalizing commit of block with %d txs", block.NumTxs), diff --git a/consensus/state_test.go b/consensus/state_test.go index fc1e3e94..87e351dc 100644 --- a/consensus/state_test.go +++ b/consensus/state_test.go @@ -239,7 +239,7 @@ func TestStateFullRound1(t *testing.T) { cs.SetEventBus(eventBus) eventBus.Start() - voteCh := subscribe(cs.eventBus, types.EventQueryVote) + voteCh := subscribeUnBuffered(cs.eventBus, types.EventQueryVote) propCh := subscribe(cs.eventBus, types.EventQueryCompleteProposal) newRoundCh := subscribe(cs.eventBus, types.EventQueryNewRound) @@ -267,7 +267,7 @@ func TestStateFullRoundNil(t *testing.T) { cs, vss := randConsensusState(1) height, round := cs.Height, cs.Round - voteCh := subscribe(cs.eventBus, types.EventQueryVote) + voteCh := subscribeUnBuffered(cs.eventBus, types.EventQueryVote) cs.enterPrevote(height, round) cs.startRoutines(4) @@ -286,7 +286,7 @@ func TestStateFullRound2(t *testing.T) { vs2 := vss[1] height, round := cs1.Height, cs1.Round - voteCh := subscribe(cs1.eventBus, types.EventQueryVote) + voteCh := subscribeUnBuffered(cs1.eventBus, types.EventQueryVote) newBlockCh := subscribe(cs1.eventBus, types.EventQueryNewBlock) // start round and wait for propose and prevote @@ -330,7 +330,7 @@ func TestStateLockNoPOL(t *testing.T) { timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - voteCh := subscribe(cs1.eventBus, types.EventQueryVote) + voteCh := subscribeUnBuffered(cs1.eventBus, types.EventQueryVote) proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) @@ -370,7 +370,7 @@ func 
TestStateLockNoPOL(t *testing.T) { // (note we're entering precommit for a second time this round) // but with invalid args. then we enterPrecommitWait, and the timeout to new round - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds()) + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) /// @@ -384,7 +384,7 @@ func TestStateLockNoPOL(t *testing.T) { incrementRound(vs2) // now we're on a new round and not the proposer, so wait for timeout - ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.TimeoutPropose.Nanoseconds()) + ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds()) rs := cs1.GetRoundState() @@ -403,7 +403,7 @@ func TestStateLockNoPOL(t *testing.T) { // now we're going to enter prevote again, but with invalid args // and then prevote wait, which should timeout. then wait for precommit - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrevote.Nanoseconds()) + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Prevote(round).Nanoseconds()) ensurePrecommit(voteCh, height, round) // precommit // the proposed block should still be locked and our precommit added @@ -416,7 +416,7 @@ func TestStateLockNoPOL(t *testing.T) { // (note we're entering precommit for a second time this round, but with invalid args // then we enterPrecommitWait and timeout into NewRound - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds()) + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) round = round + 1 // entering new round ensureNewRound(newRoundCh, height, round) @@ -441,7 +441,7 @@ func TestStateLockNoPOL(t *testing.T) { signAddVotes(cs1, types.PrevoteType, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2) ensurePrevote(voteCh, height, round) - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrevote.Nanoseconds()) + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Prevote(round).Nanoseconds()) ensurePrecommit(voteCh, height, round) // precommit validatePrecommit(t, cs1, round, 0, vss[0], nil, theBlockHash) // precommit nil but be locked on proposal @@ -449,7 +449,7 @@ func TestStateLockNoPOL(t *testing.T) { signAddVotes(cs1, types.PrecommitType, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2) // NOTE: conflicting precommits at same height ensurePrecommit(voteCh, height, round) - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds()) + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) cs2, _ := randConsensusState(2) // needed so generated block is different than locked block // before we time out into new round, set next proposal block @@ -482,7 +482,7 @@ func TestStateLockNoPOL(t *testing.T) { signAddVotes(cs1, types.PrevoteType, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) ensurePrevote(voteCh, height, round) - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrevote.Nanoseconds()) + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Prevote(round).Nanoseconds()) ensurePrecommit(voteCh, height, round) validatePrecommit(t, cs1, round, 0, vss[0], nil, theBlockHash) // precommit nil but locked on proposal @@ -542,7 +542,7 @@ func TestStateLockPOLRelock(t *testing.T) { incrementRound(vs2, vs3, vs4) // timeout to new round - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds()) + 
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) round = round + 1 // moving to the next round //XXX: this isnt guaranteed to get there before the timeoutPropose ... @@ -632,7 +632,7 @@ func TestStateLockPOLUnlock(t *testing.T) { propBlockParts := propBlock.MakePartSet(partSize) // timeout to new round - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds()) + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) rs = cs1.GetRoundState() lockedBlockHash := rs.LockedBlock.Hash() @@ -710,7 +710,7 @@ func TestStateLockPOLSafety1(t *testing.T) { // cs1 precommit nil ensurePrecommit(voteCh, height, round) - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds()) + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) t.Log("### ONTO ROUND 1") @@ -754,7 +754,7 @@ func TestStateLockPOLSafety1(t *testing.T) { signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4) - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds()) + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) incrementRound(vs2, vs3, vs4) round = round + 1 // moving to the next round @@ -767,7 +767,7 @@ func TestStateLockPOLSafety1(t *testing.T) { */ // timeout of propose - ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.TimeoutPropose.Nanoseconds()) + ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds()) // finish prevote ensurePrevote(voteCh, height, round) @@ -850,7 +850,7 @@ func TestStateLockPOLSafety2(t *testing.T) { incrementRound(vs2, vs3, vs4) // timeout of precommit wait to new round - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds()) + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) round = round + 1 // moving to the next round // in round 2 we see the polkad block from round 0 @@ -919,7 +919,7 @@ func TestProposeValidBlock(t *testing.T) { signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4) - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds()) + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) incrementRound(vs2, vs3, vs4) round = round + 1 // moving to the next round @@ -929,7 +929,7 @@ func TestProposeValidBlock(t *testing.T) { t.Log("### ONTO ROUND 2") // timeout of propose - ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.TimeoutPropose.Nanoseconds()) + ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds()) ensurePrevote(voteCh, height, round) validatePrevote(t, cs1, round, vss[0], propBlockHash) @@ -952,7 +952,7 @@ func TestProposeValidBlock(t *testing.T) { ensureNewRound(newRoundCh, height, round) t.Log("### ONTO ROUND 3") - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds()) + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) round = round + 1 // moving to the next round @@ -1004,7 +1004,7 @@ func TestSetValidBlockOnDelayedPrevote(t *testing.T) { // vs3 send prevote nil signAddVotes(cs1, types.PrevoteType, nil, types.PartSetHeader{}, vs3) - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrevote.Nanoseconds()) + ensureNewTimeout(timeoutWaitCh, height, round, 
cs1.config.Prevote(round).Nanoseconds()) ensurePrecommit(voteCh, height, round) // we should have precommitted @@ -1052,7 +1052,7 @@ func TestSetValidBlockOnDelayedProposal(t *testing.T) { startTestRound(cs1, cs1.Height, round) ensureNewRound(newRoundCh, height, round) - ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.TimeoutPropose.Nanoseconds()) + ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds()) ensurePrevote(voteCh, height, round) validatePrevote(t, cs1, round, vss[0], nil) @@ -1065,7 +1065,7 @@ func TestSetValidBlockOnDelayedProposal(t *testing.T) { signAddVotes(cs1, types.PrevoteType, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4) ensureNewValidBlock(validBlockCh, height, round) - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrevote.Nanoseconds()) + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Prevote(round).Nanoseconds()) ensurePrecommit(voteCh, height, round) validatePrecommit(t, cs1, round, -1, vss[0], nil, nil) @@ -1099,7 +1099,7 @@ func TestWaitingTimeoutOnNilPolka(t *testing.T) { signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4) - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds()) + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) ensureNewRound(newRoundCh, height, round+1) } @@ -1131,7 +1131,7 @@ func TestWaitingTimeoutProposeOnNewRound(t *testing.T) { rs := cs1.GetRoundState() assert.True(t, rs.Step == cstypes.RoundStepPropose) // P0 does not prevote before timeoutPropose expires - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPropose.Nanoseconds()) + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Propose(round).Nanoseconds()) ensurePrevote(voteCh, height, round) validatePrevote(t, cs1, round, vss[0], nil) @@ -1165,7 +1165,7 @@ func TestRoundSkipOnNilPolkaFromHigherRound(t *testing.T) { ensurePrecommit(voteCh, height, round) validatePrecommit(t, cs1, round, -1, vss[0], nil, nil) - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds()) + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) round = round + 1 // moving to the next round ensureNewRound(newRoundCh, height, round) @@ -1191,7 +1191,7 @@ func TestWaitTimeoutProposeOnNilPolkaForTheCurrentRound(t *testing.T) { incrementRound(vss[1:]...) 
signAddVotes(cs1, types.PrevoteType, nil, types.PartSetHeader{}, vs2, vs3, vs4)
 
-	ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.TimeoutPropose.Nanoseconds())
+	ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds())
 
 	ensurePrevote(voteCh, height, round)
 	validatePrevote(t, cs1, round, vss[0], nil)
 
@@ -1332,7 +1332,7 @@ func TestStartNextHeightCorrectly(t *testing.T) {
 
 	cs1.txNotifier.(*fakeTxNotifier).Notify()
 
-	ensureNewTimeout(timeoutProposeCh, height+1, round, cs1.config.TimeoutPropose.Nanoseconds())
+	ensureNewTimeout(timeoutProposeCh, height+1, round, cs1.config.Propose(round).Nanoseconds())
 	rs = cs1.GetRoundState()
 	assert.False(t, rs.TriggeredTimeoutPrecommit, "triggeredTimeoutPrecommit should be false at the beginning of each round")
 }
@@ -1375,12 +1375,8 @@ func TestResetTimeoutPrecommitUponNewHeight(t *testing.T) {
 	// add precommits
 	signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2)
 	signAddVotes(cs1, types.PrecommitType, theBlockHash, theBlockParts, vs3)
-	time.Sleep(5 * time.Millisecond)
 	signAddVotes(cs1, types.PrecommitType, theBlockHash, theBlockParts, vs4)
 
-	rs = cs1.GetRoundState()
-	assert.True(t, rs.TriggeredTimeoutPrecommit)
-
 	ensureNewBlockHeader(newBlockHeader, height, theBlockHash)
 
 	prop, propBlock := decideProposal(cs1, vs2, height+1, 0)
@@ -1519,7 +1515,7 @@ func TestStateHalt1(t *testing.T) {
 	incrementRound(vs2, vs3, vs4)
 
 	// timeout to new round
-	ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds())
+	ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds())
 
 	round = round + 1 // moving to the next round
 
@@ -1627,3 +1623,12 @@ func subscribe(eventBus *types.EventBus, q tmpubsub.Query) <-chan tmpubsub.Messa
 	}
 	return sub.Out()
 }
+
+// subscribeUnBuffered subscribes the test client to the given query and returns an unbuffered channel with cap = 0.
+func subscribeUnBuffered(eventBus *types.EventBus, q tmpubsub.Query) <-chan tmpubsub.Message { + sub, err := eventBus.SubscribeUnbuffered(context.Background(), testSubscriber, q) + if err != nil { + panic(fmt.Sprintf("failed to subscribe %s to %v", testSubscriber, q)) + } + return sub.Out() +} diff --git a/consensus/types/height_vote_set.go b/consensus/types/height_vote_set.go index eee013ee..35c9a486 100644 --- a/consensus/types/height_vote_set.go +++ b/consensus/types/height_vote_set.go @@ -6,7 +6,6 @@ import ( "strings" "sync" - cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/types" ) @@ -83,7 +82,7 @@ func (hvs *HeightVoteSet) SetRound(round int) { hvs.mtx.Lock() defer hvs.mtx.Unlock() if hvs.round != 0 && (round < hvs.round+1) { - cmn.PanicSanity("SetRound() must increment hvs.round") + panic("SetRound() must increment hvs.round") } for r := hvs.round + 1; r <= round; r++ { if _, ok := hvs.roundVoteSets[r]; ok { @@ -96,7 +95,7 @@ func (hvs *HeightVoteSet) SetRound(round int) { func (hvs *HeightVoteSet) addRound(round int) { if _, ok := hvs.roundVoteSets[round]; ok { - cmn.PanicSanity("addRound() for an existing round") + panic("addRound() for an existing round") } // log.Debug("addRound(round)", "round", round) prevotes := types.NewVoteSet(hvs.chainID, hvs.height, round, types.PrevoteType, hvs.valSet) @@ -169,8 +168,7 @@ func (hvs *HeightVoteSet) getVoteSet(round int, type_ types.SignedMsgType) *type case types.PrecommitType: return rvs.Precommits default: - cmn.PanicSanity(fmt.Sprintf("Unexpected vote type %X", type_)) - return nil + panic(fmt.Sprintf("Unexpected vote type %X", type_)) } } diff --git a/consensus/wal_generator.go b/consensus/wal_generator.go index ab6ca9ef..ce5791f3 100644 --- a/consensus/wal_generator.go +++ b/consensus/wal_generator.go @@ -10,11 +10,13 @@ import ( "time" "github.com/pkg/errors" + "github.com/tendermint/tendermint/abci/example/kvstore" cfg "github.com/tendermint/tendermint/config" cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/libs/db" "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/mock" "github.com/tendermint/tendermint/privval" "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" @@ -45,14 +47,16 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) { if err != nil { return errors.Wrap(err, "failed to read genesis file") } - stateDB := db.NewMemDB() blockStoreDB := db.NewMemDB() + stateDB := blockStoreDB state, err := sm.MakeGenesisState(genDoc) if err != nil { return errors.Wrap(err, "failed to make genesis state") } state.Version.Consensus.App = kvstore.ProtocolVersion + sm.SaveState(stateDB, state) blockStore := tmstore.NewBlockStore(blockStoreDB) + proxyApp := proxy.NewAppConns(proxy.NewLocalClientCreator(app)) proxyApp.SetLogger(logger.With("module", "proxy")) if err := proxyApp.Start(); err != nil { @@ -66,7 +70,7 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) { return errors.Wrap(err, "failed to start event bus") } defer eventBus.Stop() - mempool := sm.MockMempool{} + mempool := mock.Mempool{} evpool := sm.MockEvidencePool{} blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool) consensusState := NewConsensusState(config.Consensus, state.Copy(), blockExec, blockStore, mempool, evpool) diff --git a/crypto/merkle/simple_tree.go b/crypto/merkle/simple_tree.go 
index 5de514b5..03dc9d9d 100644
--- a/crypto/merkle/simple_tree.go
+++ b/crypto/merkle/simple_tree.go
@@ -20,6 +20,77 @@ func SimpleHashFromByteSlices(items [][]byte) []byte {
 	}
 }
 
+// SimpleHashFromByteSlicesIterative is an iterative alternative to
+// SimpleHashFromByteSlices motivated by potential performance improvements.
+// (#2611) suggested that an iterative version of
+// SimpleHashFromByteSlices would be faster, presumably because
+// we can envision some overhead accumulating from stack
+// frames and function calls. Additionally, a recursive algorithm risks
+// hitting the stack limit and causing a stack overflow should the tree
+// be too large.
+//
+// Provided here is an iterative alternative, a simple test to assert
+// correctness and a benchmark. On the performance side, there appears to
+// be no overall difference:
+//
+// BenchmarkSimpleHashAlternatives/recursive-4                20000 77677 ns/op
+// BenchmarkSimpleHashAlternatives/iterative-4                20000 76802 ns/op
+//
+// On the surface it might seem that the additional overhead is due to
+// the different allocation patterns of the implementations. The recursive
+// version uses a single [][]byte slice which it then re-slices at each level of the tree.
+// The iterative version copies the [][]byte once within the function and
+// then rewrites sub-slices of that array at each level of the tree.
+//
+// Experimenting by modifying the code to simply calculate the
+// hash and not store the result shows little to no difference in performance.
+//
+// These preliminary results suggest:
+//
+// 1. The performance of SimpleHashFromByteSlices is pretty good
+// 2. Go has low overhead for recursive functions
+// 3. The performance of the SimpleHashFromByteSlices routine is dominated
+//    by the actual hashing of data
+//
+// Although this work is in no way exhaustive, point #3 suggests that
+// optimization of this routine would need to take an alternative
+// approach to make significant improvements on the current performance.
+//
+// Finally, considering that the recursive implementation is easier to
+// read, it might not be worthwhile to switch to a less intuitive
+// implementation for so little benefit.
+func SimpleHashFromByteSlicesIterative(input [][]byte) []byte {
+	items := make([][]byte, len(input))
+
+	for i, leaf := range input {
+		items[i] = leafHash(leaf)
+	}
+
+	size := len(items)
+	for {
+		switch size {
+		case 0:
+			return nil
+		case 1:
+			return items[0]
+		default:
+			rp := 0 // read position
+			wp := 0 // write position
+			for rp < size {
+				if rp+1 < size {
+					items[wp] = innerHash(items[rp], items[rp+1])
+					rp += 2
+				} else {
+					items[wp] = items[rp]
+					rp += 1
+				}
+				wp += 1
+			}
+			size = wp
+		}
+	}
+}
+
 // SimpleHashFromMap computes a Merkle tree from sorted map.
// Like calling SimpleHashFromHashers with // `item = []byte(Hash(key) | Hash(value))`, diff --git a/crypto/merkle/simple_tree_test.go b/crypto/merkle/simple_tree_test.go index 9abe321c..5bbe294a 100644 --- a/crypto/merkle/simple_tree_test.go +++ b/crypto/merkle/simple_tree_test.go @@ -70,6 +70,42 @@ func TestSimpleProof(t *testing.T) { } } +func TestSimpleHashAlternatives(t *testing.T) { + + total := 100 + + items := make([][]byte, total) + for i := 0; i < total; i++ { + items[i] = testItem(cmn.RandBytes(tmhash.Size)) + } + + rootHash1 := SimpleHashFromByteSlicesIterative(items) + rootHash2 := SimpleHashFromByteSlices(items) + require.Equal(t, rootHash1, rootHash2, "Unmatched root hashes: %X vs %X", rootHash1, rootHash2) +} + +func BenchmarkSimpleHashAlternatives(b *testing.B) { + total := 100 + + items := make([][]byte, total) + for i := 0; i < total; i++ { + items[i] = testItem(cmn.RandBytes(tmhash.Size)) + } + + b.ResetTimer() + b.Run("recursive", func(b *testing.B) { + for i := 0; i < b.N; i++ { + _ = SimpleHashFromByteSlices(items) + } + }) + + b.Run("iterative", func(b *testing.B) { + for i := 0; i < b.N; i++ { + _ = SimpleHashFromByteSlicesIterative(items) + } + }) +} + func Test_getSplitPoint(t *testing.T) { tests := []struct { length int diff --git a/crypto/multisig/multisignature.go b/crypto/multisig/multisignature.go index 0d179689..1e3bef4e 100644 --- a/crypto/multisig/multisignature.go +++ b/crypto/multisig/multisignature.go @@ -1,7 +1,8 @@ package multisig import ( - "errors" + "fmt" + "strings" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/multisig/bitarray" @@ -53,13 +54,19 @@ func (mSig *Multisignature) AddSignature(sig []byte, index int) { mSig.Sigs[newSigIndex] = sig } -// AddSignatureFromPubKey adds a signature to the multisig, -// at the index in keys corresponding to the provided pubkey. +// AddSignatureFromPubKey adds a signature to the multisig, at the index in +// keys corresponding to the provided pubkey. 
func (mSig *Multisignature) AddSignatureFromPubKey(sig []byte, pubkey crypto.PubKey, keys []crypto.PubKey) error { index := getIndex(pubkey, keys) if index == -1 { - return errors.New("provided key didn't exist in pubkeys") + keysStr := make([]string, len(keys)) + for i, k := range keys { + keysStr[i] = fmt.Sprintf("%X", k.Bytes()) + } + + return fmt.Errorf("provided key %X doesn't exist in pubkeys: \n%s", pubkey.Bytes(), strings.Join(keysStr, "\n")) } + mSig.AddSignature(sig, index) return nil } diff --git a/crypto/multisig/threshold_pubkey_test.go b/crypto/multisig/threshold_pubkey_test.go index 2d2632ab..d1d7e803 100644 --- a/crypto/multisig/threshold_pubkey_test.go +++ b/crypto/multisig/threshold_pubkey_test.go @@ -36,30 +36,68 @@ func TestThresholdMultisigValidCases(t *testing.T) { for tcIndex, tc := range cases { multisigKey := NewPubKeyMultisigThreshold(tc.k, tc.pubkeys) multisignature := NewMultisig(len(tc.pubkeys)) + for i := 0; i < tc.k-1; i++ { signingIndex := tc.signingIndices[i] - multisignature.AddSignatureFromPubKey(tc.signatures[signingIndex], tc.pubkeys[signingIndex], tc.pubkeys) - require.False(t, multisigKey.VerifyBytes(tc.msg, multisignature.Marshal()), - "multisig passed when i < k, tc %d, i %d", tcIndex, i) - multisignature.AddSignatureFromPubKey(tc.signatures[signingIndex], tc.pubkeys[signingIndex], tc.pubkeys) - require.Equal(t, i+1, len(multisignature.Sigs), - "adding a signature for the same pubkey twice increased signature count by 2, tc %d", tcIndex) + require.NoError( + t, + multisignature.AddSignatureFromPubKey(tc.signatures[signingIndex], tc.pubkeys[signingIndex], tc.pubkeys), + ) + require.False( + t, + multisigKey.VerifyBytes(tc.msg, multisignature.Marshal()), + "multisig passed when i < k, tc %d, i %d", tcIndex, i, + ) + require.NoError( + t, + multisignature.AddSignatureFromPubKey(tc.signatures[signingIndex], tc.pubkeys[signingIndex], tc.pubkeys), + ) + require.Equal( + t, + i+1, + len(multisignature.Sigs), + "adding a signature for the same pubkey twice increased signature count by 2, tc %d", tcIndex, + ) } - require.False(t, multisigKey.VerifyBytes(tc.msg, multisignature.Marshal()), - "multisig passed with k - 1 sigs, tc %d", tcIndex) - multisignature.AddSignatureFromPubKey(tc.signatures[tc.signingIndices[tc.k]], tc.pubkeys[tc.signingIndices[tc.k]], tc.pubkeys) - require.True(t, multisigKey.VerifyBytes(tc.msg, multisignature.Marshal()), - "multisig failed after k good signatures, tc %d", tcIndex) + + require.False( + t, + multisigKey.VerifyBytes(tc.msg, multisignature.Marshal()), + "multisig passed with k - 1 sigs, tc %d", tcIndex, + ) + require.NoError( + t, + multisignature.AddSignatureFromPubKey(tc.signatures[tc.signingIndices[tc.k]], tc.pubkeys[tc.signingIndices[tc.k]], tc.pubkeys), + ) + require.True( + t, + multisigKey.VerifyBytes(tc.msg, multisignature.Marshal()), + "multisig failed after k good signatures, tc %d", tcIndex, + ) + for i := tc.k + 1; i < len(tc.signingIndices); i++ { signingIndex := tc.signingIndices[i] - multisignature.AddSignatureFromPubKey(tc.signatures[signingIndex], tc.pubkeys[signingIndex], tc.pubkeys) - require.Equal(t, tc.passAfterKSignatures[i-tc.k-1], - multisigKey.VerifyBytes(tc.msg, multisignature.Marshal()), - "multisig didn't verify as expected after k sigs, tc %d, i %d", tcIndex, i) - multisignature.AddSignatureFromPubKey(tc.signatures[signingIndex], tc.pubkeys[signingIndex], tc.pubkeys) - require.Equal(t, i+1, len(multisignature.Sigs), - "adding a signature for the same pubkey twice increased signature count by 2, 
tc %d", tcIndex) + require.NoError( + t, + multisignature.AddSignatureFromPubKey(tc.signatures[signingIndex], tc.pubkeys[signingIndex], tc.pubkeys), + ) + require.Equal( + t, + tc.passAfterKSignatures[i-tc.k-1], + multisigKey.VerifyBytes(tc.msg, multisignature.Marshal()), + "multisig didn't verify as expected after k sigs, tc %d, i %d", tcIndex, i, + ) + require.NoError( + t, + multisignature.AddSignatureFromPubKey(tc.signatures[signingIndex], tc.pubkeys[signingIndex], tc.pubkeys), + ) + require.Equal( + t, + i+1, + len(multisignature.Sigs), + "adding a signature for the same pubkey twice increased signature count by 2, tc %d", tcIndex, + ) } } } diff --git a/crypto/xsalsa20symmetric/symmetric.go b/crypto/xsalsa20symmetric/symmetric.go index 10a0f6f3..73dc9dec 100644 --- a/crypto/xsalsa20symmetric/symmetric.go +++ b/crypto/xsalsa20symmetric/symmetric.go @@ -7,7 +7,6 @@ import ( "golang.org/x/crypto/nacl/secretbox" "github.com/tendermint/tendermint/crypto" - cmn "github.com/tendermint/tendermint/libs/common" ) // TODO, make this into a struct that implements crypto.Symmetric. @@ -19,7 +18,7 @@ const secretLen = 32 // The ciphertext is (secretbox.Overhead + 24) bytes longer than the plaintext. func EncryptSymmetric(plaintext []byte, secret []byte) (ciphertext []byte) { if len(secret) != secretLen { - cmn.PanicSanity(fmt.Sprintf("Secret must be 32 bytes long, got len %v", len(secret))) + panic(fmt.Sprintf("Secret must be 32 bytes long, got len %v", len(secret))) } nonce := crypto.CRandBytes(nonceLen) nonceArr := [nonceLen]byte{} @@ -36,7 +35,7 @@ func EncryptSymmetric(plaintext []byte, secret []byte) (ciphertext []byte) { // The ciphertext is (secretbox.Overhead + 24) bytes longer than the plaintext. func DecryptSymmetric(ciphertext []byte, secret []byte) (plaintext []byte, err error) { if len(secret) != secretLen { - cmn.PanicSanity(fmt.Sprintf("Secret must be 32 bytes long, got len %v", len(secret))) + panic(fmt.Sprintf("Secret must be 32 bytes long, got len %v", len(secret))) } if len(ciphertext) <= secretbox.Overhead+nonceLen { return nil, errors.New("Ciphertext is too short") diff --git a/docs/app-dev/abci-cli.md b/docs/app-dev/abci-cli.md index 3e6cced8..b09b9a11 100644 --- a/docs/app-dev/abci-cli.md +++ b/docs/app-dev/abci-cli.md @@ -16,7 +16,6 @@ cd $GOPATH/src/github.com/tendermint git clone https://github.com/tendermint/tendermint.git cd tendermint make get_tools -make get_vendor_deps make install_abci ``` diff --git a/docs/app-dev/getting-started.md b/docs/app-dev/getting-started.md index 5509a701..eff70db6 100644 --- a/docs/app-dev/getting-started.md +++ b/docs/app-dev/getting-started.md @@ -28,7 +28,6 @@ Then run go get github.com/tendermint/tendermint cd $GOPATH/src/github.com/tendermint/tendermint make get_tools -make get_vendor_deps make install_abci ``` @@ -138,7 +137,7 @@ The result should look like: Note the `value` in the result (`YWJjZA==`); this is the base64-encoding of the ASCII of `abcd`. You can verify this in a python 2 shell by running `"YWJjZA==".decode('base64')` or in python 3 shell by running -`import codecs; codecs.decode("YWJjZA==", 'base64').decode('ascii')`. +`import codecs; codecs.decode(b"YWJjZA==", 'base64').decode('ascii')`. Stay tuned for a future release that [makes this output more human-readable](https://github.com/tendermint/tendermint/issues/1794). 
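Since this repository's language is Go, the same decoding check can also be done with a short Go program; this snippet is illustrative only and not part of the patch above, using nothing beyond the standard library:

```go
package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	// "YWJjZA==" is the base64-encoded query result shown in the docs;
	// decoding it recovers the original ASCII bytes "abcd".
	raw, err := base64.StdEncoding.DecodeString("YWJjZA==")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(raw)) // prints: abcd
}
```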
diff --git a/docs/app-dev/subscribing-to-events-via-websocket.md b/docs/app-dev/subscribing-to-events-via-websocket.md
index d745769c..890b061b 100644
--- a/docs/app-dev/subscribing-to-events-via-websocket.md
+++ b/docs/app-dev/subscribing-to-events-via-websocket.md
@@ -2,7 +2,7 @@
 
 Tendermint emits different events, to which you can subscribe via
 [Websocket](https://en.wikipedia.org/wiki/WebSocket). This can be useful
-for third-party applications (for analysys) or inspecting state.
+for third-party applications (for analysis) or inspecting state.
 
 [List of events](https://godoc.org/github.com/tendermint/tendermint/types#pkg-constants)
 
diff --git a/docs/architecture/adr-025-commit.md b/docs/architecture/adr-025-commit.md
index 3f252795..6db039d4 100644
--- a/docs/architecture/adr-025-commit.md
+++ b/docs/architecture/adr-025-commit.md
@@ -1,14 +1,18 @@
 # ADR 025 Commit
 
 ## Context
+
 Currently the `Commit` structure contains a lot of potentially redundant or unnecessary data.
-In particular it contains an array of every precommit from the validators, which includes many copies of the same data. Such as `Height`, `Round`, `Type`, and `BlockID`. Also the `ValidatorIndex` could be derived from the vote's position in the array, and the `ValidatorAddress` could potentially be derived from runtime context. The only truely necessary data is the `Signature` and `Timestamp` associated with each `Vote`.
+It contains a list of precommits from every validator, where the precommit
+includes the whole `Vote` structure. Thus each of the commit height, round,
+type, and blockID are repeated for every validator, and could be deduplicated.
 
 ```
 type Commit struct {
     BlockID    BlockID `json:"block_id"`
     Precommits []*Vote `json:"precommits"`
 }
+
 type Vote struct {
     ValidatorAddress Address   `json:"validator_address"`
     ValidatorIndex   int       `json:"validator_index"`
@@ -26,7 +30,9 @@ References:
 [#2226](https://github.com/tendermint/tendermint/issues/2226)
 
 ## Proposed Solution
+
 We can improve efficiency by replacing the usage of the `Vote` struct with a subset of each vote, and by storing the constant values (`Height`, `Round`, `BlockID`) in the Commit itself.
+
 ```
 type Commit struct {
     Height     int64
@@ -34,42 +40,56 @@ type Commit struct {
     BlockID    BlockID      `json:"block_id"`
     Precommits []*CommitSig `json:"precommits"`
 }
+
 type CommitSig struct {
+    BlockID          BlockIDFlag
     ValidatorAddress Address
-    Signature []byte
     Timestamp time.Time
+    Signature        []byte
 }
+
+
+// indicate which BlockID the signature is for
+type BlockIDFlag int
+
+const (
+    BlockIDFlagAbsent BlockIDFlag = iota // vote is not included in the Commit.Precommits
+    BlockIDFlagCommit                    // voted for the Commit.BlockID
+    BlockIDFlagNil                       // voted for nil
+)
+
 ```
-Continuing to store the `ValidatorAddress` in the `CommitSig` takes up extra space, but simplifies the process and allows for easier debugging.
+
+Note the need for an extra byte to indicate whether the signature is for the BlockID or for nil.
+This byte can also be used to indicate an absent vote, rather than using a nil object like we currently do,
+which has been [problematic for compatibility between Amino and proto3](https://github.com/tendermint/go-amino/issues/260).
+
+Note we also continue to store the `ValidatorAddress` in the `CommitSig`.
+While this still takes 20 bytes per signature, it ensures that the Commit has all
+the information necessary to reconstruct a Vote, which simplifies mapping between Commit and Vote objects
+and helps with debugging. It also may be necessary for the light client to know which address a signature corresponds to if
+it is trying to verify a current commit with an older validator set.
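+For intuition, here is a sketch of how a full `Vote` could be rebuilt from a `Commit` under
+this proposal. The `voteFromCommit` helper and the `PrecommitType` constant are illustrative
+assumptions, not part of the proposal, and error handling is elided:
+
+```
+func voteFromCommit(c Commit, idx int) *Vote {
+    sig := c.Precommits[idx]
+    vote := &Vote{
+        ValidatorAddress: sig.ValidatorAddress,
+        ValidatorIndex:   idx,      // recovered from the position in the array
+        Height:           c.Height, // deduplicated fields come from the Commit itself
+        Round:            c.Round,
+        Timestamp:        sig.Timestamp,
+        Type:             PrecommitType,
+        Signature:        sig.Signature,
+    }
+    if sig.BlockID == BlockIDFlagCommit {
+        vote.BlockID = c.BlockID // voted for the committed block
+    } // BlockIDFlagNil and BlockIDFlagAbsent leave the vote's BlockID empty
+    return vote
+}
+```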
 
 ## Status
+
 Proposed
 
 ## Consequences
 
 ### Positive
 
-The size of a `Commit` transmitted over the network goes from:
 
-|BlockID| + n * (|Address| + |ValidatorIndex| + |Height| + |Round| + |Timestamp| + |Type| + |BlockID| + |Signature|)
+Removing the Type/Height/Round/Index and the BlockID saves roughly 80 bytes per precommit.
+It varies because some integers are varint. The BlockID contains two 32-byte hashes and an integer,
+and the Height is 8 bytes.
 
-to:
+For a chain with 100 validators, that's up to 8kB in savings per block!
 
-|BlockID|+|Height|+|Round| + n*(|Address| + |Signature| + |Timestamp|)
-
-This saves:
-
-n * (|BlockID| + |ValidatorIndex| + |Type|) + (n-1) * (Height + Round)
-
-In the current context, this would concretely be:
-(assuming all ints are int64, and hashes are 32 bytes)
-
-n *(72 + 8 + 1 + 8 + 8) - 16 = n * 97 - 16
-
-With 100 validators this is a savings of almost 10KB on every block.
-
 ### Negative
-This would add some complexity to the processing and verification of blocks and commits, as votes would have to be reconstructed to be verified and gossiped. The reconstruction could be relatively straightforward, only requiring the copying of data from the `Commit` itself into the newly created `Vote`.
+
+- Large breaking change to the block and commit structure
+- Requires differentiating in code between the Vote and CommitSig objects, which may add some complexity (votes need to be reconstructed to be verified and gossiped)
 
 ### Neutral
-This design leaves the `ValidatorAddress` in the `CommitSig` and in the `Vote`. These could be removed at some point for additional savings, but that would introduce more complexity, and make printing of `Commit` and `VoteSet` objects less informative, which could harm debugging efficiency and UI/UX.
\ No newline at end of file
+
+- Commit.Precommits no longer contains nil values
diff --git a/docs/architecture/adr-037-deliver-block.md b/docs/architecture/adr-037-deliver-block.md
new file mode 100644
index 00000000..31907c9a
--- /dev/null
+++ b/docs/architecture/adr-037-deliver-block.md
@@ -0,0 +1,100 @@
+# ADR 037: Deliver Block
+
+Author: Daniil Lashin (@danil-lashin)
+
+## Changelog
+
+13-03-2019: Initial draft
+
+## Context
+
+Initial conversation: https://github.com/tendermint/tendermint/issues/2901
+
+Some applications can handle transactions in parallel, or at least some
+part of tx processing can be parallelized. Currently this is not possible for
+developers, because Tendermint delivers txs to the application sequentially.
+
+## Decision
+
+Tendermint currently has `BeginBlock`, `EndBlock`, `Commit` and `DeliverTx` steps
+while executing a block. This doc proposes merging these steps into a single `DeliverBlock`
+step. It will allow developers of applications to decide how they want to
+execute transactions (in parallel or sequentially). It will also simplify and
+speed up communication between the application and Tendermint.
+
+As @jaekwon [mentioned](https://github.com/tendermint/tendermint/issues/2901#issuecomment-477746128)
+in the discussion, not all applications will benefit from this solution. In some cases,
+when an application handles transactions sequentially, it may slow down the blockchain,
+because the application needs to wait until the full block is transmitted before it can start
+processing it. Also, in the case of a complete change of ABCI, we would need to force all the apps
+to change their implementation completely. That's why I propose to introduce one more ABCI
+type.
+
+## Implementation Changes
+
+In addition to the default application interface, which now has this structure
+
+```go
+type Application interface {
+	// Info and Mempool methods...
+
+	// Consensus Connection
+	InitChain(RequestInitChain) ResponseInitChain    // Initialize blockchain with validators and other info from TendermintCore
+	BeginBlock(RequestBeginBlock) ResponseBeginBlock // Signals the beginning of a block
+	DeliverTx(tx []byte) ResponseDeliverTx           // Deliver a tx for full processing
+	EndBlock(RequestEndBlock) ResponseEndBlock       // Signals the end of a block, returns changes to the validator set
+	Commit() ResponseCommit                          // Commit the state and return the application Merkle root hash
+}
+```
+
+this doc proposes to add one more:
+
+```go
+type Application interface {
+	// Info and Mempool methods...
+
+	// Consensus Connection
+	InitChain(RequestInitChain) ResponseInitChain          // Initialize blockchain with validators and other info from TendermintCore
+	DeliverBlock(RequestDeliverBlock) ResponseDeliverBlock // Deliver full block
+	Commit() ResponseCommit                                // Commit the state and return the application Merkle root hash
+}
+
+type RequestDeliverBlock struct {
+	Hash                []byte
+	Header              Header
+	Txs                 Txs
+	LastCommitInfo      LastCommitInfo
+	ByzantineValidators []Evidence
+}
+
+type ResponseDeliverBlock struct {
+	ValidatorUpdates      []ValidatorUpdate
+	ConsensusParamUpdates *ConsensusParams
+	Tags                  []common.KVPair
+	TxResults             []ResponseDeliverTx
+}
+
+```
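+For illustration only, a hypothetical application could fan independent txs out to
+goroutines inside `DeliverBlock`. A minimal sketch, assuming an app-specific
+`processTx` helper and txs whose results do not depend on each other (neither is
+specified by this proposal):
+
+```go
+func (app *MyApp) DeliverBlock(req RequestDeliverBlock) ResponseDeliverBlock {
+	results := make([]ResponseDeliverTx, len(req.Txs))
+	var wg sync.WaitGroup
+	for i, tx := range req.Txs {
+		wg.Add(1)
+		go func(i int, tx []byte) {
+			defer wg.Done()
+			// processTx is an assumed helper; determinism requires that
+			// txs processed in parallel are truly independent.
+			results[i] = app.processTx(tx)
+		}(i, tx)
+	}
+	wg.Wait()
+	return ResponseDeliverBlock{TxResults: results}
+}
+```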
+Also, we will need to add a new config param, which will specify which kind of ABCI
+the application uses. For example, it can be `abci_type`. Then we will have 2 types:
+
+- `advanced` - current ABCI
+- `simple` - proposed implementation
+
+## Status
+
+In review
+
+## Consequences
+
+### Positive
+
+- much simpler introduction and tutorials for new developers (instead of implementing 5 methods they
+will need to implement only 3)
+- txs can be handled in parallel
+- simpler interface
+- faster communication between Tendermint and the application
+
+### Negative
+
+- Tendermint should now support 2 kinds of ABCI
diff --git a/docs/architecture/adr-037-peer-behaviour.md b/docs/architecture/adr-039-peer-behaviour.md
similarity index 57%
rename from docs/architecture/adr-037-peer-behaviour.md
rename to docs/architecture/adr-039-peer-behaviour.md
index 36b02448..4ad051a3 100644
--- a/docs/architecture/adr-037-peer-behaviour.md
+++ b/docs/architecture/adr-039-peer-behaviour.md
@@ -1,7 +1,8 @@
-# ADR 037: Peer Behaviour Interface
+# ADR 039: Peer Behaviour Interface
 
 ## Changelog
 * 07-03-2019: Initial draft
+* 14-03-2019: Updates from feedback
 
 ## Context
 
@@ -19,36 +20,46 @@
 and ties up the reactors in a larger dependency graph when testing.
 
 Introduce a `PeerBehaviour` interface and concrete implementations which
 provide methods for reactors to signal peer behaviour without direct
-coupling `p2p.Switch`. Introduce a ErrPeer to provide
-concrete reasons for stopping peers.
+coupling to `p2p.Switch`. Introduce an ErrorBehaviourPeer to provide
+concrete reasons for stopping peers. Introduce a GoodBehaviourPeer to provide
+concrete ways in which a peer contributes.
 
 ### Implementation Changes
 
 PeerBehaviour then becomes an interface for signaling peer errors as well
 as for marking peers as `good`.
-XXX: It might be better to pass p2p.ID instead of the whole peer but as
-a first draft maintain the underlying implementation as much as
-possible.
-
 ```go
 type PeerBehaviour interface {
-    Errored(peer Peer, reason ErrPeer)
-    MarkPeerAsGood(peer Peer)
+    Behaved(peer Peer, reason GoodBehaviourPeer)
+    Errored(peer Peer, reason ErrorBehaviourPeer)
 }
 ```
 
 Instead of signaling peers to stop with arbitrary reasons:
 `reason interface{}`
 
-We introduce a concrete error type ErrPeer:
+We introduce a concrete error type ErrorBehaviourPeer:
 
 ```go
-type ErrPeer int
+type ErrorBehaviourPeer int
 
 const (
-    ErrPeerUnknown = iota
-    ErrPeerBadMessage
-    ErrPeerMessageOutofOrder
+    ErrorBehaviourUnknown = iota
+    ErrorBehaviourBadMessage
+    ErrorBehaviourMessageOutOfOrder
+    ...
+)
+```
+
+To provide additional information on the ways a peer contributed, we introduce
+the GoodBehaviourPeer type.
+
+```go
+type GoodBehaviourPeer int
+
+const (
+    GoodBehaviourVote = iota
+    GoodBehaviourBlockPart
+    ...
 )
 ```
 
@@ -60,11 +71,11 @@ type SwitchedPeerBehaviour struct {
     sw *Switch
 }
 
-func (spb *SwitchedPeerBehaviour) Errored(peer Peer, reason ErrPeer) {
+func (spb *SwitchedPeerBehaviour) Errored(peer Peer, reason ErrorBehaviourPeer) {
     spb.sw.StopPeerForError(peer, reason)
 }
 
-func (spb *SwitchedPeerBehaviour) MarkPeerAsGood(peer Peer) {
+func (spb *SwitchedPeerBehaviour) Behaved(peer Peer, reason GoodBehaviourPeer) {
     spb.sw.MarkPeerAsGood(peer)
 }
 
@@ -75,51 +86,54 @@ func NewSwitchedPeerBehaviour(sw *Switch) *SwitchedPeerBehaviour {
     }
 }
 ```
 
-Reactors, which are often difficult to unit test[2](#references). could use an implementation which exposes the signals produced by the reactor in
+Reactors, which are often difficult to unit test[2](#references), could use an implementation which exposes the signals produced by the reactor in
 manufactured scenarios:
 
 ```go
-type PeerErrors map[Peer][]ErrPeer
-type GoodPeers map[Peer]bool
+type ErrorBehaviours map[Peer][]ErrorBehaviourPeer
+type GoodBehaviours map[Peer][]GoodBehaviourPeer
 
 type StorePeerBehaviour struct {
-    pe PeerErrors
-    gp GoodPeers
+    eb ErrorBehaviours
+    gb GoodBehaviours
 }
 
 func NewStorePeerBehaviour() *StorePeerBehaviour{
     return &StorePeerBehaviour{
-        pe: make(PeerErrors),
-        gp: GoodPeers{},
+        eb: make(ErrorBehaviours),
+        gb: make(GoodBehaviours),
     }
 }
 
-func (spb StorePeerBehaviour) Errored(peer Peer, reason ErrPeer) {
-    if _, ok := spb.pe[peer]; !ok {
-        spb.pe[peer] = []ErrPeer{reason}
+func (spb StorePeerBehaviour) Errored(peer Peer, reason ErrorBehaviourPeer) {
+    if _, ok := spb.eb[peer]; !ok {
+        spb.eb[peer] = []ErrorBehaviourPeer{reason}
     } else {
-        spb.pe[peer] = append(spb.pe[peer], reason)
+        spb.eb[peer] = append(spb.eb[peer], reason)
     }
 }
 
-func (mpb *StorePeerBehaviour) GetPeerErrors() PeerErrors {
-    return mpb.pe
+func (mpb *StorePeerBehaviour) GetErrored() ErrorBehaviours {
+    return mpb.eb
 }
 
-func (spb *StorePeerBehaviour) MarkPeerAsGood(peer Peer) {
-    if _, ok := spb.gp[peer]; !ok {
-        spb.gp[peer] = true
+
+func (spb StorePeerBehaviour) Behaved(peer Peer, reason GoodBehaviourPeer) {
+    if _, ok := spb.gb[peer]; !ok {
+        spb.gb[peer] = []GoodBehaviourPeer{reason}
+    } else {
+        spb.gb[peer] = append(spb.gb[peer], reason)
     }
 }
 
-func (spb *StorePeerBehaviour) GetGoodPeers() GoodPeers {
-    return spb.gp
+func (spb *StorePeerBehaviour) GetBehaved() GoodBehaviours {
+    return spb.gb
 }
 ```
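+With the store-backed implementation, a reactor unit test can assert on the recorded
+signals directly. A hypothetical sketch; the reactor constructor, channel id, mock peer,
+and `require` wiring are assumptions for illustration, not part of this ADR:
+
+```go
+func TestReactorRecordsBadMessage(t *testing.T) {
+    pb := NewStorePeerBehaviour()
+    reactor := NewTestReactor(pb) // assumed: the reactor under test accepts a PeerBehaviour
+    peer := newMockPeer()         // assumed test double
+
+    reactor.Receive(testChannelID, peer, []byte("garbage"))
+
+    errs := pb.GetErrored()
+    require.Len(t, errs[peer], 1)
+    require.Equal(t, ErrorBehaviourBadMessage, errs[peer][0])
+}
+```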
 
 ## Status
 
-Proposed
+Accepted
 
 ## Consequences
 
diff --git a/docs/architecture/adr-040-blockchain-reactor-refactor.md b/docs/architecture/adr-040-blockchain-reactor-refactor.md
new file mode 100644
index 00000000..520d55b5
--- /dev/null
+++ b/docs/architecture/adr-040-blockchain-reactor-refactor.md
@@ -0,0 +1,534 @@
+# ADR 040: Blockchain Reactor Refactor
+
+## Changelog
+
+19-03-2019: Initial draft
+
+## Context
+
+The Blockchain Reactor's high level responsibility is to enable peers who are far behind the current state of the
+blockchain to quickly catch up by downloading many blocks in parallel from its peers, verifying block correctness, and
+executing them against the ABCI application. We call the protocol executed by the Blockchain Reactor `fast-sync`.
+The current architecture diagram of the blockchain reactor can be found here:
+
+![Blockchain Reactor Architecture Diagram](img/bc-reactor.png)
+
+The current architecture consists of dozens of routines and is tightly coupled to the `Switch`, making it almost
+impossible to write unit tests. Current tests require setting up complex dependency graphs and dealing with concurrency.
+Note that having dozens of routines is overkill in this case, as most of the time the routines sit idle waiting for
+something to happen (a message to arrive or a timeout to expire). Due to the dependency on the `Switch`, testing relatively
+complex network scenarios and failures (for example adding and removing peers) is a very complex task and frequently leads
+to tests with non-deterministic behavior ([#3400]). The inability to write proper tests lowers confidence in
+the code, and this has resulted in several issues (some have been fixed in the meantime and some are still open):
+[#3400], [#2897], [#2896], [#2699], [#2888], [#2457], [#2622], [#2026].
+
+## Decision
+
+To remedy these issues we plan a major refactor of the blockchain reactor. The proposed architecture is largely inspired
+by ADR-30 and is presented in the following diagram:
+![Blockchain Reactor Refactor Diagram](img/bc-reactor-refactor.png)
+
+We suggest a concurrency architecture where the core algorithm (we call it `Controller`) is extracted into a finite
+state machine. The active routine of the reactor is called `Executor` and is responsible for receiving and sending
+messages from/to peers and triggering timeouts. Which messages should be sent and which timeouts triggered is determined mostly
+by the `Controller`. The exception is the `Peer Heartbeat` mechanism, which is the `Executor`'s responsibility. The heartbeat
+mechanism is used to remove slow and unresponsive peers from the peer list. Writing unit tests is simpler with
+this architecture, as most of the critical logic is part of the `Controller` function. We expect that the simpler concurrency
+architecture will not have a significant negative effect on the performance of this reactor (to be confirmed by
+experimental evaluation).
+
+
+### Implementation changes
+
+We assume the following system model for the "fast sync" protocol:
+
+* a node is connected to a random subset of all nodes that represents its peer set. Some nodes are correct and some
+  might be faulty. We don't make assumptions about the ratio of faulty nodes, i.e., it is possible that all nodes in some
+  peer set are faulty.
+* we assume that communication between correct nodes is synchronous, i.e., if a correct node `p` sends a message `m` to
+  a correct node `q` at time `t`, then `q` will receive the message at the latest by time `t+Delta`, where `Delta` is a system
+  parameter that is known by network participants. `Delta` is normally chosen to be an order of magnitude higher than
+  the real (maximum) communication delay between correct nodes. Therefore if a correct node `p` sends a request message
+  to a correct node `q` at time `t` and there is no corresponding reply by time `t + 2*Delta`, then `p` can assume
+  that `q` is faulty. Note that the network assumptions for the consensus reactor are different (we assume a partially
+  synchronous model there).
+
+The requirements for the "fast sync" protocol are formally specified as follows:
+
+- `Correctness`: If a correct node `p` is connected to a correct node `q` for a long enough period of time, then `p`
+  will eventually download all requested blocks from `q`.
+- `Termination`: If the set of peers of a correct node `p` is stable (no new nodes are added to the peer set of `p`) for
+  a long enough period of time, then the protocol eventually terminates.
+- `Fairness`: A correct node `p` sends requests for blocks to all peers from its peer set.
+
+As explained above, the `Executor` is responsible for sending and receiving messages that are part of the `fast-sync`
+protocol. The following messages are exchanged as part of the `fast-sync` protocol:
+
+``` go
+type Message int
+const (
+    MessageUnknown Message = iota
+    MessageStatusRequest
+    MessageStatusResponse
+    MessageBlockRequest
+    MessageBlockResponse
+)
+```
+`MessageStatusRequest` is sent periodically to all peers as a request for a peer to provide its current height. It is
+part of the `Peer Heartbeat` mechanism, and a failure to respond to this message in a timely manner results in a peer being
+removed from the peer set. Note that the `Peer Heartbeat` mechanism is used only while a peer is in `fast-sync` mode. We assume
+here the existence of a mechanism that allows a node to inform its peers that it is in `fast-sync` mode.
+
+``` go
+type MessageStatusRequest struct {
+    SeqNum int64 // sequence number of the request
+}
+```
+`MessageStatusResponse` is sent as a response to `MessageStatusRequest` to inform the requester about the peer's current
+height.
+
+``` go
+type MessageStatusResponse struct {
+    SeqNum int64 // sequence number of the corresponding request
+    Height int64 // current peer height
+}
+```
+
+`MessageBlockRequest` is used to request a block and the corresponding commit certificate at a given height.
+
+``` go
+type MessageBlockRequest struct {
+    Height int64
+}
+```
+
+`MessageBlockResponse` is the response to the corresponding block request. In addition to providing the block and the
+corresponding commit certificate, it also contains the current peer height.
+
+``` go
+type MessageBlockResponse struct {
+    Height     int64
+    Block      Block
+    Commit     Commit
+    PeerHeight int64
+}
+```
+
+In addition to sending and receiving messages and running the `HeartBeat` mechanism, the `Executor` also manages timeouts
+that are triggered upon `Controller` request. The `Controller` is then informed once a timeout expires.
+
+``` go
+type TimeoutTrigger int
+const (
+    TimeoutUnknown TimeoutTrigger = iota
+    TimeoutResponseTrigger
+    TimeoutTerminationTrigger
+)
+```
+
+The `Controller` can be modelled as a function with clearly defined inputs:
+
+* `State` - the current state of the node. Contains data about connected peers and their behavior, pending requests,
+  received blocks, etc.
+* `Event` - significant events in the network.
+
+producing clear outputs:
+
+* `State` - the updated state of the node,
+* `MessageToSend` - a signal of what message to send and to which peer,
+* `TimeoutTrigger` - a signal that a timeout should be triggered.
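+
+A minimal sketch of how the `Executor` could drive the `handleEvent` function defined
+below, assuming hypothetical helpers (`events`, `sendToPeer`, `startTimer`) that this
+ADR does not prescribe, and treating the message and timeout outputs as nilable, as
+the pseudocode below does:
+
+``` go
+func (e *Executor) loop() {
+    for {
+        event := <-e.events // peer messages and expired timeouts, converted to Events
+        newState, msg, timeout, err := handleEvent(e.state, event)
+        if err != nil {
+            // deal with the error, e.g., log it or stop the offending peer
+            continue
+        }
+        e.state = newState
+        if msg != nil {
+            e.sendToPeer(msg.PeerID, msg.Message) // forward the message to the p2p layer
+        }
+        if timeout != nil {
+            e.startTimer(timeout) // schedule the requested timeout trigger
+        }
+    }
+}
+```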
+
+
+We consider the following `Event` types:
+
+``` go
+type Event int
+const (
+    EventUnknown Event = iota
+    EventStatusResponse
+    EventBlockRequest
+    EventBlockResponse
+    EventRemovePeer
+    EventTimeoutResponse
+    EventTimeoutTermination
+)
+```
+
+`EventStatusResponse` event is generated once `MessageStatusResponse` is received by the `Executor`.
+
+``` go
+type EventStatusResponse struct {
+    PeerID ID
+    Height int64
+}
+```
+
+`EventBlockRequest` event is generated once `MessageBlockRequest` is received by the `Executor`.
+
+``` go
+type EventBlockRequest struct {
+    Height int64
+    PeerID p2p.ID
+}
+```
+`EventBlockResponse` event is generated upon reception of a `MessageBlockResponse` message by the `Executor`.
+
+``` go
+type EventBlockResponse struct {
+    Height     int64
+    Block      Block
+    Commit     Commit
+    PeerID     ID
+    PeerHeight int64
+}
+```
+`EventRemovePeer` is generated by the `Executor` to signal that the connection to a peer is closed due to peer misbehavior.
+
+``` go
+type EventRemovePeer struct {
+    PeerID ID
+}
+```
+`EventTimeoutResponse` is generated by the `Executor` to signal that a timeout triggered by `TimeoutResponseTrigger` has
+expired.
+
+``` go
+type EventTimeoutResponse struct {
+    PeerID ID
+    Height int64
+}
+```
+`EventTimeoutTermination` is generated by the `Executor` to signal that a timeout triggered by `TimeoutTerminationTrigger`
+has expired.
+
+``` go
+type EventTimeoutTermination struct {
+    Height int64
+}
+```
+
+`MessageToSend` is just a wrapper around the `Message` type that contains the id of the peer to which the message should be sent.
+
+``` go
+type MessageToSend struct {
+    PeerID  ID
+    Message Message
+}
+```
+
+The Controller state machine can be in two modes: `ModeFastSync` when
+a node is trying to catch up with the network by downloading committed blocks,
+and `ModeConsensus` in which it executes the Tendermint consensus protocol. We
+consider that `fast sync` mode terminates once the Controller switches to
+`ModeConsensus`.
+
+``` go
+type Mode int
+const (
+    ModeUnknown Mode = iota
+    ModeFastSync
+    ModeConsensus
+)
+```
+The `Controller` manages the following state:
+
+``` go
+type ControllerState struct {
+    Height             int64            // the first block that is not committed
+    Mode               Mode             // mode of operation
+    PeerMap            map[ID]PeerStats // map of peer IDs to peer statistics
+    MaxRequestPending  int64            // maximum height of the pending requests
+    FailedRequests     []int64          // list of failed block requests
+    PendingRequestsNum int              // total number of pending requests
+    Store              []BlockInfo      // contains list of downloaded blocks
+    Executor           BlockExecutor    // stores, verifies and executes blocks
+}
+```
+
+The `PeerStats` data structure keeps, for every peer, its current height and the currently pending block request (if any).
+
+``` go
+type PeerStats struct {
+    Height         int64
+    PendingRequest int64 // the height of the request sent to this peer, or -1 if none
+}
+```
+
+The `BlockInfo` data structure is used to store information (as part of the block store) about downloaded blocks: from which peer
+a block and the corresponding commit certificate were received.
+``` go
+type BlockInfo struct {
+    Block  Block
+    Commit Commit
+    PeerID ID // the peer from which we received the corresponding Block and Commit
+}
+```
+
+The `Controller` is initialized by providing an initial height (`startHeight`) from which it will start downloading
+blocks from peers, and the current state of the `BlockExecutor`.
+
+``` go
+func NewControllerState(startHeight int64, executor BlockExecutor) ControllerState {
+    state = ControllerState {}
+    state.Height = startHeight
+    state.Mode = ModeFastSync
+    state.MaxRequestPending = startHeight - 1
+    state.PendingRequestsNum = 0
+    state.Executor = executor
+    // initialize state.PeerMap, state.FailedRequests and state.Store to empty data structures
+    return state
+}
+```
+
+The core protocol logic is given by the following function:
+
+``` go
+func handleEvent(state ControllerState, event Event) (ControllerState, Message, TimeoutTrigger, Error) {
+    msg = nil
+    timeout = nil
+    error = nil
+
+    switch state.Mode {
+    case ModeConsensus:
+        switch event := event.(type) {
+        case EventBlockRequest:
+            msg = createBlockResponseMessage(state, event)
+            return state, msg, timeout, error
+        default:
+            error = "Only respond to BlockRequests while in ModeConsensus!"
+            return state, msg, timeout, error
+        }
+
+    case ModeFastSync:
+        switch event := event.(type) {
+        case EventBlockRequest:
+            msg = createBlockResponseMessage(state, event)
+            return state, msg, timeout, error
+
+        case EventStatusResponse:
+            return handleEventStatusResponse(event, state)
+
+        case EventRemovePeer:
+            return handleEventRemovePeer(event, state)
+
+        case EventBlockResponse:
+            return handleEventBlockResponse(event, state)
+
+        case EventTimeoutResponse:
+            return handleEventResponseTimeout(event, state)
+
+        case EventTimeoutTermination:
+            // The termination timeout is triggered in case of an empty peer set or when there are no pending requests.
+            // If this timeout expires, and in the meantime no new peers were added and no new pending requests were
+            // made, then `fast-sync` mode terminates by switching to `ModeConsensus`.
+            // Note that the termination timeout should be higher than the response timeout.
+            if state.Height == event.Height && state.PendingRequestsNum == 0 { state.Mode = ModeConsensus }
+            return state, msg, timeout, error
+
+        default:
+            error = "Received unknown event type!"
+            return state, msg, timeout, error
+        }
+    }
+}
+```
+
+``` go
+func createBlockResponseMessage(state ControllerState, event BlockRequest) MessageToSend {
+    msgToSend = nil
+    if _, ok := state.PeerMap[event.PeerID]; !ok { peerStats = PeerStats{-1, -1} }
+    if state.Executor.ContainsBlockWithHeight(event.Height) && event.Height > peerStats.Height {
+        peerStats.Height = event.Height
+        msg = MessageBlockResponse{
+            Height:     event.Height,
+            Block:      state.Executor.getBlock(event.Height),
+            Commit:     state.Executor.getCommit(event.Height),
+            PeerHeight: state.Height - 1,
+        }
+        msgToSend = MessageToSend { event.PeerID, msg }
+    }
+    state.PeerMap[event.PeerID] = peerStats
+    return msgToSend
+}
+```
+
+``` go
+func handleEventStatusResponse(event EventStatusResponse, state ControllerState) (ControllerState, MessageToSend, TimeoutTrigger, Error) {
+    if _, ok := state.PeerMap[event.PeerID]; !ok {
+        peerStats = PeerStats{ -1, -1 }
+    } else {
+        peerStats = state.PeerMap[event.PeerID]
+    }
+
+    if event.Height > peerStats.Height { peerStats.Height = event.Height }
+    // if there is no pending request for this peer, try to send it a request for a block
+    if peerStats.PendingRequest == -1 {
+        msg = createBlockRequestMessage(state, event.PeerID, peerStats.Height)
+        // msg is nil if no request for a block can be made to this peer at this point in time
+        if msg != nil {
+            peerStats.PendingRequest = msg.Height
+            state.PendingRequestsNum++
+            // when a request for a block is sent to a peer, a response timeout is triggered. If no corresponding
+            // block is sent by the peer during the response timeout period, then the peer is considered faulty
+            // and is removed from the peer set.
+            timeout = ResponseTimeoutTrigger{ msg.PeerID, msg.Height, PeerTimeout }
+        } else if state.PendingRequestsNum == 0 {
+            // if there are no pending requests and no new request can be placed with the peer, the termination
+            // timeout is triggered. If the termination timeout expires and we are still at the same height and
+            // there are no pending requests, "fast-sync" mode is finished and we switch to `ModeConsensus`.
+            timeout = TerminationTimeoutTrigger{ state.Height, TerminationTimeout }
+        }
+    }
+    state.PeerMap[event.PeerID] = peerStats
+    return state, msg, timeout, error
+}
+```
+
+``` go
+func handleEventRemovePeer(event EventRemovePeer, state ControllerState) (ControllerState, MessageToSend, TimeoutTrigger, Error) {
+    if _, ok := state.PeerMap[event.PeerID]; ok {
+        pendingRequest = state.PeerMap[event.PeerID].PendingRequest
+        // if a peer is removed from the peer set, its pending request is declared failed and added to the
+        // `FailedRequests` list so it can be retried.
+        if pendingRequest != -1 {
+            add(state.FailedRequests, pendingRequest)
+            state.PendingRequestsNum--
+        }
+        delete(state.PeerMap, event.PeerID)
+        // if the peer set is empty after removal of this peer, then the termination timeout is triggered.
+        if state.PeerMap.isEmpty() {
+            timeout = TerminationTimeoutTrigger{ state.Height, TerminationTimeout }
+        }
+    } else { error = "Removing unknown peer!" }
+    return state, msg, timeout, error
+}
+```
+
+``` go
+func handleEventBlockResponse(event EventBlockResponse, state ControllerState) (ControllerState, MessageToSend, TimeoutTrigger, Error) {
+    if _, ok := state.PeerMap[event.PeerID]; ok {
+        peerStats = state.PeerMap[event.PeerID]
+        // when the expected block arrives from a peer, it is added to the store so it can be verified and,
+        // if correct, executed afterwards.
+        if peerStats.PendingRequest == event.Height {
+            peerStats.PendingRequest = -1
+            state.PendingRequestsNum--
+            if event.PeerHeight > peerStats.Height { peerStats.Height = event.PeerHeight }
+            state.Store[event.Height] = BlockInfo{ event.Block, event.Commit, event.PeerID }
+            // blocks are verified sequentially, so adding a block to the store does not mean that it will be
+            // verified immediately, as some of the previous blocks might be missing.
+            state = verifyBlocks(state) // this can lead to event.PeerID being removed from the peer list
+            if _, ok := state.PeerMap[event.PeerID]; ok {
+                // we try to identify a new request for a block that can be sent to this peer
+                msg = createBlockRequestMessage(state, event.PeerID, peerStats.Height)
+                if msg != nil {
+                    peerStats.PendingRequest = msg.Height
+                    state.PendingRequestsNum++
+                    // if a request for a block is made, a response timeout is triggered
+                    timeout = ResponseTimeoutTrigger{ msg.PeerID, msg.Height, PeerTimeout }
+                } else if state.PeerMap.isEmpty() || state.PendingRequestsNum == 0 {
+                    // if the peer map is empty (the peer might have been removed because block verification
+                    // failed) or there are no pending requests, the termination timeout is triggered.
+                    timeout = TerminationTimeoutTrigger{ state.Height, TerminationTimeout }
+                }
+                state.PeerMap[event.PeerID] = peerStats
+            }
+        } else { error = "Received Block from wrong peer!" }
+    } else { error = "Received Block from unknown peer!" }
+
+    return state, msg, timeout, error
+}
+```
+
+``` go
+func handleEventResponseTimeout(event EventTimeoutResponse, state ControllerState) (ControllerState, MessageToSend, TimeoutTrigger, Error) {
+    if _, ok := state.PeerMap[event.PeerID]; ok {
+        peerStats = state.PeerMap[event.PeerID]
+        // if a response timeout expires and the peer hasn't delivered the block, the peer is removed from the
+        // peer list and the request is added to the `FailedRequests` list so the block can be downloaded from
+        // another peer
+        if peerStats.PendingRequest == event.Height {
+            add(state.FailedRequests, peerStats.PendingRequest)
+            delete(state.PeerMap, event.PeerID)
+            state.PendingRequestsNum--
+            // if the peer set is empty, then the termination timeout is triggered
+            if state.PeerMap.isEmpty() {
+                timeout = TerminationTimeoutTrigger{ state.Height, TerminationTimeout }
+            }
+        }
+    }
+    return state, msg, timeout, error
+}
+```
+
+``` go
+func createBlockRequestMessage(state ControllerState, peerID ID, peerHeight int64) MessageToSend {
+    msg = nil
+    blockHeight = -1
+    r = find request in state.FailedRequests such that r <= peerHeight // returns `nil` if there is no such request
+    // if there is a height in the failed requests that can be downloaded from this peer, send a request for it
+    if r != nil {
+        blockHeight = r
+        delete(state.FailedRequests, r)
+    } else if state.MaxRequestPending < peerHeight {
+        // if the height of the maximum pending request is smaller than the peer height, then ask the peer for the next block
+        state.MaxRequestPending++
+        blockHeight = state.MaxRequestPending // increment state.MaxRequestPending and then return the new value
+    }
+
+    if blockHeight > -1 { msg = MessageToSend { peerID, MessageBlockRequest { blockHeight } } }
+    return msg
+}
+```
+
+``` go
+func verifyBlocks(state ControllerState) ControllerState {
+    done = false
+    for !done {
+        block = state.Store[state.Height]
+        if block != nil {
+            verified = verify block.Block using block.Commit // returns `true` if verification succeeds, `false` otherwise
+
+            if verified {
+                block.Execute() // executing a block is a costly operation, so it might make sense to execute it asynchronously
+                state.Height++
+            } else {
+                // if block verification failed, then the height is added to `FailedRequests` and the peer the
+                // block came from is removed from the peer set
+                add(state.FailedRequests, state.Height)
+                state.Store[state.Height] = nil
+                if _, ok := state.PeerMap[block.PeerID]; ok {
+                    pendingRequest = state.PeerMap[block.PeerID].PendingRequest
+                    // if there is a pending request sent to the peer that is about to be removed from the peer set,
+                    // add it to `FailedRequests` as well
+                    if pendingRequest != -1 {
+                        add(state.FailedRequests, pendingRequest)
+                        state.PendingRequestsNum--
+                    }
+                    delete(state.PeerMap, block.PeerID)
+                }
+                done = true
+            }
+        } else { done = true }
+    }
+    return state
+}
+```
+
+In the proposed architecture the `Controller` is not an active task, i.e., it is called by the `Executor`. Depending on
+the return values of the `Controller`, the `Executor` will send a message to some peer (`msg != nil`), trigger a
+timeout (`timeout != nil`) or deal with errors (`error != nil`).
+In case a timeout is triggered, the `Executor` will provide the corresponding timeout event as input to the `Controller`
+once the timeout expires.
+
+
+## Status
+
+Draft.
+
+## Consequences
+
+### Positive
+
+- isolated implementation of the algorithm
+- improved testability - simpler to prove correctness
+- clearer separation of concerns - easier to reason about
+
+### Negative
+
+### Neutral
diff --git a/docs/architecture/adr-041-proposer-selection-via-abci.md b/docs/architecture/adr-041-proposer-selection-via-abci.md
new file mode 100644
index 00000000..58bf20de
--- /dev/null
+++ b/docs/architecture/adr-041-proposer-selection-via-abci.md
@@ -0,0 +1,29 @@
+# ADR 041: Application should be in charge of validator set
+
+## Changelog
+
+
+## Context
+
+Currently Tendermint is in charge of the validator set and proposer selection. The application can only update the validator set at EndBlock time.
+To support the Light Client, the application should make sure that at least 2/3 of the validators remain the same between rounds.
+
+The application should have full control over validator set changes and proposer selection. In each round the application can provide the list of validators for the next rounds, in order, with their power. The proposer is the first in the list; in case the proposer is offline, the next one can propose the proposal, and so on.
+
+## Decision
+
+## Status
+
+## Consequences
+
+Tendermint is no longer in charge of the validator set and its changes. The application should provide the correct information.
+However, Tendermint can provide a pseudo-randomness algorithm to help the application select the proposer in each round.
+
+### Positive
+
+### Negative
+
+### Neutral
+
+## References
+
diff --git a/docs/architecture/img/bc-reactor-refactor.png b/docs/architecture/img/bc-reactor-refactor.png
new file mode 100644
index 00000000..4cd84a02
Binary files /dev/null and b/docs/architecture/img/bc-reactor-refactor.png differ
diff --git a/docs/architecture/img/bc-reactor.png b/docs/architecture/img/bc-reactor.png
new file mode 100644
index 00000000..f7fe0f81
Binary files /dev/null and b/docs/architecture/img/bc-reactor.png differ
diff --git a/docs/introduction/install.md b/docs/introduction/install.md
index 3005a734..4f35ffef 100644
--- a/docs/introduction/install.md
+++ b/docs/introduction/install.md
@@ -29,7 +29,6 @@ cd tendermint
 
 ```
 make get_tools
-make get_vendor_deps
 ```
 
 ### Compile
@@ -71,7 +70,6 @@ To upgrade, run
 ```
 cd $GOPATH/src/github.com/tendermint/tendermint
 git pull origin master
-make get_vendor_deps
 make install
 ```
 
@@ -79,9 +77,7 @@ make install
 
 Install [LevelDB](https://github.com/google/leveldb) (minimum version is 1.7).
 
-### Ubuntu
-
-Install LevelDB with snappy (optionally):
+Install LevelDB with snappy (optionally). Below are commands for Ubuntu:
 
 ```
 sudo apt-get update
@@ -100,23 +96,23 @@ wget https://github.com/google/leveldb/archive/v1.20.tar.gz && \
   rm -f v1.20.tar.gz
 ```
 
-Set database backend to cleveldb:
+Set the database backend to `cleveldb`:
 
 ```
 # config/config.toml
 db_backend = "cleveldb"
 ```
 
-To install Tendermint, run
+To install Tendermint, run:
 
 ```
 CGO_LDFLAGS="-lsnappy" make install_c
 ```
 
-or run
+or run:
 
 ```
 CGO_LDFLAGS="-lsnappy" make build_c
 ```
 
-to put the binary in `./build`.
+which puts the binary in `./build`.
diff --git a/docs/networks/terraform-and-ansible.md b/docs/networks/terraform-and-ansible.md
index c08ade17..122591be 100644
--- a/docs/networks/terraform-and-ansible.md
+++ b/docs/networks/terraform-and-ansible.md
@@ -62,16 +62,18 @@ There are several roles that are self-explanatory:
 
 First, we configure our droplets by specifying the paths for tendermint
 (`BINARY`) and the node files (`CONFIGDIR`).
The latter expects any number of
-directories named `node0, node1, ...` and so on (equal to the
-number of droplets created). For this example, we use pre-created files
-from [this
-directory](https://github.com/tendermint/tendermint/tree/master/docs/examples).
-To create your own files, use either the `tendermint testnet` command or
-review [manual deployments](./deploy-testnets.md).
+directories named `node0, node1, ...` and so on (equal to the
+number of droplets created).
 
-Here's the command to run:
+To create the node files, run:
 
 ```
-ansible-playbook -i inventory/digital_ocean.py -l sentrynet config.yml -e BINARY=$GOPATH/src/github.com/tendermint/tendermint/build/tendermint -e CONFIGDIR=$GOPATH/src/github.com/tendermint/tendermint/docs/examples
+tendermint testnet
+```
+
+Then, to configure our droplets, run:
+
+```
+ansible-playbook -i inventory/digital_ocean.py -l sentrynet config.yml -e BINARY=$GOPATH/src/github.com/tendermint/tendermint/build/tendermint -e CONFIGDIR=$GOPATH/src/github.com/tendermint/tendermint/networks/remote/ansible/mytestnet
 ```
 
 Voila! All your droplets now have the `tendermint` binary and required
diff --git a/docs/spec/abci/abci.md b/docs/spec/abci/abci.md
index c65d96ec..6dc00b5b 100644
--- a/docs/spec/abci/abci.md
+++ b/docs/spec/abci/abci.md
@@ -38,20 +38,58 @@ Finally, `Query`, `CheckTx`, and `DeliverTx` include a `Codespace string`, whose
 intended use is to disambiguate `Code` values returned by different domains of
 the application. The `Codespace` is a namespace for the `Code`.
 
-## Tags
+## Events
 
 Some methods (`CheckTx, BeginBlock, DeliverTx, EndBlock`)
-include a `Tags` field in their `Response*`. Each tag is key-value pair denoting
-something about what happened during the methods execution.
+include an `Events` field in their `Response*`. Each event contains a type and a
+list of attributes, which are key-value pairs denoting something about what happened
+during the method's execution.
 
-Tags can be used to index transactions and blocks according to what happened
-during their execution. Note that the set of tags returned for a block from
+Events can be used to index transactions and blocks according to what happened
+during their execution. Note that the set of events returned for a block from
 `BeginBlock` and `EndBlock` are merged. In case both methods return the same
 tag, only the value defined in `EndBlock` is used.
 
-Keys and values in tags must be UTF-8 encoded strings (e.g.
-"account.owner": "Bob", "balance": "100.0",
-"time": "2018-01-02T12:30:00Z")
+Each event has a `type` which is meant to categorize the event for a particular
+`Response*` or tx. A `Response*` or tx may contain multiple events with duplicate
+`type` values, where each distinct entry is meant to categorize attributes for a
+particular event. Every key and value in an event's attributes must be a UTF-8
+encoded string, as must the event type itself.
+
+Example:
+
+```go
+ abci.ResponseDeliverTx{
+ 	// ...
+ Events: []abci.Event{ + { + Type: "validator.provisions", + Attributes: cmn.KVPairs{ + cmn.KVPair{Key: []byte("address"), Value: []byte("...")}, + cmn.KVPair{Key: []byte("amount"), Value: []byte("...")}, + cmn.KVPair{Key: []byte("balance"), Value: []byte("...")}, + }, + }, + { + Type: "validator.provisions", + Attributes: cmn.KVPairs{ + cmn.KVPair{Key: []byte("address"), Value: []byte("...")}, + cmn.KVPair{Key: []byte("amount"), Value: []byte("...")}, + cmn.KVPair{Key: []byte("balance"), Value: []byte("...")}, + }, + }, + { + Type: "validator.slashed", + Attributes: cmn.KVPairs{ + cmn.KVPair{Key: []byte("address"), Value: []byte("...")}, + cmn.KVPair{Key: []byte("amount"), Value: []byte("...")}, + cmn.KVPair{Key: []byte("reason"), Value: []byte("...")}, + }, + }, + // ... + }, +} +``` ## Determinism diff --git a/docs/spec/abci/apps.md b/docs/spec/abci/apps.md index 47e62eed..908ad3ea 100644 --- a/docs/spec/abci/apps.md +++ b/docs/spec/abci/apps.md @@ -265,7 +265,7 @@ This is enforced by Tendermint consensus. If a block includes evidence older than this, the block will be rejected (validators won't vote for it). -Must have `0 < MaxAge`. +Must have `MaxAge > 0`. ### Updates diff --git a/docs/spec/p2p/config.md b/docs/spec/p2p/config.md index b31a3673..7ff2b5e8 100644 --- a/docs/spec/p2p/config.md +++ b/docs/spec/p2p/config.md @@ -12,14 +12,14 @@ and upon incoming connection shares some peers and disconnects. ## Seeds -`--p2p.seeds “1.2.3.4:26656,2.3.4.5:4444”` +`--p2p.seeds “id100000000000000000000000000000000@1.2.3.4:26656,id200000000000000000000000000000000@2.3.4.5:4444”` Dials these seeds when we need more peers. They should return a list of peers and then disconnect. If we already have enough peers in the address book, we may never need to dial them. ## Persistent Peers -`--p2p.persistent_peers “1.2.3.4:26656,2.3.4.5:26656”` +`--p2p.persistent_peers “id100000000000000000000000000000000@1.2.3.4:26656,id200000000000000000000000000000000@2.3.4.5:26656”` Dial these peers and auto-redial them if the connection fails. These are intended to be trusted persistent peers that can help @@ -30,9 +30,9 @@ backoff and will give up after a day of trying to connect. the user will be warned that seeds may auto-close connections and that the node may not be able to keep the connection persistent. -## Private Persistent Peers +## Private Peers -`--p2p.private_persistent_peers “1.2.3.4:26656,2.3.4.5:26656”` +`--p2p.private_peer_ids “id100000000000000000000000000000000,id200000000000000000000000000000000”` -These are persistent peers that we do not add to the address book or -gossip to other peers. They stay private to us. +These are IDs of the peers that we do not add to the address book or gossip to +other peers. They stay private to us. diff --git a/docs/spec/reactors/pex/pex.md b/docs/spec/reactors/pex/pex.md index 26f1fa8b..268b4a31 100644 --- a/docs/spec/reactors/pex/pex.md +++ b/docs/spec/reactors/pex/pex.md @@ -21,17 +21,20 @@ inbound (they dialed our public address) or outbound (we dialed them). ## Discovery Peer discovery begins with a list of seeds. -When we have no peers, or have been unable to find enough peers from existing ones, -we dial a randomly selected seed to get a list of peers to dial. + +When we don't have enough peers, we + +1. ask existing peers +2. dial seeds if we're not dialing anyone currently On startup, we will also immediately dial the given list of `persistent_peers`, -and will attempt to maintain persistent connections with them. 
If the connections die, or we fail to dial,
-we will redial every 5s for a few minutes, then switch to an exponential backoff schedule,
-and after about a day of trying, stop dialing the peer.
+and will attempt to maintain persistent connections with them. If the
+connections die, or we fail to dial, we will redial every 5s for a few minutes,
+then switch to an exponential backoff schedule, and after about a day of
+trying, stop dialing the peer.
 
-So long as we have less than `MaxNumOutboundPeers`, we periodically request additional peers
-from each of our own. If sufficient time goes by and we still can't find enough peers,
-we try the seeds again.
+As long as we have fewer than `MaxNumOutboundPeers`, we periodically request
+additional peers from each of our existing peers, and try the seeds.
 
 ## Listening
 
diff --git a/docs/tendermint-core/configuration.md b/docs/tendermint-core/configuration.md
index d19c272f..f24e76d6 100644
--- a/docs/tendermint-core/configuration.md
+++ b/docs/tendermint-core/configuration.md
@@ -30,8 +30,19 @@ moniker = "anonymous"
 # and verifying their commits
 fast_sync = true
 
-# Database backend: leveldb | memdb | cleveldb
-db_backend = "leveldb"
+# Database backend: goleveldb | cleveldb | boltdb
+# * goleveldb (github.com/syndtr/goleveldb - most popular implementation)
+#   - pure go
+#   - stable
+# * cleveldb (uses levigo wrapper)
+#   - fast
+#   - requires gcc
+#   - use cleveldb build tag (go build -tags cleveldb)
+# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt)
+#   - EXPERIMENTAL
+#   - may be faster in some use-cases (random reads - indexer)
+#   - use boltdb build tag (go build -tags boltdb)
+db_backend = "goleveldb"
 
 # Database directory
 db_dir = "data"
diff --git a/docs/tendermint-core/metrics.md b/docs/tendermint-core/metrics.md
index ad6d4c76..94313ddb 100644
--- a/docs/tendermint-core/metrics.md
+++ b/docs/tendermint-core/metrics.md
@@ -14,34 +14,34 @@ Listen address can be changed in the config file (see
 
 The following metrics are available:
 
-| **Name** | **Type** | **Since** | **Tags** | **Description** |
-|-----------------------------------------|-----------|-----------|----------|-----------------------------------------------------------------|
-| consensus\_height | Gauge | 0.21.0 | | Height of the chain |
-| consensus\_validators | Gauge | 0.21.0 | | Number of validators |
-| consensus\_validators\_power | Gauge | 0.21.0 | | Total voting power of all validators |
-| consensus\_missing\_validators | Gauge | 0.21.0 | | Number of validators who did not sign |
-| consensus\_missing\_validators\_power | Gauge | 0.21.0 | | Total voting power of the missing validators |
-| consensus\_byzantine\_validators | Gauge | 0.21.0 | | Number of validators who tried to double sign |
-| consensus\_byzantine\_validators\_power | Gauge | 0.21.0 | | Total voting power of the byzantine validators |
-| consensus\_block\_interval\_seconds | Histogram | 0.21.0 | | Time between this and last block (Block.Header.Time) in seconds |
-| consensus\_rounds | Gauge | 0.21.0 | | Number of rounds |
-| consensus\_num\_txs | Gauge | 0.21.0 | | Number of transactions |
-| consensus\_block\_parts | counter | on dev | peer\_id | number of blockparts transmitted by peer |
-| consensus\_latest\_block\_height | gauge | on dev | | /status sync\_info number |
-| consensus\_fast\_syncing | gauge | on dev | | either 0 (not fast syncing) or 1 (syncing) |
-| consensus\_total\_txs | Gauge | 0.21.0 | | Total number of transactions committed |
-| consensus\_block\_size\_bytes | Gauge | 0.21.0 | | Block size in bytes |
-| p2p\_peers | Gauge | 0.21.0 | | Number of peers node's connected to |
-| p2p\_peer\_receive\_bytes\_total | counter | on dev | peer\_id | number of bytes received from a given peer |
-| p2p\_peer\_send\_bytes\_total | counter | on dev | peer\_id | number of bytes sent to a given peer |
-| p2p\_peer\_pending\_send\_bytes | gauge | on dev | peer\_id | number of pending bytes to be sent to a given peer |
-| p2p\_num\_txs | gauge | on dev | peer\_id | number of transactions submitted by each peer\_id |
-| p2p\_pending\_send\_bytes | gauge | on dev | peer\_id | amount of data pending to be sent to peer |
-| mempool\_size | Gauge | 0.21.0 | | Number of uncommitted transactions |
-| mempool\_tx\_size\_bytes | histogram | on dev | | transaction sizes in bytes |
-| mempool\_failed\_txs | counter | on dev | | number of failed transactions |
-| mempool\_recheck\_times | counter | on dev | | number of transactions rechecked in the mempool |
-| state\_block\_processing\_time | histogram | on dev | | time between BeginBlock and EndBlock in ms |
+| **Name** | **Type** | **Since** | **Tags** | **Description** |
+|-----------------------------------------|-----------|-----------|----------------|-----------------------------------------------------------------|
+| consensus\_height | Gauge | 0.21.0 | | Height of the chain |
+| consensus\_validators | Gauge | 0.21.0 | | Number of validators |
+| consensus\_validators\_power | Gauge | 0.21.0 | | Total voting power of all validators |
+| consensus\_missing\_validators | Gauge | 0.21.0 | | Number of validators who did not sign |
+| consensus\_missing\_validators\_power | Gauge | 0.21.0 | | Total voting power of the missing validators |
+| consensus\_byzantine\_validators | Gauge | 0.21.0 | | Number of validators who tried to double sign |
+| consensus\_byzantine\_validators\_power | Gauge | 0.21.0 | | Total voting power of the byzantine validators |
+| consensus\_block\_interval\_seconds | Histogram | 0.21.0 | | Time between this and last block (Block.Header.Time) in seconds |
+| consensus\_rounds | Gauge | 0.21.0 | | Number of rounds |
+| consensus\_num\_txs | Gauge | 0.21.0 | | Number of transactions |
+| consensus\_block\_parts | counter | on dev | peer\_id | number of blockparts transmitted by peer |
+| consensus\_latest\_block\_height | gauge | on dev | | /status sync\_info number |
+| consensus\_fast\_syncing | gauge | on dev | | either 0 (not fast syncing) or 1 (syncing) |
+| consensus\_total\_txs | Gauge | 0.21.0 | | Total number of transactions committed |
+| consensus\_block\_size\_bytes | Gauge | 0.21.0 | | Block size in
bytes | -| p2p\_peers | Gauge | 0.21.0 | | Number of peers node's connected to | -| p2p\_peer\_receive\_bytes\_total | counter | on dev | peer\_id | number of bytes received from a given peer | -| p2p\_peer\_send\_bytes\_total | counter | on dev | peer\_id | number of bytes sent to a given peer | -| p2p\_peer\_pending\_send\_bytes | gauge | on dev | peer\_id | number of pending bytes to be sent to a given peer | -| p2p\_num\_txs | gauge | on dev | peer\_id | number of transactions submitted by each peer\_id | -| p2p\_pending\_send\_bytes | gauge | on dev | peer\_id | amount of data pending to be sent to peer | -| mempool\_size | Gauge | 0.21.0 | | Number of uncommitted transactions | -| mempool\_tx\_size\_bytes | histogram | on dev | | transaction sizes in bytes | -| mempool\_failed\_txs | counter | on dev | | number of failed transactions | -| mempool\_recheck\_times | counter | on dev | | number of transactions rechecked in the mempool | -| state\_block\_processing\_time | histogram | on dev | | time between BeginBlock and EndBlock in ms | +| **Name** | **Type** | **Since** | **Tags** | **Description** | +|-----------------------------------------|-----------|-----------|----------------|-----------------------------------------------------------------| +| consensus\_height | Gauge | 0.21.0 | | Height of the chain | +| consensus\_validators | Gauge | 0.21.0 | | Number of validators | +| consensus\_validators\_power | Gauge | 0.21.0 | | Total voting power of all validators | +| consensus\_missing\_validators | Gauge | 0.21.0 | | Number of validators who did not sign | +| consensus\_missing\_validators\_power | Gauge | 0.21.0 | | Total voting power of the missing validators | +| consensus\_byzantine\_validators | Gauge | 0.21.0 | | Number of validators who tried to double sign | +| consensus\_byzantine\_validators\_power | Gauge | 0.21.0 | | Total voting power of the byzantine validators | +| consensus\_block\_interval\_seconds | Histogram | 0.21.0 | | Time between this and last block (Block.Header.Time) in seconds | +| consensus\_rounds | Gauge | 0.21.0 | | Number of rounds | +| consensus\_num\_txs | Gauge | 0.21.0 | | Number of transactions | +| consensus\_block\_parts | counter | on dev | peer\_id | number of blockparts transmitted by peer | +| consensus\_latest\_block\_height | gauge | on dev | | /status sync\_info number | +| consensus\_fast\_syncing | gauge | on dev | | either 0 (not fast syncing) or 1 (syncing) | +| consensus\_total\_txs | Gauge | 0.21.0 | | Total number of transactions committed | +| consensus\_block\_size\_bytes | Gauge | 0.21.0 | | Block size in bytes | +| p2p\_peers | Gauge | 0.21.0 | | Number of peers node's connected to | +| p2p\_peer\_receive\_bytes\_total | counter | on dev | peer\_id, chID | number of bytes per channel received from a given peer | +| p2p\_peer\_send\_bytes\_total | counter | on dev | peer\_id, chID | number of bytes per channel sent to a given peer | +| p2p\_peer\_pending\_send\_bytes | gauge | on dev | peer\_id | number of pending bytes to be sent to a given peer | +| p2p\_num\_txs | gauge | on dev | peer\_id | number of transactions submitted by each peer\_id | +| p2p\_pending\_send\_bytes | gauge | on dev | peer\_id | amount of data pending to be sent to peer | +| mempool\_size | Gauge | 0.21.0 | | Number of uncommitted transactions | +| mempool\_tx\_size\_bytes | histogram | on dev | | transaction sizes in bytes | +| mempool\_failed\_txs | counter | on dev | | number of failed transactions | +| mempool\_recheck\_times | counter | on dev 
| | number of transactions rechecked in the mempool | +| state\_block\_processing\_time | histogram | on dev | | time between BeginBlock and EndBlock in ms | ## Useful queries diff --git a/docs/tendermint-core/running-in-production.md b/docs/tendermint-core/running-in-production.md index 1ec79283..9cb21fc5 100644 --- a/docs/tendermint-core/running-in-production.md +++ b/docs/tendermint-core/running-in-production.md @@ -8,7 +8,7 @@ key-value database. Unfortunately, this implementation of LevelDB seems to suffe install the real C-implementation of LevelDB and compile Tendermint to use that using `make build_c`. See the [install instructions](../introduction/install.md) for details. -Tendermint keeps multiple distinct LevelDB databases in the `$TMROOT/data`: +Tendermint keeps multiple distinct databases in the `$TMROOT/data`: - `blockstore.db`: Keeps the entire blockchain - stores blocks, block commits, and block meta data, each indexed by height. Used to sync new diff --git a/docs/tools/benchmarking.md b/docs/tools/benchmarking.md index 67a472e4..a30ab54a 100644 --- a/docs/tools/benchmarking.md +++ b/docs/tools/benchmarking.md @@ -75,6 +75,5 @@ Each of the connections is handled via two separate goroutines. ## Development ``` -make get_vendor_deps make test ``` diff --git a/docs/tools/monitoring.md b/docs/tools/monitoring.md index fa3901dd..26b90ed7 100644 --- a/docs/tools/monitoring.md +++ b/docs/tools/monitoring.md @@ -88,6 +88,5 @@ websocket. ``` make get_tools -make get_vendor_deps make test ``` diff --git a/evidence/reactor.go b/evidence/reactor.go index bbbab3e9..76ea270d 100644 --- a/evidence/reactor.go +++ b/evidence/reactor.go @@ -60,11 +60,6 @@ func (evR *EvidenceReactor) AddPeer(peer p2p.Peer) { go evR.broadcastEvidenceRoutine(peer) } -// RemovePeer implements Reactor. -func (evR *EvidenceReactor) RemovePeer(peer p2p.Peer, reason interface{}) { - // nothing to do -} - // Receive implements Reactor. // It adds any received evidence to the evpool. 
func (evR *EvidenceReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { diff --git a/go.mod b/go.mod new file mode 100644 index 00000000..8fe1a124 --- /dev/null +++ b/go.mod @@ -0,0 +1,52 @@ +module github.com/tendermint/tendermint + +go 1.12 + +require ( + github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 + github.com/btcsuite/btcd v0.0.0-20190115013929-ed77733ec07d + github.com/btcsuite/btcutil v0.0.0-20180706230648-ab6388e0c60a + github.com/davecgh/go-spew v1.1.1 + github.com/etcd-io/bbolt v1.3.2 + github.com/fortytw2/leaktest v1.2.0 + github.com/fsnotify/fsnotify v1.4.7 + github.com/go-kit/kit v0.6.0 + github.com/go-logfmt/logfmt v0.3.0 + github.com/go-stack/stack v1.8.0 + github.com/gogo/protobuf v1.2.1 + github.com/golang/protobuf v1.3.0 + github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db + github.com/gorilla/websocket v1.2.0 + github.com/hashicorp/hcl v1.0.0 + github.com/inconshreveable/mousetrap v1.0.0 + github.com/jmhodges/levigo v1.0.0 + github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 + github.com/magiconair/properties v1.8.0 + github.com/matttproud/golang_protobuf_extensions v1.0.1 + github.com/mitchellh/mapstructure v1.1.2 + github.com/pelletier/go-toml v1.2.0 + github.com/pkg/errors v0.8.0 + github.com/pmezard/go-difflib v1.0.0 + github.com/prometheus/client_golang v0.9.1 + github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 + github.com/prometheus/common v0.0.0-20181020173914-7e9e6cabbd39 + github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d + github.com/rcrowley/go-metrics v0.0.0-20180503174638-e2704e165165 + github.com/rs/cors v1.6.0 + github.com/spf13/afero v1.1.2 + github.com/spf13/cast v1.3.0 + github.com/spf13/cobra v0.0.1 + github.com/spf13/jwalterweatherman v1.0.0 + github.com/spf13/pflag v1.0.3 + github.com/spf13/viper v1.0.0 + github.com/stretchr/testify v1.2.2 + github.com/syndtr/goleveldb v0.0.0-20181012014443-6b91fda63f2e + github.com/tendermint/go-amino v0.14.1 + golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25 + golang.org/x/net v0.0.0-20180906233101-161cd47e91fd + golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a + golang.org/x/text v0.3.0 + google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2 + google.golang.org/grpc v1.13.0 + gopkg.in/yaml.v2 v2.2.1 +) diff --git a/go.sum b/go.sum new file mode 100644 index 00000000..fee691de --- /dev/null +++ b/go.sum @@ -0,0 +1,119 @@ +github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/btcsuite/btcd v0.0.0-20190115013929-ed77733ec07d h1:xG8Pj6Y6J760xwETNmMzmlt38QSwz0BLp1cZ09g27uw= +github.com/btcsuite/btcd v0.0.0-20190115013929-ed77733ec07d/go.mod h1:d3C0AkH6BRcvO8T0UEPu53cnw4IbV63x1bEjildYhO0= +github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= +github.com/btcsuite/btcutil v0.0.0-20180524032703-d4cc87b86016/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= +github.com/btcsuite/btcutil v0.0.0-20180706230648-ab6388e0c60a h1:RQMUrEILyYJEoAT34XS/kLu40vC0+po/UfxrBBA4qZE= +github.com/btcsuite/btcutil v0.0.0-20180706230648-ab6388e0c60a/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= +github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= 
+github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= +github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= +github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= +github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= +github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/etcd-io/bbolt v1.3.2 h1:RLRQ0TKLX7DlBRXAJHvbmXL17Q3KNnTBtZ9B6Qo+/Y0= +github.com/etcd-io/bbolt v1.3.2/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw= +github.com/fortytw2/leaktest v1.2.0 h1:cj6GCiwJDH7l3tMHLjZDo0QqPtrXJiWSI9JgpeQKw+Q= +github.com/fortytw2/leaktest v1.2.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/go-kit/kit v0.6.0 h1:wTifptAGIyIuir4bRyN4h7+kAa2a4eepLYVmRe5qqQ8= +github.com/go-kit/kit v0.6.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0 h1:8HUsc87TaSWLKwrnumgC8/YconD2fJQsRJAsWaPg2ic= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.0 h1:kbxbvI4Un1LUWKxufD+BiE6AEExYYgkQLQmLFqA1LFk= +github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/gorilla/websocket v1.2.0 h1:VJtLvh6VQym50czpZzx07z/kw9EgAxI3x1ZB8taTMQQ= +github.com/gorilla/websocket v1.2.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jmhodges/levigo v1.0.0/go.mod h1:Q6Qx+uH3RAqyK4rFQroq9RL7mdkABMcfhEI+nNuzMJQ= +github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod 
h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.9.1 h1:K47Rk0v/fkEfwfQet2KWhscE0cJzjgCCDBG2KHZoVno= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/common v0.0.0-20181020173914-7e9e6cabbd39 h1:Cto4X6SVMWRPBkJ/3YHn1iDGDGc/Z+sW+AEMKHMVvN4= +github.com/prometheus/common v0.0.0-20181020173914-7e9e6cabbd39/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d h1:GoAlyOgbOEIFdaDqxJVlbOQ1DtGmZWs/Qau0hIlk+WQ= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/rcrowley/go-metrics v0.0.0-20180503174638-e2704e165165 h1:nkcn14uNmFEuGCb2mBZbBb24RdNRL08b/wb+xBOYpuk= +github.com/rcrowley/go-metrics v0.0.0-20180503174638-e2704e165165/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rs/cors v1.6.0 h1:G9tHG9lebljV9mfp9SNPDL36nCDxmo3zTlAf1YgvzmI= +github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= +github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.1 h1:zZh3X5aZbdnoj+4XkaBxKfhO4ot82icYdhhREIAXIj8= +github.com/spf13/cobra v0.0.1/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.3 
h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/viper v1.0.0 h1:RUA/ghS2i64rlnn4ydTfblY8Og8QzcPtCcHvgMn+w/I= +github.com/spf13/viper v1.0.0/go.mod h1:A8kyI5cUJhb8N+3pkfONlcEcZbueH6nhAm0Fq7SrnBM= +github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/syndtr/goleveldb v0.0.0-20181012014443-6b91fda63f2e h1:91EeXI4y4ShkyzkMqZ7QP/ZTIqwXp3RuDu5WFzxcFAs= +github.com/syndtr/goleveldb v0.0.0-20181012014443-6b91fda63f2e/go.mod h1:Z4AUp2Km+PwemOoO/VB5AOx9XSsIItzFjoJlOSiYmn0= +github.com/tendermint/go-amino v0.14.1 h1:o2WudxNfdLNBwMyl2dqOJxiro5rfrEaU0Ugs6offJMk= +github.com/tendermint/go-amino v0.14.1/go.mod h1:i/UKE5Uocn+argJJBb12qTZsCDBcAYMbR92AaJVmKso= +golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25 h1:jsG6UpNLt9iAsb0S2AGW28DveNzzgmbXR+ENoPjUeIU= +golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/net v0.0.0-20180710023853-292b43bbf7cb/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181030150119-7e31e0c00fa0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2 h1:67iHsV9djwGdZpdZNbLuQj6FOzCaZe3w+vhLjn5AcFA= +google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/grpc v1.13.0 h1:bHIbVsCwmvbArgCJmLdgOdHFXlKqTOVjbibbS19cXHc= +google.golang.org/grpc v1.13.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/libs/README.md b/libs/README.md index 9ea618db..b1bb0396 100644 --- a/libs/README.md +++ b/libs/README.md @@ -13,7 +13,7 @@ CLI wraps the `cobra` and 
`viper` packages and handles some common elements of b ## clist -Clist provides a linekd list that is safe for concurrent access by many readers. +Clist provides a linked list that is safe for concurrent access by many readers. ## common diff --git a/libs/circle.yml b/libs/circle.yml index 390ffb03..2b7d1266 100644 --- a/libs/circle.yml +++ b/libs/circle.yml @@ -15,7 +15,7 @@ dependencies: test: override: - - cd $PROJECT_PATH && make get_tools && make get_vendor_deps && bash ./test.sh + - cd $PROJECT_PATH && make get_tools && bash ./test.sh post: - cd "$PROJECT_PATH" && bash <(curl -s https://codecov.io/bash) -f coverage.txt - cd "$PROJECT_PATH" && mv coverage.txt "${CIRCLE_ARTIFACTS}" diff --git a/libs/clist/clist_test.go b/libs/clist/clist_test.go index 13aca357..1784f821 100644 --- a/libs/clist/clist_test.go +++ b/libs/clist/clist_test.go @@ -261,6 +261,8 @@ func TestWaitChan(t *testing.T) { pushed++ time.Sleep(time.Duration(cmn.RandIntn(25)) * time.Millisecond) } + // apply a deterministic pause so the counter has time to catch up + time.Sleep(25 * time.Millisecond) close(done) }() @@ -273,7 +275,7 @@ FOR_LOOP: next = next.Next() seen++ if next == nil { - continue + t.Fatal("Next should not be nil when waiting on NextWaitChan") } case <-done: break FOR_LOOP diff --git a/libs/common/cmap.go b/libs/common/cmap.go index 2f7720d2..d87adb76 100644 --- a/libs/common/cmap.go +++ b/libs/common/cmap.go @@ -56,7 +56,7 @@ func (cm *CMap) Clear() { func (cm *CMap) Keys() []string { cm.l.Lock() - keys := []string{} + keys := make([]string, 0, len(cm.m)) for k := range cm.m { keys = append(keys, k) } @@ -66,7 +66,7 @@ func (cm *CMap) Keys() []string { func (cm *CMap) Values() []interface{} { cm.l.Lock() - items := []interface{}{} + items := make([]interface{}, 0, len(cm.m)) for _, v := range cm.m { items = append(items, v) } diff --git a/libs/common/errors.go b/libs/common/errors.go index 10e40ebd..24af8426 100644 --- a/libs/common/errors.go +++ b/libs/common/errors.go @@ -212,35 +212,3 @@ func (fe FmtError) String() string { func (fe FmtError) Format() string { return fe.format } - -//---------------------------------------- -// Panic wrappers -// XXX DEPRECATED - -// A panic resulting from a sanity check means there is a programmer error -// and some guarantee is not satisfied. -// XXX DEPRECATED -func PanicSanity(v interface{}) { - panic(fmt.Sprintf("Panicked on a Sanity Check: %v", v)) -} - -// A panic here means something has gone horribly wrong, in the form of data corruption or -// failure of the operating system. In a correct/healthy system, these should never fire. -// If they do, it's indicative of a much more serious problem. -// XXX DEPRECATED -func PanicCrisis(v interface{}) { - panic(fmt.Sprintf("Panicked on a Crisis: %v", v)) -} - -// Indicates a failure of consensus. Someone was malicious or something has -// gone horribly wrong. 
These should really boot us into an "emergency-recover" mode
-// XXX DEPRECATED
-func PanicConsensus(v interface{}) {
-	panic(fmt.Sprintf("Panicked on a Consensus Failure: %v", v))
-}
-
-// For those times when we're not sure if we should panic
-// XXX DEPRECATED
-func PanicQ(v interface{}) {
-	panic(fmt.Sprintf("Panicked questionably: %v", v))
-}
diff --git a/libs/common/random.go b/libs/common/random.go
index 2de65945..47e44d1c 100644
--- a/libs/common/random.go
+++ b/libs/common/random.go
@@ -300,7 +300,7 @@ func cRandBytes(numBytes int) []byte {
 	b := make([]byte, numBytes)
 	_, err := crand.Read(b)
 	if err != nil {
-		PanicCrisis(err)
+		panic(err)
 	}
 	return b
 }
diff --git a/libs/common/service.go b/libs/common/service.go
index 21fb0df3..8eee4813 100644
--- a/libs/common/service.go
+++ b/libs/common/service.go
@@ -194,8 +194,7 @@ func (bs *BaseService) Reset() error {
 
 // OnReset implements Service by panicking.
 func (bs *BaseService) OnReset() error {
-	PanicSanity("The service cannot be reset")
-	return nil
+	panic("The service cannot be reset")
 }
 
 // IsRunning implements Service by returning true or false depending on the
diff --git a/libs/common/throttle_timer_test.go b/libs/common/throttle_timer_test.go
index 00f5abde..f5c9dfef 100644
--- a/libs/common/throttle_timer_test.go
+++ b/libs/common/throttle_timer_test.go
@@ -6,6 +6,7 @@ import (
 	"time"
 
 	// make govet noshadow happy...
+	asrt "github.com/stretchr/testify/assert"
 )
diff --git a/libs/db/backend_test.go b/libs/db/backend_test.go
index fb2a3d0b..d755a6f2 100644
--- a/libs/db/backend_test.go
+++ b/libs/db/backend_test.go
@@ -9,6 +9,7 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
diff --git a/libs/db/boltdb.go b/libs/db/boltdb.go
new file mode 100644
index 00000000..30501dd8
--- /dev/null
+++ b/libs/db/boltdb.go
@@ -0,0 +1,349 @@
+// +build boltdb
+
+package db
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"github.com/etcd-io/bbolt"
+)
+
+var bucket = []byte("tm")
+
+func init() {
+	registerDBCreator(BoltDBBackend, func(name, dir string) (DB, error) {
+		return NewBoltDB(name, dir)
+	}, false)
+}
+
+// BoltDB is a wrapper around etcd's fork of bolt
+// (https://github.com/etcd-io/bbolt).
+//
+// NOTE: All operations (including Set, Delete) are synchronous by default. One
+// can globally turn this off by using the NoSync config option (not recommended).
+//
+// A single bucket ([]byte("tm")) is used per database instance. This could
+// lead to performance issues when/if there are lots of keys.
+type BoltDB struct {
+	db *bbolt.DB
+}
+
+// NewBoltDB returns a BoltDB with default options.
+func NewBoltDB(name, dir string) (DB, error) {
+	return NewBoltDBWithOpts(name, dir, bbolt.DefaultOptions)
+}
+
+// NewBoltDBWithOpts allows you to supply *bbolt.Options. ReadOnly: true is not
+// supported because NewBoltDBWithOpts creates a global bucket.
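+//
+// A minimal usage sketch (illustrative only; the name, directory and the
+// NoSync choice are assumptions, not prescribed values):
+//
+//	opts := *bbolt.DefaultOptions // copy the package defaults
+//	opts.NoSync = true            // optional: trade durability for write speed
+//	db, err := NewBoltDBWithOpts("mydb", "/tmp", &opts)
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer db.Close()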
+func NewBoltDBWithOpts(name string, dir string, opts *bbolt.Options) (DB, error) {
+	if opts.ReadOnly {
+		return nil, errors.New("ReadOnly: true is not supported")
+	}
+
+	dbPath := filepath.Join(dir, name+".db")
+	db, err := bbolt.Open(dbPath, os.ModePerm, opts)
+	if err != nil {
+		return nil, err
+	}
+
+	// create a global bucket
+	err = db.Update(func(tx *bbolt.Tx) error {
+		_, err := tx.CreateBucketIfNotExists(bucket)
+		return err
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	return &BoltDB{db: db}, nil
+}
+
+func (bdb *BoltDB) Get(key []byte) (value []byte) {
+	key = nonEmptyKey(nonNilBytes(key))
+	err := bdb.db.View(func(tx *bbolt.Tx) error {
+		b := tx.Bucket(bucket)
+		if v := b.Get(key); v != nil {
+			value = append([]byte{}, v...)
+		}
+		return nil
+	})
+	if err != nil {
+		panic(err)
+	}
+	return
+}
+
+func (bdb *BoltDB) Has(key []byte) bool {
+	return bdb.Get(key) != nil
+}
+
+func (bdb *BoltDB) Set(key, value []byte) {
+	key = nonEmptyKey(nonNilBytes(key))
+	value = nonNilBytes(value)
+	err := bdb.db.Update(func(tx *bbolt.Tx) error {
+		b := tx.Bucket(bucket)
+		return b.Put(key, value)
+	})
+	if err != nil {
+		panic(err)
+	}
+}
+
+func (bdb *BoltDB) SetSync(key, value []byte) {
+	bdb.Set(key, value)
+}
+
+func (bdb *BoltDB) Delete(key []byte) {
+	key = nonEmptyKey(nonNilBytes(key))
+	err := bdb.db.Update(func(tx *bbolt.Tx) error {
+		return tx.Bucket(bucket).Delete(key)
+	})
+	if err != nil {
+		panic(err)
+	}
+}
+
+func (bdb *BoltDB) DeleteSync(key []byte) {
+	bdb.Delete(key)
+}
+
+func (bdb *BoltDB) Close() {
+	bdb.db.Close()
+}
+
+func (bdb *BoltDB) Print() {
+	stats := bdb.db.Stats()
+	fmt.Printf("%v\n", stats)
+
+	err := bdb.db.View(func(tx *bbolt.Tx) error {
+		tx.Bucket(bucket).ForEach(func(k, v []byte) error {
+			fmt.Printf("[%X]:\t[%X]\n", k, v)
+			return nil
+		})
+		return nil
+	})
+	if err != nil {
+		panic(err)
+	}
+}
+
+func (bdb *BoltDB) Stats() map[string]string {
+	stats := bdb.db.Stats()
+	m := make(map[string]string)
+
+	// Freelist stats
+	m["FreePageN"] = fmt.Sprintf("%v", stats.FreePageN)
+	m["PendingPageN"] = fmt.Sprintf("%v", stats.PendingPageN)
+	m["FreeAlloc"] = fmt.Sprintf("%v", stats.FreeAlloc)
+	m["FreelistInuse"] = fmt.Sprintf("%v", stats.FreelistInuse)
+
+	// Transaction stats
+	m["TxN"] = fmt.Sprintf("%v", stats.TxN)
+	m["OpenTxN"] = fmt.Sprintf("%v", stats.OpenTxN)
+
+	return m
+}
+
+// boltDBBatch buffers key/value operations in a slice and dumps them to the
+// underlying DB in a single transaction upon a Write call.
+type boltDBBatch struct {
+	db  *BoltDB
+	ops []operation
+}
+
+// NewBatch returns a new batch.
+func (bdb *BoltDB) NewBatch() Batch {
+	return &boltDBBatch{
+		ops: nil,
+		db:  bdb,
+	}
+}
+
+// It is safe to modify the contents of the argument after Set returns but not
+// before.
+func (bdb *boltDBBatch) Set(key, value []byte) {
+	bdb.ops = append(bdb.ops, operation{opTypeSet, key, value})
+}
+
+// It is safe to modify the contents of the argument after Delete returns but
+// not before.
+func (bdb *boltDBBatch) Delete(key []byte) {
+	bdb.ops = append(bdb.ops, operation{opTypeDelete, key, nil})
+}
+
+// NOTE: the operation is synchronous (see BoltDB for reasons)
+func (bdb *boltDBBatch) Write() {
+	err := bdb.db.db.Batch(func(tx *bbolt.Tx) error {
+		b := tx.Bucket(bucket)
+		for _, op := range bdb.ops {
+			key := nonEmptyKey(nonNilBytes(op.key))
+			switch op.opType {
+			case opTypeSet:
+				if putErr := b.Put(key, op.value); putErr != nil {
+					return putErr
+				}
+			case opTypeDelete:
+				if delErr := b.Delete(key); delErr != nil {
+					return delErr
+				}
+			}
+		}
+		return nil
+	})
+	if err != nil {
+		panic(err)
+	}
+}
+
+func (bdb *boltDBBatch) WriteSync() {
+	bdb.Write()
+}
+
+func (bdb *boltDBBatch) Close() {}
+
+// WARNING: Any concurrent writes or reads will block until the iterator is
+// closed.
+func (bdb *BoltDB) Iterator(start, end []byte) Iterator {
+	tx, err := bdb.db.Begin(false)
+	if err != nil {
+		panic(err)
+	}
+	return newBoltDBIterator(tx, start, end, false)
+}
+
+// WARNING: Any concurrent writes or reads will block until the iterator is
+// closed.
+func (bdb *BoltDB) ReverseIterator(start, end []byte) Iterator {
+	tx, err := bdb.db.Begin(false)
+	if err != nil {
+		panic(err)
+	}
+	return newBoltDBIterator(tx, start, end, true)
+}
+
+// boltDBIterator allows you to iterate over a range of keys/values given some
+// start / end keys (nil & nil will result in a full scan).
+type boltDBIterator struct {
+	tx *bbolt.Tx
+
+	itr   *bbolt.Cursor
+	start []byte
+	end   []byte
+
+	currentKey   []byte
+	currentValue []byte
+
+	isInvalid bool
+	isReverse bool
+}
+
+func newBoltDBIterator(tx *bbolt.Tx, start, end []byte, isReverse bool) *boltDBIterator {
+	itr := tx.Bucket(bucket).Cursor()
+
+	var ck, cv []byte
+	if isReverse {
+		if end == nil {
+			ck, cv = itr.Last()
+		} else {
+			_, _ = itr.Seek(end) // after key
+			ck, cv = itr.Prev()  // return to end key
+		}
+	} else {
+		if start == nil {
+			ck, cv = itr.First()
+		} else {
+			ck, cv = itr.Seek(start)
+		}
+	}
+
+	return &boltDBIterator{
+		tx:           tx,
+		itr:          itr,
+		start:        start,
+		end:          end,
+		currentKey:   ck,
+		currentValue: cv,
+		isReverse:    isReverse,
+		isInvalid:    false,
+	}
+}
+
+func (itr *boltDBIterator) Domain() ([]byte, []byte) {
+	return itr.start, itr.end
+}
+
+func (itr *boltDBIterator) Valid() bool {
+	if itr.isInvalid {
+		return false
+	}
+
+	// iterated to the end of the cursor
+	if len(itr.currentKey) == 0 {
+		itr.isInvalid = true
+		return false
+	}
+
+	if itr.isReverse {
+		if itr.start != nil && bytes.Compare(itr.currentKey, itr.start) < 0 {
+			itr.isInvalid = true
+			return false
+		}
+	} else {
+		if itr.end != nil && bytes.Compare(itr.end, itr.currentKey) <= 0 {
+			itr.isInvalid = true
+			return false
+		}
+	}
+
+	// Valid
+	return true
+}
+
+func (itr *boltDBIterator) Next() {
+	itr.assertIsValid()
+	if itr.isReverse {
+		itr.currentKey, itr.currentValue = itr.itr.Prev()
+	} else {
+		itr.currentKey, itr.currentValue = itr.itr.Next()
+	}
+}
+
+func (itr *boltDBIterator) Key() []byte {
+	itr.assertIsValid()
+	return append([]byte{}, itr.currentKey...)
+}
+
+func (itr *boltDBIterator) Value() []byte {
+	itr.assertIsValid()
+	var value []byte
+	if itr.currentValue != nil {
+		value = append([]byte{}, itr.currentValue...)
+	}
+	return value
+}
+
+func (itr *boltDBIterator) Close() {
+	err := itr.tx.Rollback()
+	if err != nil {
+		panic(err)
+	}
+}
+
+func (itr *boltDBIterator) assertIsValid() {
+	if !itr.Valid() {
+		panic("Boltdb-iterator is invalid")
+	}
+}
+
+// nonEmptyKey returns a []byte("nil") if key is empty.
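+// For example (illustrative): Get(nil), Get([]byte{}) and Get([]byte("nil"))
+// all resolve to the same stored key.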
+// WARNING: this may collide with the "nil" user key!
+func nonEmptyKey(key []byte) []byte {
+	if len(key) == 0 {
+		return []byte("nil")
+	}
+	return key
+}
diff --git a/libs/db/boltdb_test.go b/libs/db/boltdb_test.go
new file mode 100644
index 00000000..416a8fd0
--- /dev/null
+++ b/libs/db/boltdb_test.go
@@ -0,0 +1,37 @@
+// +build boltdb
+
+package db
+
+import (
+	"fmt"
+	"os"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+
+	cmn "github.com/tendermint/tendermint/libs/common"
+)
+
+func TestBoltDBNewBoltDB(t *testing.T) {
+	name := fmt.Sprintf("test_%x", cmn.RandStr(12))
+	dir := os.TempDir()
+	defer cleanupDBDir(dir, name)
+
+	db, err := NewBoltDB(name, dir)
+	require.NoError(t, err)
+	db.Close()
+}
+
+func BenchmarkBoltDBRandomReadsWrites(b *testing.B) {
+	name := fmt.Sprintf("test_%x", cmn.RandStr(12))
+	db, err := NewBoltDB(name, "")
+	if err != nil {
+		b.Fatal(err)
+	}
+	defer func() {
+		db.Close()
+		cleanupDBDir("", name)
+	}()
+
+	benchmarkRandomReadsWrites(b, db)
+}
diff --git a/libs/db/c_level_db.go b/libs/db/c_level_db.go
index 116e51bc..7538166b 100644
--- a/libs/db/c_level_db.go
+++ b/libs/db/c_level_db.go
@@ -1,4 +1,4 @@
-// +build gcc
+// +build cleveldb
 
 package db
 
@@ -14,7 +14,6 @@ func init() {
 	dbCreator := func(name string, dir string) (DB, error) {
 		return NewCLevelDB(name, dir)
 	}
-	registerDBCreator(LevelDBBackend, dbCreator, true)
 	registerDBCreator(CLevelDBBackend, dbCreator, false)
 }
diff --git a/libs/db/c_level_db_test.go b/libs/db/c_level_db_test.go
index e71dee0c..1c10fcde 100644
--- a/libs/db/c_level_db_test.go
+++ b/libs/db/c_level_db_test.go
@@ -1,4 +1,4 @@
-// +build gcc
+// +build cleveldb
 
 package db
 
@@ -93,7 +93,7 @@ func TestCLevelDBBackend(t *testing.T) {
 	// Can't use "" (current directory) or "./" here because levigo.Open returns:
 	// "Error initializing DB: IO error: test_XXX.db: Invalid argument"
 	dir := os.TempDir()
-	db := NewDB(name, LevelDBBackend, dir)
+	db := NewDB(name, CLevelDBBackend, dir)
 	defer cleanupDBDir(dir, name)
 
 	_, ok := db.(*CLevelDB)
@@ -103,7 +103,7 @@ func TestCLevelDBStats(t *testing.T) {
 	name := fmt.Sprintf("test_%x", cmn.RandStr(12))
 	dir := os.TempDir()
-	db := NewDB(name, LevelDBBackend, dir)
+	db := NewDB(name, CLevelDBBackend, dir)
 	defer cleanupDBDir(dir, name)
 
 	assert.NotEmpty(t, db.Stats())
diff --git a/libs/db/common_test.go b/libs/db/common_test.go
index 1e27a7ca..64a86979 100644
--- a/libs/db/common_test.go
+++ b/libs/db/common_test.go
@@ -1,6 +1,8 @@
 package db
 
 import (
+	"bytes"
+	"encoding/binary"
 	"fmt"
 	"io/ioutil"
 	"sync"
@@ -8,6 +10,7 @@ import (
 
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 //----------------------------------------
@@ -188,3 +191,66 @@ func (mockIterator) Value() []byte {
 }
 
 func (mockIterator) Close() {
 }
+
+func benchmarkRandomReadsWrites(b *testing.B, db DB) {
+	b.StopTimer()
+
+	// create dummy data
+	const numItems = int64(1000000)
+	internal := map[int64]int64{}
+	for i := 0; i < int(numItems); i++ {
+		internal[int64(i)] = int64(0)
+	}
+
+	// fmt.Println("ok, starting")
+	b.StartTimer()
+
+	for i := 0; i < b.N; i++ {
+		// Write something
+		{
+			idx := int64(cmn.RandInt()) % numItems
+			internal[idx]++
+			val := internal[idx]
+			idxBytes := int642Bytes(int64(idx))
+			valBytes := int642Bytes(int64(val))
+			//fmt.Printf("Set %X -> %X\n", idxBytes, valBytes)
+			db.Set(idxBytes, valBytes)
+		}
+
+		// Read something
+		{
+			idx := int64(cmn.RandInt()) % numItems
+			valExp := internal[idx]
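+			// read the value back from the DB and check it against the
+			// in-memory model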
+			idxBytes := int642Bytes(int64(idx))
+			valBytes := db.Get(idxBytes)
+			//fmt.Printf("Get %X -> %X\n", idxBytes, valBytes)
+			if valExp == 0 {
+				if !bytes.Equal(valBytes, nil) {
+					b.Errorf("Expected %v for %v, got %X", nil, idx, valBytes)
+					break
+				}
+			} else {
+				if len(valBytes) != 8 {
+					b.Errorf("Expected length 8 for %v, got %X", idx, valBytes)
+					break
+				}
+				valGot := bytes2Int64(valBytes)
+				if valExp != valGot {
+					b.Errorf("Expected %v for %v, got %v", valExp, idx, valGot)
+					break
+				}
+			}
+		}
+
+	}
+}
+
+func int642Bytes(i int64) []byte {
+	buf := make([]byte, 8)
+	binary.BigEndian.PutUint64(buf, uint64(i))
+	return buf
+}
+
+func bytes2Int64(buf []byte) int64 {
+	return int64(binary.BigEndian.Uint64(buf))
+}
diff --git a/libs/db/db.go b/libs/db/db.go
index 8a3975a8..d88df398 100644
--- a/libs/db/db.go
+++ b/libs/db/db.go
@@ -5,17 +5,33 @@ import (
 	"strings"
 )
 
-//----------------------------------------
-// Main entry
-
 type DBBackendType string
 
+// These are valid backend types.
 const (
-	LevelDBBackend  DBBackendType = "leveldb" // legacy, defaults to goleveldb unless +gcc
-	CLevelDBBackend DBBackendType = "cleveldb"
+	// GoLevelDBBackend represents goleveldb (github.com/syndtr/goleveldb - most
+	// popular implementation)
+	// - pure go
+	// - stable
 	GoLevelDBBackend DBBackendType = "goleveldb"
-	MemDBBackend DBBackendType = "memdb"
-	FSDBBackend  DBBackendType = "fsdb" // using the filesystem naively
+	// CLevelDBBackend represents cleveldb (uses levigo wrapper)
+	// - fast
+	// - requires gcc
+	// - use cleveldb build tag (go build -tags cleveldb)
+	CLevelDBBackend DBBackendType = "cleveldb"
+	// MemDBBackend represents an in-memory key-value store, which is mostly used
+	// for testing.
+	MemDBBackend DBBackendType = "memdb"
+	// FSDBBackend represents a filesystem database
+	// - EXPERIMENTAL
+	// - slow
+	FSDBBackend DBBackendType = "fsdb"
+	// BoltDBBackend represents bolt (uses etcd's fork of bolt -
+	// github.com/etcd-io/bbolt)
+	// - EXPERIMENTAL
+	// - may be faster in some use-cases (random reads - indexer)
+	// - use boltdb build tag (go build -tags boltdb)
+	BoltDBBackend DBBackendType = "boltdb"
 )
 
 type dbCreator func(name string, dir string) (DB, error)
diff --git a/libs/db/fsdb.go b/libs/db/fsdb.go
index 2d82e774..ca8eefe9 100644
--- a/libs/db/fsdb.go
+++ b/libs/db/fsdb.go
@@ -20,7 +20,7 @@ const (
 )
 
 func init() {
-	registerDBCreator(FSDBBackend, func(name string, dir string) (DB, error) {
+	registerDBCreator(FSDBBackend, func(name, dir string) (DB, error) {
 		dbPath := filepath.Join(dir, name+".db")
 		return NewFSDB(dbPath), nil
 	}, false)
diff --git a/libs/db/go_level_db.go b/libs/db/go_level_db.go
index 9a4358f6..8c20ccdd 100644
--- a/libs/db/go_level_db.go
+++ b/libs/db/go_level_db.go
@@ -9,15 +9,12 @@ import (
 	"github.com/syndtr/goleveldb/leveldb/errors"
 	"github.com/syndtr/goleveldb/leveldb/iterator"
 	"github.com/syndtr/goleveldb/leveldb/opt"
-
-	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 func init() {
 	dbCreator := func(name string, dir string) (DB, error) {
 		return NewGoLevelDB(name, dir)
 	}
-	registerDBCreator(LevelDBBackend, dbCreator, false)
 	registerDBCreator(GoLevelDBBackend, dbCreator, false)
 }
 
@@ -67,7 +64,7 @@ func (db *GoLevelDB) Set(key []byte, value []byte) {
 	value = nonNilBytes(value)
 	err := db.db.Put(key, value, nil)
 	if err != nil {
-		cmn.PanicCrisis(err)
+		panic(err)
 	}
 }
 
@@ -77,7 +74,7 @@ func (db *GoLevelDB) SetSync(key []byte, value []byte) {
 	value = nonNilBytes(value)
 	err := db.db.Put(key, value, &opt.WriteOptions{Sync: true})
 	if err != nil {
-
cmn.PanicCrisis(err) + panic(err) } } @@ -86,7 +83,7 @@ func (db *GoLevelDB) Delete(key []byte) { key = nonNilBytes(key) err := db.db.Delete(key, nil) if err != nil { - cmn.PanicCrisis(err) + panic(err) } } @@ -95,7 +92,7 @@ func (db *GoLevelDB) DeleteSync(key []byte) { key = nonNilBytes(key) err := db.db.Delete(key, &opt.WriteOptions{Sync: true}) if err != nil { - cmn.PanicCrisis(err) + panic(err) } } diff --git a/libs/db/go_level_db_test.go b/libs/db/go_level_db_test.go index c24eec3c..f781a2b3 100644 --- a/libs/db/go_level_db_test.go +++ b/libs/db/go_level_db_test.go @@ -1,29 +1,27 @@ package db import ( - "bytes" - "encoding/binary" "fmt" - "os" "testing" + "github.com/stretchr/testify/require" "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/stretchr/testify/require" cmn "github.com/tendermint/tendermint/libs/common" ) -func TestNewGoLevelDB(t *testing.T) { +func TestGoLevelDBNewGoLevelDB(t *testing.T) { name := fmt.Sprintf("test_%x", cmn.RandStr(12)) - // Test write locks - db, err := NewGoLevelDB(name, "") + defer cleanupDBDir("", name) + + // Test we can't open the db twice for writing + wr1, err := NewGoLevelDB(name, "") require.Nil(t, err) - defer os.RemoveAll("./" + name + ".db") _, err = NewGoLevelDB(name, "") require.NotNil(t, err) - db.Close() // Close the db to release the lock + wr1.Close() // Close the db to release the lock - // Open the db twice in a row to test read-only locks + // Test we can open the db twice for reading only ro1, err := NewGoLevelDBWithOpts(name, "", &opt.Options{ReadOnly: true}) defer ro1.Close() require.Nil(t, err) @@ -32,75 +30,16 @@ func TestNewGoLevelDB(t *testing.T) { require.Nil(t, err) } -func BenchmarkRandomReadsWrites(b *testing.B) { - b.StopTimer() - - numItems := int64(1000000) - internal := map[int64]int64{} - for i := 0; i < int(numItems); i++ { - internal[int64(i)] = int64(0) - } - db, err := NewGoLevelDB(fmt.Sprintf("test_%x", cmn.RandStr(12)), "") +func BenchmarkGoLevelDBRandomReadsWrites(b *testing.B) { + name := fmt.Sprintf("test_%x", cmn.RandStr(12)) + db, err := NewGoLevelDB(name, "") if err != nil { - b.Fatal(err.Error()) - return + b.Fatal(err) } + defer func() { + db.Close() + cleanupDBDir("", name) + }() - fmt.Println("ok, starting") - b.StartTimer() - - for i := 0; i < b.N; i++ { - // Write something - { - idx := (int64(cmn.RandInt()) % numItems) - internal[idx]++ - val := internal[idx] - idxBytes := int642Bytes(int64(idx)) - valBytes := int642Bytes(int64(val)) - //fmt.Printf("Set %X -> %X\n", idxBytes, valBytes) - db.Set( - idxBytes, - valBytes, - ) - } - // Read something - { - idx := (int64(cmn.RandInt()) % numItems) - val := internal[idx] - idxBytes := int642Bytes(int64(idx)) - valBytes := db.Get(idxBytes) - //fmt.Printf("Get %X -> %X\n", idxBytes, valBytes) - if val == 0 { - if !bytes.Equal(valBytes, nil) { - b.Errorf("Expected %v for %v, got %X", - nil, idx, valBytes) - break - } - } else { - if len(valBytes) != 8 { - b.Errorf("Expected length 8 for %v, got %X", - idx, valBytes) - break - } - valGot := bytes2Int64(valBytes) - if val != valGot { - b.Errorf("Expected %v for %v, got %v", - val, idx, valGot) - break - } - } - } - } - - db.Close() -} - -func int642Bytes(i int64) []byte { - buf := make([]byte, 8) - binary.BigEndian.PutUint64(buf, uint64(i)) - return buf -} - -func bytes2Int64(buf []byte) int64 { - return int64(binary.BigEndian.Uint64(buf)) + benchmarkRandomReadsWrites(b, db) } diff --git a/libs/db/mem_batch.go b/libs/db/mem_batch.go index ebba43f5..2ce76578 100644 --- a/libs/db/mem_batch.go +++ 
b/libs/db/mem_batch.go
@@ -1,8 +1,6 @@
 package db
 
-import (
-	"sync"
-)
+import "sync"
 
 type atomicSetDeleter interface {
 	Mutex() *sync.Mutex
diff --git a/libs/db/mem_db.go b/libs/db/mem_db.go
index ff516bc7..fc567577 100644
--- a/libs/db/mem_db.go
+++ b/libs/db/mem_db.go
@@ -7,7 +7,7 @@ import (
 )
 
 func init() {
-	registerDBCreator(MemDBBackend, func(name string, dir string) (DB, error) {
+	registerDBCreator(MemDBBackend, func(name, dir string) (DB, error) {
 		return NewMemDB(), nil
 	}, false)
 }
diff --git a/libs/db/remotedb/remotedb_test.go b/libs/db/remotedb/remotedb_test.go
index f5c8e2cb..43a02246 100644
--- a/libs/db/remotedb/remotedb_test.go
+++ b/libs/db/remotedb/remotedb_test.go
@@ -28,7 +28,7 @@ func TestRemoteDB(t *testing.T) {
 	client, err := remotedb.NewRemoteDB(ln.Addr().String(), cert)
 	require.Nil(t, err, "expecting a successful client creation")
 	dbName := "test-remote-db"
-	require.Nil(t, client.InitRemote(&remotedb.Init{Name: dbName, Type: "leveldb"}))
+	require.Nil(t, client.InitRemote(&remotedb.Init{Name: dbName, Type: "goleveldb"}))
 	defer func() {
 		err := os.RemoveAll(dbName + ".db")
 		if err != nil {
diff --git a/libs/db/util_test.go b/libs/db/util_test.go
index 07f9dd23..39a02160 100644
--- a/libs/db/util_test.go
+++ b/libs/db/util_test.go
@@ -22,6 +22,11 @@ func TestPrefixIteratorNoMatchNil(t *testing.T) {
 // Empty iterator for db populated after iterator created.
 func TestPrefixIteratorNoMatch1(t *testing.T) {
 	for backend := range backends {
+		if backend == BoltDBBackend {
+			t.Log("bolt does not support concurrent writes while iterating")
+			continue
+		}
+
 		t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) {
 			db, dir := newTempDB(t, backend)
 			defer os.RemoveAll(dir)
diff --git a/libs/pubsub/example_test.go b/libs/pubsub/example_test.go
index a4369626..34bb2a88 100644
--- a/libs/pubsub/example_test.go
+++ b/libs/pubsub/example_test.go
@@ -21,7 +21,7 @@ func TestExample(t *testing.T) {
 	ctx := context.Background()
 	subscription, err := s.Subscribe(ctx, "example-client", query.MustParse("abci.account.name='John'"))
 	require.NoError(t, err)
-	err = s.PublishWithTags(ctx, "Tombstone", map[string]string{"abci.account.name": "John"})
+	err = s.PublishWithEvents(ctx, "Tombstone", map[string][]string{"abci.account.name": {"John"}})
 	require.NoError(t, err)
 	assertReceive(t, "Tombstone", subscription.Out())
 }
diff --git a/libs/pubsub/pubsub.go b/libs/pubsub/pubsub.go
index f78dac1b..cb7b8d5b 100644
--- a/libs/pubsub/pubsub.go
+++ b/libs/pubsub/pubsub.go
@@ -26,7 +26,7 @@
 //     for {
 //       select {
 //       case msg := <-subscription.Out():
-//         // handle msg.Data() and msg.Tags()
+//         // handle msg.Data() and msg.Events()
 //       case <-subscription.Cancelled():
 //         return subscription.Err()
 //       }
@@ -61,9 +61,14 @@ var (
 	ErrAlreadySubscribed = errors.New("already subscribed")
 )
 
-// Query defines an interface for a query to be used for subscribing.
+// Query defines an interface for a query to be used for subscribing. A query
+// matches against a map of events. Each key in this map is a composite of the
+// event type and an attribute key (e.g. "{eventType}.{eventAttrKey}") and the
+// values are the event values that are contained under that relationship. This
+// allows event types to repeat themselves with the same set of keys and
+// different values.
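+//
+// For example (an illustrative sketch; the event names are assumptions), a
+// transfer event with two attribute keys might be flattened as:
+//
+//	map[string][]string{
+//		"transfer.sender":    {"alice", "bob"},
+//		"transfer.recipient": {"carol"},
+//	}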
type Query interface { - Matches(tags map[string]string) bool + Matches(events map[string][]string) bool String() string } @@ -76,12 +81,12 @@ type cmd struct { clientID string // publish - msg interface{} - tags map[string]string + msg interface{} + events map[string][]string } // Server allows clients to subscribe/unsubscribe for messages, publishing -// messages with or without tags, and manages internal state. +// messages with or without events, and manages internal state. type Server struct { cmn.BaseService @@ -258,15 +263,15 @@ func (s *Server) NumClientSubscriptions(clientID string) int { // Publish publishes the given message. An error will be returned to the caller // if the context is canceled. func (s *Server) Publish(ctx context.Context, msg interface{}) error { - return s.PublishWithTags(ctx, msg, make(map[string]string)) + return s.PublishWithEvents(ctx, msg, make(map[string][]string)) } -// PublishWithTags publishes the given message with the set of tags. The set is -// matched with clients queries. If there is a match, the message is sent to +// PublishWithEvents publishes the given message with the set of events. The set +// is matched with clients queries. If there is a match, the message is sent to // the client. -func (s *Server) PublishWithTags(ctx context.Context, msg interface{}, tags map[string]string) error { +func (s *Server) PublishWithEvents(ctx context.Context, msg interface{}, events map[string][]string) error { select { - case s.cmds <- cmd{op: pub, msg: msg, tags: tags}: + case s.cmds <- cmd{op: pub, msg: msg, events: events}: return nil case <-ctx.Done(): return ctx.Err() @@ -325,7 +330,7 @@ loop: case sub: state.add(cmd.clientID, cmd.query, cmd.subscription) case pub: - state.send(cmd.msg, cmd.tags) + state.send(cmd.msg, cmd.events) } } } @@ -392,18 +397,18 @@ func (state *state) removeAll(reason error) { } } -func (state *state) send(msg interface{}, tags map[string]string) { +func (state *state) send(msg interface{}, events map[string][]string) { for qStr, clientSubscriptions := range state.subscriptions { q := state.queries[qStr].q - if q.Matches(tags) { + if q.Matches(events) { for clientID, subscription := range clientSubscriptions { if cap(subscription.out) == 0 { // block on unbuffered channel - subscription.out <- Message{msg, tags} + subscription.out <- NewMessage(msg, events) } else { // don't block on buffered channels select { - case subscription.out <- Message{msg, tags}: + case subscription.out <- NewMessage(msg, events): default: state.remove(clientID, qStr, ErrOutOfCapacity) } diff --git a/libs/pubsub/pubsub_test.go b/libs/pubsub/pubsub_test.go index 88447756..d5f61dc0 100644 --- a/libs/pubsub/pubsub_test.go +++ b/libs/pubsub/pubsub_test.go @@ -46,13 +46,16 @@ func TestSubscribe(t *testing.T) { err = s.Publish(ctx, "Asylum") assert.NoError(t, err) + + err = s.Publish(ctx, "Ivan") + assert.NoError(t, err) }() select { case <-published: assertReceive(t, "Quicksilver", subscription.Out()) assertCancelled(t, subscription, pubsub.ErrOutOfCapacity) - case <-time.After(100 * time.Millisecond): + case <-time.After(3 * time.Second): t.Fatal("Expected Publish(Asylum) not to block") } } @@ -101,7 +104,7 @@ func TestSubscribeUnbuffered(t *testing.T) { select { case <-published: t.Fatal("Expected Publish(Darkhawk) to block") - case <-time.After(100 * time.Millisecond): + case <-time.After(3 * time.Second): assertReceive(t, "Ultron", subscription.Out()) assertReceive(t, "Darkhawk", subscription.Out()) } @@ -133,24 +136,75 @@ func TestDifferentClients(t 
*testing.T) { ctx := context.Background() subscription1, err := s.Subscribe(ctx, "client-1", query.MustParse("tm.events.type='NewBlock'")) require.NoError(t, err) - err = s.PublishWithTags(ctx, "Iceman", map[string]string{"tm.events.type": "NewBlock"}) + err = s.PublishWithEvents(ctx, "Iceman", map[string][]string{"tm.events.type": {"NewBlock"}}) require.NoError(t, err) assertReceive(t, "Iceman", subscription1.Out()) subscription2, err := s.Subscribe(ctx, "client-2", query.MustParse("tm.events.type='NewBlock' AND abci.account.name='Igor'")) require.NoError(t, err) - err = s.PublishWithTags(ctx, "Ultimo", map[string]string{"tm.events.type": "NewBlock", "abci.account.name": "Igor"}) + err = s.PublishWithEvents(ctx, "Ultimo", map[string][]string{"tm.events.type": {"NewBlock"}, "abci.account.name": {"Igor"}}) require.NoError(t, err) assertReceive(t, "Ultimo", subscription1.Out()) assertReceive(t, "Ultimo", subscription2.Out()) subscription3, err := s.Subscribe(ctx, "client-3", query.MustParse("tm.events.type='NewRoundStep' AND abci.account.name='Igor' AND abci.invoice.number = 10")) require.NoError(t, err) - err = s.PublishWithTags(ctx, "Valeria Richards", map[string]string{"tm.events.type": "NewRoundStep"}) + err = s.PublishWithEvents(ctx, "Valeria Richards", map[string][]string{"tm.events.type": {"NewRoundStep"}}) require.NoError(t, err) assert.Zero(t, len(subscription3.Out())) } +func TestSubscribeDuplicateKeys(t *testing.T) { + ctx := context.Background() + s := pubsub.NewServer() + s.SetLogger(log.TestingLogger()) + require.NoError(t, s.Start()) + defer s.Stop() + + testCases := []struct { + query string + expected interface{} + }{ + { + "withdraw.rewards='17'", + "Iceman", + }, + { + "withdraw.rewards='22'", + "Iceman", + }, + { + "withdraw.rewards='1' AND withdraw.rewards='22'", + "Iceman", + }, + { + "withdraw.rewards='100'", + nil, + }, + } + + for i, tc := range testCases { + sub, err := s.Subscribe(ctx, fmt.Sprintf("client-%d", i), query.MustParse(tc.query)) + require.NoError(t, err) + + err = s.PublishWithEvents( + ctx, + "Iceman", + map[string][]string{ + "transfer.sender": {"foo", "bar", "baz"}, + "withdraw.rewards": {"1", "17", "22"}, + }, + ) + require.NoError(t, err) + + if tc.expected != nil { + assertReceive(t, tc.expected, sub.Out()) + } else { + require.Zero(t, len(sub.Out())) + } + } +} + func TestClientSubscribesTwice(t *testing.T) { s := pubsub.NewServer() s.SetLogger(log.TestingLogger()) @@ -162,7 +216,7 @@ func TestClientSubscribesTwice(t *testing.T) { subscription1, err := s.Subscribe(ctx, clientID, q) require.NoError(t, err) - err = s.PublishWithTags(ctx, "Goblin Queen", map[string]string{"tm.events.type": "NewBlock"}) + err = s.PublishWithEvents(ctx, "Goblin Queen", map[string][]string{"tm.events.type": {"NewBlock"}}) require.NoError(t, err) assertReceive(t, "Goblin Queen", subscription1.Out()) @@ -170,7 +224,7 @@ func TestClientSubscribesTwice(t *testing.T) { require.Error(t, err) require.Nil(t, subscription2) - err = s.PublishWithTags(ctx, "Spider-Man", map[string]string{"tm.events.type": "NewBlock"}) + err = s.PublishWithEvents(ctx, "Spider-Man", map[string][]string{"tm.events.type": {"NewBlock"}}) require.NoError(t, err) assertReceive(t, "Spider-Man", subscription1.Out()) } @@ -309,7 +363,7 @@ func benchmarkNClients(n int, b *testing.B) { b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { - s.PublishWithTags(ctx, "Gamora", map[string]string{"abci.Account.Owner": "Ivan", "abci.Invoices.Number": string(i)}) + s.PublishWithEvents(ctx, "Gamora", 
map[string][]string{"abci.Account.Owner": {"Ivan"}, "abci.Invoices.Number": {string(i)}}) } } @@ -340,7 +394,7 @@ func benchmarkNClientsOneQuery(n int, b *testing.B) { b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { - s.PublishWithTags(ctx, "Gamora", map[string]string{"abci.Account.Owner": "Ivan", "abci.Invoices.Number": "1"}) + s.PublishWithEvents(ctx, "Gamora", map[string][]string{"abci.Account.Owner": {"Ivan"}, "abci.Invoices.Number": {"1"}}) } } diff --git a/libs/pubsub/query/empty.go b/libs/pubsub/query/empty.go index 83271f04..2d7642ad 100644 --- a/libs/pubsub/query/empty.go +++ b/libs/pubsub/query/empty.go @@ -5,7 +5,7 @@ type Empty struct { } // Matches always returns true. -func (Empty) Matches(tags map[string]string) bool { +func (Empty) Matches(tags map[string][]string) bool { return true } diff --git a/libs/pubsub/query/empty_test.go b/libs/pubsub/query/empty_test.go index 141fb951..3fcd2d72 100644 --- a/libs/pubsub/query/empty_test.go +++ b/libs/pubsub/query/empty_test.go @@ -10,8 +10,8 @@ import ( func TestEmptyQueryMatchesAnything(t *testing.T) { q := query.Empty{} - assert.True(t, q.Matches(map[string]string{})) - assert.True(t, q.Matches(map[string]string{"Asher": "Roth"})) - assert.True(t, q.Matches(map[string]string{"Route": "66"})) - assert.True(t, q.Matches(map[string]string{"Route": "66", "Billy": "Blue"})) + assert.True(t, q.Matches(map[string][]string{})) + assert.True(t, q.Matches(map[string][]string{"Asher": {"Roth"}})) + assert.True(t, q.Matches(map[string][]string{"Route": {"66"}})) + assert.True(t, q.Matches(map[string][]string{"Route": {"66"}, "Billy": {"Blue"}})) } diff --git a/libs/pubsub/query/query.go b/libs/pubsub/query/query.go index 189110a3..80dbfc05 100644 --- a/libs/pubsub/query/query.go +++ b/libs/pubsub/query/query.go @@ -148,12 +148,14 @@ func (q *Query) Conditions() []Condition { return conditions } -// Matches returns true if the query matches the given set of tags, false otherwise. +// Matches returns true if the query matches against any event in the given set +// of events, false otherwise. For each event, a match exists if the query is +// matched against *any* value in a slice of values. // -// For example, query "name=John" matches tags = {"name": "John"}. More -// examples could be found in parser_test.go and query_test.go. -func (q *Query) Matches(tags map[string]string) bool { - if len(tags) == 0 { +// For example, query "name=John" matches events = {"name": ["John", "Eric"]}. +// More examples could be found in parser_test.go and query_test.go. 
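+//
+// A hedged sketch of a call site (written from a consumer's perspective;
+// MustParse is the constructor the package tests use):
+//
+//	q := query.MustParse("name='John'")
+//	q.Matches(map[string][]string{"name": {"John", "Eric"}}) // true
+//	q.Matches(map[string][]string{"name": {"Eric"}})         // false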
+func (q *Query) Matches(events map[string][]string) bool { + if len(events) == 0 { return false } @@ -162,7 +164,8 @@ func (q *Query) Matches(tags map[string]string) bool { var tag string var op Operator - // tokens must be in the following order: tag ("tx.gas") -> operator ("=") -> operand ("7") + // tokens must be in the following order: + // tag ("tx.gas") -> operator ("=") -> operand ("7") for _, token := range q.parser.Tokens() { switch token.pegRule { @@ -188,7 +191,7 @@ func (q *Query) Matches(tags map[string]string) bool { // see if the triplet (tag, operator, operand) matches any tag // "tx.gas", "=", "7", { "tx.gas": 7, "tx.ID": "4AE393495334" } - if !match(tag, op, reflect.ValueOf(valueWithoutSingleQuotes), tags) { + if !match(tag, op, reflect.ValueOf(valueWithoutSingleQuotes), events) { return false } case rulenumber: @@ -198,7 +201,7 @@ func (q *Query) Matches(tags map[string]string) bool { if err != nil { panic(fmt.Sprintf("got %v while trying to parse %s as float64 (should never happen if the grammar is correct)", err, number)) } - if !match(tag, op, reflect.ValueOf(value), tags) { + if !match(tag, op, reflect.ValueOf(value), events) { return false } } else { @@ -206,7 +209,7 @@ func (q *Query) Matches(tags map[string]string) bool { if err != nil { panic(fmt.Sprintf("got %v while trying to parse %s as int64 (should never happen if the grammar is correct)", err, number)) } - if !match(tag, op, reflect.ValueOf(value), tags) { + if !match(tag, op, reflect.ValueOf(value), events) { return false } } @@ -215,7 +218,7 @@ func (q *Query) Matches(tags map[string]string) bool { if err != nil { panic(fmt.Sprintf("got %v while trying to parse %s as time.Time / RFC3339 (should never happen if the grammar is correct)", err, buffer[begin:end])) } - if !match(tag, op, reflect.ValueOf(value), tags) { + if !match(tag, op, reflect.ValueOf(value), events) { return false } case ruledate: @@ -223,7 +226,7 @@ func (q *Query) Matches(tags map[string]string) bool { if err != nil { panic(fmt.Sprintf("got %v while trying to parse %s as time.Time / '2006-01-02' (should never happen if the grammar is correct)", err, buffer[begin:end])) } - if !match(tag, op, reflect.ValueOf(value), tags) { + if !match(tag, op, reflect.ValueOf(value), events) { return false } } @@ -232,34 +235,53 @@ func (q *Query) Matches(tags map[string]string) bool { return true } -// match returns true if the given triplet (tag, operator, operand) matches any tag. +// match returns true if the given triplet (tag, operator, operand) matches any +// value in an event for that key. // -// First, it looks up the tag in tags and if it finds one, tries to compare the -// value from it to the operand using the operator. +// First, it looks up the key in the events and if it finds one, tries to compare +// all the values from it to the operand using the operator. 
 //
-// "tx.gas", "=", "7", { "tx.gas": 7, "tx.ID": "4AE393495334" }
-func match(tag string, op Operator, operand reflect.Value, tags map[string]string) bool {
+// "tx.gas", "=", "7", {"tx.gas": ["7"], "tx.ID": ["4AE393495334"]}
+func match(tag string, op Operator, operand reflect.Value, events map[string][]string) bool {
 	// look up the tag from the query in tags
-	value, ok := tags[tag]
+	values, ok := events[tag]
 	if !ok {
 		return false
 	}
+
+	for _, value := range values {
+		// return true if any value in the set of the event's values matches
+		if matchValue(value, op, operand) {
+			return true
+		}
+	}
+
+	return false
+}
+
+// matchValue will attempt to match a string value against an operation and an
+// operand. A boolean is returned representing the match result. It will panic
+// if an error occurs or if the operand is invalid.
+func matchValue(value string, op Operator, operand reflect.Value) bool {
 	switch operand.Kind() {
 	case reflect.Struct: // time
 		operandAsTime := operand.Interface().(time.Time)
+
 		// try our best to convert value from tags to time.Time
 		var (
 			v   time.Time
 			err error
 		)
+
 		if strings.ContainsAny(value, "T") {
 			v, err = time.Parse(TimeLayout, value)
 		} else {
 			v, err = time.Parse(DateLayout, value)
 		}
 		if err != nil {
-			panic(fmt.Sprintf("Failed to convert value %v from tag to time.Time: %v", value, err))
+			panic(fmt.Sprintf("failed to convert value %v from tag to time.Time: %v", value, err))
 		}
+
 		switch op {
 		case OpLessEqual:
 			return v.Before(operandAsTime) || v.Equal(operandAsTime)
@@ -272,14 +294,17 @@ func match(tag string, op Operator, operand reflect.Value, tags map[string]strin
 		case OpEqual:
 			return v.Equal(operandAsTime)
 		}
+
 	case reflect.Float64:
 		operandFloat64 := operand.Interface().(float64)
 		var v float64
+
 		// try our best to convert value from tags to float64
 		v, err := strconv.ParseFloat(value, 64)
 		if err != nil {
-			panic(fmt.Sprintf("Failed to convert value %v from tag to float64: %v", value, err))
+			panic(fmt.Sprintf("failed to convert value %v from tag to float64: %v", value, err))
 		}
+
 		switch op {
 		case OpLessEqual:
 			return v <= operandFloat64
@@ -292,6 +317,7 @@ func match(tag string, op Operator, operand reflect.Value, tags map[string]strin
 		case OpEqual:
 			return v == operandFloat64
 		}
+
 	case reflect.Int64:
 		operandInt := operand.Interface().(int64)
 		var v int64
@@ -299,7 +325,7 @@ func match(tag string, op Operator, operand reflect.Value, tags map[string]strin
 		if strings.ContainsAny(value, ".") {
 			v1, err := strconv.ParseFloat(value, 64)
 			if err != nil {
-				panic(fmt.Sprintf("Failed to convert value %v from tag to float64: %v", value, err))
+				panic(fmt.Sprintf("failed to convert value %v from tag to float64: %v", value, err))
 			}
 			v = int64(v1)
 		} else {
@@ -307,7 +333,7 @@ func match(tag string, op Operator, operand reflect.Value, tags map[string]strin
 			// try our best to convert value from tags to int64
 			v, err = strconv.ParseInt(value, 10, 64)
 			if err != nil {
-				panic(fmt.Sprintf("Failed to convert value %v from tag to int64: %v", value, err))
+				panic(fmt.Sprintf("failed to convert value %v from tag to int64: %v", value, err))
 			}
 		}
 		switch op {
@@ -322,6 +348,7 @@ func match(tag string, op Operator, operand reflect.Value, tags map[string]strin
 		case OpEqual:
 			return v == operandInt
 		}
+
 	case reflect.String:
 		switch op {
 		case OpEqual:
@@ -329,8 +356,9 @@ func match(tag string, op Operator, operand reflect.Value, tags map[string]strin
 		case OpContains:
 			return strings.Contains(value, operand.String())
 		}
+
 	default:
-
panic(fmt.Sprintf("unknown kind of operand %v", operand.Kind())) } return false diff --git a/libs/pubsub/query/query_test.go b/libs/pubsub/query/query_test.go index a3d83b25..10dc3d22 100644 --- a/libs/pubsub/query/query_test.go +++ b/libs/pubsub/query/query_test.go @@ -19,30 +19,40 @@ func TestMatches(t *testing.T) { testCases := []struct { s string - tags map[string]string + events map[string][]string err bool matches bool }{ - {"tm.events.type='NewBlock'", map[string]string{"tm.events.type": "NewBlock"}, false, true}, + {"tm.events.type='NewBlock'", map[string][]string{"tm.events.type": {"NewBlock"}}, false, true}, - {"tx.gas > 7", map[string]string{"tx.gas": "8"}, false, true}, - {"tx.gas > 7 AND tx.gas < 9", map[string]string{"tx.gas": "8"}, false, true}, - {"body.weight >= 3.5", map[string]string{"body.weight": "3.5"}, false, true}, - {"account.balance < 1000.0", map[string]string{"account.balance": "900"}, false, true}, - {"apples.kg <= 4", map[string]string{"apples.kg": "4.0"}, false, true}, - {"body.weight >= 4.5", map[string]string{"body.weight": fmt.Sprintf("%v", float32(4.5))}, false, true}, - {"oranges.kg < 4 AND watermellons.kg > 10", map[string]string{"oranges.kg": "3", "watermellons.kg": "12"}, false, true}, - {"peaches.kg < 4", map[string]string{"peaches.kg": "5"}, false, false}, + {"tx.gas > 7", map[string][]string{"tx.gas": {"8"}}, false, true}, + {"tx.gas > 7 AND tx.gas < 9", map[string][]string{"tx.gas": {"8"}}, false, true}, + {"body.weight >= 3.5", map[string][]string{"body.weight": {"3.5"}}, false, true}, + {"account.balance < 1000.0", map[string][]string{"account.balance": {"900"}}, false, true}, + {"apples.kg <= 4", map[string][]string{"apples.kg": {"4.0"}}, false, true}, + {"body.weight >= 4.5", map[string][]string{"body.weight": {fmt.Sprintf("%v", float32(4.5))}}, false, true}, + {"oranges.kg < 4 AND watermellons.kg > 10", map[string][]string{"oranges.kg": {"3"}, "watermellons.kg": {"12"}}, false, true}, + {"peaches.kg < 4", map[string][]string{"peaches.kg": {"5"}}, false, false}, - {"tx.date > DATE 2017-01-01", map[string]string{"tx.date": time.Now().Format(query.DateLayout)}, false, true}, - {"tx.date = DATE 2017-01-01", map[string]string{"tx.date": txDate}, false, true}, - {"tx.date = DATE 2018-01-01", map[string]string{"tx.date": txDate}, false, false}, + {"tx.date > DATE 2017-01-01", map[string][]string{"tx.date": {time.Now().Format(query.DateLayout)}}, false, true}, + {"tx.date = DATE 2017-01-01", map[string][]string{"tx.date": {txDate}}, false, true}, + {"tx.date = DATE 2018-01-01", map[string][]string{"tx.date": {txDate}}, false, false}, - {"tx.time >= TIME 2013-05-03T14:45:00Z", map[string]string{"tx.time": time.Now().Format(query.TimeLayout)}, false, true}, - {"tx.time = TIME 2013-05-03T14:45:00Z", map[string]string{"tx.time": txTime}, false, false}, + {"tx.time >= TIME 2013-05-03T14:45:00Z", map[string][]string{"tx.time": {time.Now().Format(query.TimeLayout)}}, false, true}, + {"tx.time = TIME 2013-05-03T14:45:00Z", map[string][]string{"tx.time": {txTime}}, false, false}, - {"abci.owner.name CONTAINS 'Igor'", map[string]string{"abci.owner.name": "Igor,Ivan"}, false, true}, - {"abci.owner.name CONTAINS 'Igor'", map[string]string{"abci.owner.name": "Pavel,Ivan"}, false, false}, + {"abci.owner.name CONTAINS 'Igor'", map[string][]string{"abci.owner.name": {"Igor,Ivan"}}, false, true}, + {"abci.owner.name CONTAINS 'Igor'", map[string][]string{"abci.owner.name": {"Pavel,Ivan"}}, false, false}, + + {"abci.owner.name = 'Igor'", 
map[string][]string{"abci.owner.name": {"Igor", "Ivan"}}, false, true},
+		{"abci.owner.name = 'Ivan'", map[string][]string{"abci.owner.name": {"Igor", "Ivan"}}, false, true},
+		{"abci.owner.name = 'Ivan' AND abci.owner.name = 'Igor'", map[string][]string{"abci.owner.name": {"Igor", "Ivan"}}, false, true},
+		{"abci.owner.name = 'Ivan' AND abci.owner.name = 'John'", map[string][]string{"abci.owner.name": {"Igor", "Ivan"}}, false, false},
+
+		{"tm.events.type='NewBlock'", map[string][]string{"tm.events.type": {"NewBlock"}, "app.name": {"fuzzed"}}, false, true},
+		{"app.name = 'fuzzed'", map[string][]string{"tm.events.type": {"NewBlock"}, "app.name": {"fuzzed"}}, false, true},
+		{"tm.events.type='NewBlock' AND app.name = 'fuzzed'", map[string][]string{"tm.events.type": {"NewBlock"}, "app.name": {"fuzzed"}}, false, true},
+		{"tm.events.type='NewHeader' AND app.name = 'fuzzed'", map[string][]string{"tm.events.type": {"NewBlock"}, "app.name": {"fuzzed"}}, false, false},
 	}
 
 	for _, tc := range testCases {
@@ -51,10 +61,12 @@
 			require.Nil(t, err)
 		}
 
+		require.NotNil(t, q, "Query '%s' should not be nil", tc.s)
+
 		if tc.matches {
-			assert.True(t, q.Matches(tc.tags), "Query '%s' should match %v", tc.s, tc.tags)
+			assert.True(t, q.Matches(tc.events), "Query '%s' should match %v", tc.s, tc.events)
 		} else {
-			assert.False(t, q.Matches(tc.tags), "Query '%s' should not match %v", tc.s, tc.tags)
+			assert.False(t, q.Matches(tc.events), "Query '%s' should not match %v", tc.s, tc.events)
 		}
 	}
 }
diff --git a/libs/pubsub/subscription.go b/libs/pubsub/subscription.go
index 2660439f..40c97c9e 100644
--- a/libs/pubsub/subscription.go
+++ b/libs/pubsub/subscription.go
@@ -70,12 +70,12 @@ func (s *Subscription) cancel(err error) {
 
 // Message glues data and events together.
 type Message struct {
-	data interface{}
-	tags map[string]string
+	data   interface{}
+	events map[string][]string
 }
 
-func NewMessage(data interface{}, tags map[string]string) Message {
-	return Message{data, tags}
+func NewMessage(data interface{}, events map[string][]string) Message {
+	return Message{data, events}
 }
 
 // Data returns the original data published.
@@ -83,7 +83,7 @@ func (msg Message) Data() interface{} {
 	return msg.data
 }
 
-// Tags returns tags, which matched the client's query.
-func (msg Message) Tags() map[string]string {
-	return msg.tags
+// Events returns events, which matched the client's query.
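+// For example (illustrative), a message that matched the subscription query
+// "tm.events.type='NewBlock'" might report:
+//
+//	msg.Events()["tm.events.type"] // []string{"NewBlock"}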
+func (msg Message) Events() map[string][]string { + return msg.events } diff --git a/lite/proxy/proxy.go b/lite/proxy/proxy.go index d3c16d4a..80343a53 100644 --- a/lite/proxy/proxy.go +++ b/lite/proxy/proxy.go @@ -5,12 +5,15 @@ import ( "net/http" amino "github.com/tendermint/go-amino" - "github.com/tendermint/tendermint/libs/log" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" rpcclient "github.com/tendermint/tendermint/rpc/client" "github.com/tendermint/tendermint/rpc/core" ctypes "github.com/tendermint/tendermint/rpc/core/types" rpcserver "github.com/tendermint/tendermint/rpc/lib/server" + rpctypes "github.com/tendermint/tendermint/rpc/lib/types" + "github.com/tendermint/tendermint/types" ) const ( @@ -66,21 +69,93 @@ func RPCRoutes(c rpcclient.Client) map[string]*rpcserver.RPCFunc { "unsubscribe_all": rpcserver.NewWSRPCFunc(c.(Wrapper).UnsubscribeAllWS, ""), // info API - "status": rpcserver.NewRPCFunc(c.Status, ""), - "blockchain": rpcserver.NewRPCFunc(c.BlockchainInfo, "minHeight,maxHeight"), - "genesis": rpcserver.NewRPCFunc(c.Genesis, ""), - "block": rpcserver.NewRPCFunc(c.Block, "height"), - "commit": rpcserver.NewRPCFunc(c.Commit, "height"), - "tx": rpcserver.NewRPCFunc(c.Tx, "hash,prove"), - "validators": rpcserver.NewRPCFunc(c.Validators, "height"), + "status": rpcserver.NewRPCFunc(makeStatusFunc(c), ""), + "blockchain": rpcserver.NewRPCFunc(makeBlockchainInfoFunc(c), "minHeight,maxHeight"), + "genesis": rpcserver.NewRPCFunc(makeGenesisFunc(c), ""), + "block": rpcserver.NewRPCFunc(makeBlockFunc(c), "height"), + "commit": rpcserver.NewRPCFunc(makeCommitFunc(c), "height"), + "tx": rpcserver.NewRPCFunc(makeTxFunc(c), "hash,prove"), + "validators": rpcserver.NewRPCFunc(makeValidatorsFunc(c), "height"), // broadcast API - "broadcast_tx_commit": rpcserver.NewRPCFunc(c.BroadcastTxCommit, "tx"), - "broadcast_tx_sync": rpcserver.NewRPCFunc(c.BroadcastTxSync, "tx"), - "broadcast_tx_async": rpcserver.NewRPCFunc(c.BroadcastTxAsync, "tx"), + "broadcast_tx_commit": rpcserver.NewRPCFunc(makeBroadcastTxCommitFunc(c), "tx"), + "broadcast_tx_sync": rpcserver.NewRPCFunc(makeBroadcastTxSyncFunc(c), "tx"), + "broadcast_tx_async": rpcserver.NewRPCFunc(makeBroadcastTxAsyncFunc(c), "tx"), // abci API - "abci_query": rpcserver.NewRPCFunc(c.ABCIQuery, "path,data"), - "abci_info": rpcserver.NewRPCFunc(c.ABCIInfo, ""), + "abci_query": rpcserver.NewRPCFunc(makeABCIQueryFunc(c), "path,data"), + "abci_info": rpcserver.NewRPCFunc(makeABCIInfoFunc(c), ""), + } +} + +func makeStatusFunc(c rpcclient.Client) func(ctx *rpctypes.Context) (*ctypes.ResultStatus, error) { + return func(ctx *rpctypes.Context) (*ctypes.ResultStatus, error) { + return c.Status() + } +} + +func makeBlockchainInfoFunc(c rpcclient.Client) func(ctx *rpctypes.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { + return func(ctx *rpctypes.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { + return c.BlockchainInfo(minHeight, maxHeight) + } +} + +func makeGenesisFunc(c rpcclient.Client) func(ctx *rpctypes.Context) (*ctypes.ResultGenesis, error) { + return func(ctx *rpctypes.Context) (*ctypes.ResultGenesis, error) { + return c.Genesis() + } +} + +func makeBlockFunc(c rpcclient.Client) func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultBlock, error) { + return func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultBlock, error) { + return c.Block(height) + } +} + +func makeCommitFunc(c rpcclient.Client) func(ctx *rpctypes.Context, 
height *int64) (*ctypes.ResultCommit, error) { + return func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultCommit, error) { + return c.Commit(height) + } +} + +func makeTxFunc(c rpcclient.Client) func(ctx *rpctypes.Context, hash []byte, prove bool) (*ctypes.ResultTx, error) { + return func(ctx *rpctypes.Context, hash []byte, prove bool) (*ctypes.ResultTx, error) { + return c.Tx(hash, prove) + } +} + +func makeValidatorsFunc(c rpcclient.Client) func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultValidators, error) { + return func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultValidators, error) { + return c.Validators(height) + } +} + +func makeBroadcastTxCommitFunc(c rpcclient.Client) func(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { + return func(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { + return c.BroadcastTxCommit(tx) + } +} + +func makeBroadcastTxSyncFunc(c rpcclient.Client) func(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { + return func(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { + return c.BroadcastTxSync(tx) + } +} + +func makeBroadcastTxAsyncFunc(c rpcclient.Client) func(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { + return func(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { + return c.BroadcastTxAsync(tx) + } +} + +func makeABCIQueryFunc(c rpcclient.Client) func(ctx *rpctypes.Context, path string, data cmn.HexBytes) (*ctypes.ResultABCIQuery, error) { + return func(ctx *rpctypes.Context, path string, data cmn.HexBytes) (*ctypes.ResultABCIQuery, error) { + return c.ABCIQuery(path, data) + } +} + +func makeABCIInfoFunc(c rpcclient.Client) func(ctx *rpctypes.Context) (*ctypes.ResultABCIInfo, error) { + return func(ctx *rpctypes.Context) (*ctypes.ResultABCIInfo, error) { + return c.ABCIInfo() } } diff --git a/lite/proxy/verifier.go b/lite/proxy/verifier.go index b7c11f18..429c54b2 100644 --- a/lite/proxy/verifier.go +++ b/lite/proxy/verifier.go @@ -14,7 +14,7 @@ func NewVerifier(chainID, rootDir string, client lclient.SignStatusClient, logge logger.Info("lite/proxy/NewVerifier()...", "chainID", chainID, "rootDir", rootDir, "client", client) memProvider := lite.NewDBProvider("trusted.mem", dbm.NewMemDB()).SetLimit(cacheSize) - lvlProvider := lite.NewDBProvider("trusted.lvl", dbm.NewDB("trust-base", dbm.LevelDBBackend, rootDir)) + lvlProvider := lite.NewDBProvider("trusted.lvl", dbm.NewDB("trust-base", dbm.GoLevelDBBackend, rootDir)) trust := lite.NewMultiProvider( memProvider, lvlProvider, diff --git a/mempool/cache_test.go b/mempool/cache_test.go index ea9f63fd..539bf119 100644 --- a/mempool/cache_test.go +++ b/mempool/cache_test.go @@ -8,6 +8,7 @@ import ( "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/abci/example/kvstore" + abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/proxy" "github.com/tendermint/tendermint/types" ) @@ -66,7 +67,7 @@ func TestCacheAfterUpdate(t *testing.T) { tx := types.Tx{byte(v)} updateTxs = append(updateTxs, tx) } - mempool.Update(int64(tcIndex), updateTxs, nil, nil) + mempool.Update(int64(tcIndex), updateTxs, abciResponses(len(updateTxs), abci.CodeTypeOK), nil, nil) for _, v := range tc.reAddIndices { tx := types.Tx{byte(v)} diff --git a/mempool/clist_mempool.go b/mempool/clist_mempool.go new file mode 100644 index 00000000..0d1f3c5b --- /dev/null +++ b/mempool/clist_mempool.go @@ -0,0 +1,711 @@ 
+package mempool
+
+import (
+ "bytes"
+ "container/list"
+ "crypto/sha256"
+ "fmt"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/pkg/errors"
+
+ abci "github.com/tendermint/tendermint/abci/types"
+ cfg "github.com/tendermint/tendermint/config"
+ auto "github.com/tendermint/tendermint/libs/autofile"
+ "github.com/tendermint/tendermint/libs/clist"
+ cmn "github.com/tendermint/tendermint/libs/common"
+ "github.com/tendermint/tendermint/libs/log"
+ "github.com/tendermint/tendermint/proxy"
+ "github.com/tendermint/tendermint/types"
+)
+
+//--------------------------------------------------------------------------------
+
+// CListMempool is an ordered in-memory pool for transactions before they are
+// proposed in a consensus round. Transaction validity is checked using the
+// CheckTx abci message before the transaction is added to the pool. The
+// mempool uses a concurrent list structure for storing transactions that can
+// be efficiently accessed by multiple concurrent readers.
+type CListMempool struct {
+ config *cfg.MempoolConfig
+
+ proxyMtx sync.Mutex
+ proxyAppConn proxy.AppConnMempool
+ txs *clist.CList // concurrent linked-list of good txs
+ preCheck PreCheckFunc
+ postCheck PostCheckFunc
+
+ // Track whether we're rechecking txs.
+ // These are not protected by a mutex and are expected to be mutated
+ // in serial (ie. by abci responses which are called in serial).
+ recheckCursor *clist.CElement // next expected response
+ recheckEnd *clist.CElement // re-checking stops here
+
+ // notify listeners (ie. consensus) when txs are available
+ notifiedTxsAvailable bool
+ txsAvailable chan struct{} // fires once for each height, when the mempool is not empty
+
+ // Map for quick access to txs to record sender in CheckTx.
+ // txsMap: txKey -> CElement
+ txsMap sync.Map
+
+ // Atomic integers
+ height int64 // the last block Update()'d to
+ rechecking int32 // for re-checking filtered txs on Update()
+ txsBytes int64 // total size of mempool, in bytes
+
+ // Keep a cache of already-seen txs.
+ // This reduces the pressure on the proxyApp.
+ cache txCache
+
+ // A log of mempool txs
+ wal *auto.AutoFile
+
+ logger log.Logger
+
+ metrics *Metrics
+}
+
+var _ Mempool = &CListMempool{}
+
+// CListMempoolOption sets an optional parameter on the mempool.
+type CListMempoolOption func(*CListMempool)
+
+// NewCListMempool returns a new mempool with the given configuration and connection to an application.
+func NewCListMempool(
+ config *cfg.MempoolConfig,
+ proxyAppConn proxy.AppConnMempool,
+ height int64,
+ options ...CListMempoolOption,
+) *CListMempool {
+ mempool := &CListMempool{
+ config: config,
+ proxyAppConn: proxyAppConn,
+ txs: clist.New(),
+ height: height,
+ rechecking: 0,
+ recheckCursor: nil,
+ recheckEnd: nil,
+ logger: log.NewNopLogger(),
+ metrics: NopMetrics(),
+ }
+ if config.CacheSize > 0 {
+ mempool.cache = newMapTxCache(config.CacheSize)
+ } else {
+ mempool.cache = nopTxCache{}
+ }
+ proxyAppConn.SetResponseCallback(mempool.globalCb)
+ for _, option := range options {
+ option(mempool)
+ }
+ return mempool
+}
+
+// NOTE: not thread safe - should only be called once, on startup
+func (mem *CListMempool) EnableTxsAvailable() {
+ mem.txsAvailable = make(chan struct{}, 1)
+}
+
+// SetLogger sets the Logger.
+func (mem *CListMempool) SetLogger(l log.Logger) {
+ mem.logger = l
+}
+
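For reviewers following the rename, here is a rough sketch of how the new constructor and its functional options fit together, using the same kvstore app and exported helpers the package tests rely on. The wiring is illustrative, not part of the patch:

```go
package main

import (
	"github.com/tendermint/tendermint/abci/example/kvstore"
	cfg "github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/mempool"
	"github.com/tendermint/tendermint/proxy"
)

func main() {
	// In-process kvstore app, as in the package tests.
	app := kvstore.NewKVStoreApplication()
	cc := proxy.NewLocalClientCreator(app)
	appConnMem, err := cc.NewABCIClient()
	if err != nil {
		panic(err)
	}
	if err := appConnMem.Start(); err != nil {
		panic(err)
	}

	mp := mempool.NewCListMempool(
		cfg.DefaultMempoolConfig(),
		appConnMem,
		0, // height of the last committed block
		mempool.WithPreCheck(mempool.PreCheckAminoMaxBytes(1024)),
		mempool.WithMetrics(mempool.NopMetrics()),
	)
	mp.SetLogger(log.TestingLogger())
}
```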
+// WithPreCheck sets a filter for the mempool to reject a tx if f(tx) returns
+// false. This is run before CheckTx.
+func WithPreCheck(f PreCheckFunc) CListMempoolOption {
+ return func(mem *CListMempool) { mem.preCheck = f }
+}
+
+// WithPostCheck sets a filter for the mempool to reject a tx if f(tx) returns
+// false. This is run after CheckTx.
+func WithPostCheck(f PostCheckFunc) CListMempoolOption {
+ return func(mem *CListMempool) { mem.postCheck = f }
+}
+
+// WithMetrics sets the metrics.
+func WithMetrics(metrics *Metrics) CListMempoolOption {
+ return func(mem *CListMempool) { mem.metrics = metrics }
+}
+
+// *panics* if it can't create the directory or open the file.
+// *not thread safe*
+func (mem *CListMempool) InitWAL() {
+ walDir := mem.config.WalDir()
+ err := cmn.EnsureDir(walDir, 0700)
+ if err != nil {
+ panic(errors.Wrap(err, "Error ensuring WAL dir"))
+ }
+ af, err := auto.OpenAutoFile(walDir + "/wal")
+ if err != nil {
+ panic(errors.Wrap(err, "Error opening WAL file"))
+ }
+ mem.wal = af
+}
+
+func (mem *CListMempool) CloseWAL() {
+ mem.proxyMtx.Lock()
+ defer mem.proxyMtx.Unlock()
+
+ if err := mem.wal.Close(); err != nil {
+ mem.logger.Error("Error closing WAL", "err", err)
+ }
+ mem.wal = nil
+}
+
+func (mem *CListMempool) Lock() {
+ mem.proxyMtx.Lock()
+}
+
+func (mem *CListMempool) Unlock() {
+ mem.proxyMtx.Unlock()
+}
+
+func (mem *CListMempool) Size() int {
+ return mem.txs.Len()
+}
+
+func (mem *CListMempool) TxsBytes() int64 {
+ return atomic.LoadInt64(&mem.txsBytes)
+}
+
+func (mem *CListMempool) FlushAppConn() error {
+ return mem.proxyAppConn.FlushSync()
+}
+
+func (mem *CListMempool) Flush() {
+ mem.proxyMtx.Lock()
+ defer mem.proxyMtx.Unlock()
+
+ mem.cache.Reset()
+
+ for e := mem.txs.Front(); e != nil; e = e.Next() {
+ mem.txs.Remove(e)
+ e.DetachPrev()
+ }
+
+ mem.txsMap = sync.Map{}
+ _ = atomic.SwapInt64(&mem.txsBytes, 0)
+}
+
+// TxsFront returns the first transaction in the ordered list for peer
+// goroutines to call .NextWait() on.
+// FIXME: leaking implementation details!
+func (mem *CListMempool) TxsFront() *clist.CElement {
+ return mem.txs.Front()
+}
+
+// TxsWaitChan returns a channel to wait on transactions. It will be closed
+// once the mempool is not empty (ie. the internal `mem.txs` has at least one
+// element)
+func (mem *CListMempool) TxsWaitChan() <-chan struct{} {
+ return mem.txs.WaitChan()
+}
+
+// It blocks if we're waiting on Update() or Reap().
+// cb: A callback from the CheckTx command.
+// It gets called from another goroutine.
+// CONTRACT: Either cb will get called, or err returned.
+func (mem *CListMempool) CheckTx(tx types.Tx, cb func(*abci.Response)) (err error) {
+ return mem.CheckTxWithInfo(tx, cb, TxInfo{SenderID: UnknownPeerID})
+}
+
+func (mem *CListMempool) CheckTxWithInfo(tx types.Tx, cb func(*abci.Response), txInfo TxInfo) (err error) {
+ mem.proxyMtx.Lock()
+ // use defer to unlock mutex because application (*local client*) might panic
+ defer mem.proxyMtx.Unlock()
+
+ var (
+ memSize = mem.Size()
+ txsBytes = mem.TxsBytes()
+ )
+ if memSize >= mem.config.Size ||
+ int64(len(tx))+txsBytes > mem.config.MaxTxsBytes {
+ return ErrMempoolIsFull{
+ memSize, mem.config.Size,
+ txsBytes, mem.config.MaxTxsBytes}
+ }
+
+ // The size of the corresponding amino-encoded TxMessage
+ // can't be larger than the maxMsgSize, otherwise we can't
+ // relay it to peers.
+ if len(tx) > maxTxSize {
+ return ErrTxTooLarge
+ }
+
+ if mem.preCheck != nil {
+ if err := mem.preCheck(tx); err != nil {
+ return ErrPreCheck{err}
+ }
+ }
+
+ // CACHE
+ if !mem.cache.Push(tx) {
+ // Record a new sender for a tx we've already seen.
+ // Note it's possible a tx is still in the cache but no longer in the mempool
+ // (eg. after committing a block, txs are removed from mempool but not cache),
+ // so we only record the sender for txs still in the mempool.
+ if e, ok := mem.txsMap.Load(txKey(tx)); ok {
+ memTx := e.(*clist.CElement).Value.(*mempoolTx)
+ if _, loaded := memTx.senders.LoadOrStore(txInfo.SenderID, true); loaded {
+ // TODO: consider punishing peer for dups,
+ // it's non-trivial since invalid txs can become valid,
+ // but they can spam the same tx with little cost to them atm.
+ }
+ }
+
+ return ErrTxInCache
+ }
+ // END CACHE
+
+ // WAL
+ if mem.wal != nil {
+ // TODO: Notify administrators when WAL fails
+ _, err := mem.wal.Write([]byte(tx))
+ if err != nil {
+ mem.logger.Error("Error writing to WAL", "err", err)
+ }
+ _, err = mem.wal.Write([]byte("\n"))
+ if err != nil {
+ mem.logger.Error("Error writing to WAL", "err", err)
+ }
+ }
+ // END WAL
+
+ // NOTE: proxyAppConn may error if tx buffer is full
+ if err = mem.proxyAppConn.Error(); err != nil {
+ return err
+ }
+
+ reqRes := mem.proxyAppConn.CheckTxAsync(tx)
+ reqRes.SetCallback(mem.reqResCb(tx, txInfo.SenderID, cb))
+
+ return nil
+}
+
+// Global callback that will be called after every ABCI response.
+// Having a single global callback avoids needing to set a callback for each request.
+// However, processing the checkTx response requires the peerID (so we can track which txs we heard from whom),
+// and peerID is not included in the ABCI request, so we have to set request-specific callbacks that
+// include this information. If we're not in the midst of a recheck, this function will just return,
+// so the request-specific callback can do the work.
+// When rechecking, we don't need the peerID, so the recheck callback happens here.
+func (mem *CListMempool) globalCb(req *abci.Request, res *abci.Response) {
+ if mem.recheckCursor == nil {
+ return
+ }
+
+ mem.metrics.RecheckTimes.Add(1)
+ mem.resCbRecheck(req, res)
+
+ // update metrics
+ mem.metrics.Size.Set(float64(mem.Size()))
+}
+
+// Request-specific callback that should be set on individual reqRes objects
+// to incorporate local information when processing the response.
+// This allows us to track the peer that sent us this tx, so we can avoid sending it back to them.
+// NOTE: alternatively, we could include this information in the ABCI request itself.
+//
+// External callers of CheckTx, like the RPC, can also pass an externalCb through here that is called
+// when all other response processing is complete.
+//
+// Used in CheckTxWithInfo to record the PeerID that sent us the tx.
+func (mem *CListMempool) reqResCb(tx []byte, peerID uint16, externalCb func(*abci.Response)) func(res *abci.Response) {
+ return func(res *abci.Response) {
+ if mem.recheckCursor != nil {
+ // this should never happen
+ panic("recheck cursor is not nil in reqResCb")
+ }
+
+ mem.resCbFirstTime(tx, peerID, res)
+
+ // update metrics
+ mem.metrics.Size.Set(float64(mem.Size()))
+
+ // passed in by the caller of CheckTx, eg.
the RPC + if externalCb != nil { + externalCb(res) + } + } +} + +// Called from: +// - resCbFirstTime (lock not held) if tx is valid +func (mem *CListMempool) addTx(memTx *mempoolTx) { + e := mem.txs.PushBack(memTx) + mem.txsMap.Store(txKey(memTx.tx), e) + atomic.AddInt64(&mem.txsBytes, int64(len(memTx.tx))) + mem.metrics.TxSizeBytes.Observe(float64(len(memTx.tx))) +} + +// Called from: +// - Update (lock held) if tx was committed +// - resCbRecheck (lock not held) if tx was invalidated +func (mem *CListMempool) removeTx(tx types.Tx, elem *clist.CElement, removeFromCache bool) { + mem.txs.Remove(elem) + elem.DetachPrev() + mem.txsMap.Delete(txKey(tx)) + atomic.AddInt64(&mem.txsBytes, int64(-len(tx))) + + if removeFromCache { + mem.cache.Remove(tx) + } +} + +// callback, which is called after the app checked the tx for the first time. +// +// The case where the app checks the tx for the second and subsequent times is +// handled by the resCbRecheck callback. +func (mem *CListMempool) resCbFirstTime(tx []byte, peerID uint16, res *abci.Response) { + switch r := res.Value.(type) { + case *abci.Response_CheckTx: + var postCheckErr error + if mem.postCheck != nil { + postCheckErr = mem.postCheck(tx, r.CheckTx) + } + if (r.CheckTx.Code == abci.CodeTypeOK) && postCheckErr == nil { + memTx := &mempoolTx{ + height: mem.height, + gasWanted: r.CheckTx.GasWanted, + tx: tx, + } + memTx.senders.Store(peerID, true) + mem.addTx(memTx) + mem.logger.Info("Added good transaction", + "tx", txID(tx), + "res", r, + "height", memTx.height, + "total", mem.Size(), + ) + mem.notifyTxsAvailable() + } else { + // ignore bad transaction + mem.logger.Info("Rejected bad transaction", "tx", txID(tx), "res", r, "err", postCheckErr) + mem.metrics.FailedTxs.Add(1) + // remove from cache (it might be good later) + mem.cache.Remove(tx) + } + default: + // ignore other messages + } +} + +// callback, which is called after the app rechecked the tx. +// +// The case where the app checks the tx for the first time is handled by the +// resCbFirstTime callback. +func (mem *CListMempool) resCbRecheck(req *abci.Request, res *abci.Response) { + switch r := res.Value.(type) { + case *abci.Response_CheckTx: + tx := req.GetCheckTx().Tx + memTx := mem.recheckCursor.Value.(*mempoolTx) + if !bytes.Equal(tx, memTx.tx) { + panic(fmt.Sprintf( + "Unexpected tx response from proxy during recheck\nExpected %X, got %X", + memTx.tx, + tx)) + } + var postCheckErr error + if mem.postCheck != nil { + postCheckErr = mem.postCheck(tx, r.CheckTx) + } + if (r.CheckTx.Code == abci.CodeTypeOK) && postCheckErr == nil { + // Good, nothing to do. + } else { + // Tx became invalidated due to newly committed block. + mem.logger.Info("Tx is no longer valid", "tx", txID(tx), "res", r, "err", postCheckErr) + // NOTE: we remove tx from the cache because it might be good later + mem.removeTx(tx, mem.recheckCursor, true) + } + if mem.recheckCursor == mem.recheckEnd { + mem.recheckCursor = nil + } else { + mem.recheckCursor = mem.recheckCursor.Next() + } + if mem.recheckCursor == nil { + // Done! 
+ atomic.StoreInt32(&mem.rechecking, 0)
+ mem.logger.Info("Done rechecking txs")
+
+ // in case the recheck removed all txs
+ if mem.Size() > 0 {
+ mem.notifyTxsAvailable()
+ }
+ }
+ default:
+ // ignore other messages
+ }
+}
+
+func (mem *CListMempool) TxsAvailable() <-chan struct{} {
+ return mem.txsAvailable
+}
+
+func (mem *CListMempool) notifyTxsAvailable() {
+ if mem.Size() == 0 {
+ panic("notified txs available but mempool is empty!")
+ }
+ if mem.txsAvailable != nil && !mem.notifiedTxsAvailable {
+ // channel cap is 1, so this will send once
+ mem.notifiedTxsAvailable = true
+ select {
+ case mem.txsAvailable <- struct{}{}:
+ default:
+ }
+ }
+}
+
+func (mem *CListMempool) ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs {
+ mem.proxyMtx.Lock()
+ defer mem.proxyMtx.Unlock()
+
+ for atomic.LoadInt32(&mem.rechecking) > 0 {
+ // TODO: Something better?
+ time.Sleep(time.Millisecond * 10)
+ }
+
+ var totalBytes int64
+ var totalGas int64
+ // TODO: we will get a performance boost if we have a good estimate of avg
+ // size per tx, and set the initial capacity based off of that.
+ // txs := make([]types.Tx, 0, cmn.MinInt(mem.txs.Len(), max/mem.avgTxSize))
+ txs := make([]types.Tx, 0, mem.txs.Len())
+ for e := mem.txs.Front(); e != nil; e = e.Next() {
+ memTx := e.Value.(*mempoolTx)
+ // Check total size requirement
+ aminoOverhead := types.ComputeAminoOverhead(memTx.tx, 1)
+ if maxBytes > -1 && totalBytes+int64(len(memTx.tx))+aminoOverhead > maxBytes {
+ return txs
+ }
+ totalBytes += int64(len(memTx.tx)) + aminoOverhead
+ // Check total gas requirement.
+ // If maxGas is negative, skip this check.
+ // Since newTotalGas < maxGas, which
+ // must be non-negative, it follows that this won't overflow.
+ newTotalGas := totalGas + memTx.gasWanted
+ if maxGas > -1 && newTotalGas > maxGas {
+ return txs
+ }
+ totalGas = newTotalGas
+ txs = append(txs, memTx.tx)
+ }
+ return txs
+}
+
+func (mem *CListMempool) ReapMaxTxs(max int) types.Txs {
+ mem.proxyMtx.Lock()
+ defer mem.proxyMtx.Unlock()
+
+ if max < 0 {
+ max = mem.txs.Len()
+ }
+
+ for atomic.LoadInt32(&mem.rechecking) > 0 {
+ // TODO: Something better?
+ time.Sleep(time.Millisecond * 10)
+ }
+
+ txs := make([]types.Tx, 0, cmn.MinInt(mem.txs.Len(), max))
+ for e := mem.txs.Front(); e != nil && len(txs) <= max; e = e.Next() {
+ memTx := e.Value.(*mempoolTx)
+ txs = append(txs, memTx.tx)
+ }
+ return txs
+}
+
+func (mem *CListMempool) Update(
+ height int64,
+ txs types.Txs,
+ deliverTxResponses []*abci.ResponseDeliverTx,
+ preCheck PreCheckFunc,
+ postCheck PostCheckFunc,
+) error {
+ // Set height
+ mem.height = height
+ mem.notifiedTxsAvailable = false
+
+ if preCheck != nil {
+ mem.preCheck = preCheck
+ }
+ if postCheck != nil {
+ mem.postCheck = postCheck
+ }
+
+ for i, tx := range txs {
+ if deliverTxResponses[i].Code == abci.CodeTypeOK {
+ // Add valid committed tx to the cache (if missing).
+ _ = mem.cache.Push(tx)
+ } else {
+ // Allow invalid transactions to be resubmitted.
+ mem.cache.Remove(tx)
+ }
+
+ // Remove committed tx from the mempool.
+ //
+ // Note an evil proposer can drop valid txs!
+ // Mempool before:
+ // 100 -> 101 -> 102
+ // Block, proposed by an evil proposer:
+ // 101 -> 102
+ // Mempool after:
+ // 100
+ // https://github.com/tendermint/tendermint/issues/3322.
+ if e, ok := mem.txsMap.Load(txKey(tx)); ok {
+ mem.removeTx(tx, e.(*clist.CElement), false)
+ }
+ }
+
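Because `Update` now takes the block's DeliverTx results, callers must supply one `ResponseDeliverTx` per committed tx. A minimal sketch of the post-commit sequence; the helper name `updateAfterCommit` is invented for illustration, and the lock/flush/update ordering mirrors how consensus drives the `Mempool` interface:

```go
package example

import (
	abci "github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/mempool"
	"github.com/tendermint/tendermint/types"
)

// updateAfterCommit sketches what happens once a block is committed:
// lock the pool, drain in-flight CheckTx callbacks, then Update with
// one DeliverTx response per committed tx.
func updateAfterCommit(mp mempool.Mempool, height int64, txs types.Txs,
	responses []*abci.ResponseDeliverTx) error {

	mp.Lock()
	defer mp.Unlock()

	if err := mp.FlushAppConn(); err != nil {
		return err
	}
	// Txs with Code == abci.CodeTypeOK stay in the cache; all others
	// are evicted from the cache so they can be resubmitted later.
	return mp.Update(height, txs, responses, nil, nil)
}
```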
+ // Either recheck non-committed txs to see if they became invalid
+ // or just notify there are some txs left.
+ if mem.Size() > 0 {
+ if mem.config.Recheck {
+ mem.logger.Info("Recheck txs", "numtxs", mem.Size(), "height", height)
+ mem.recheckTxs()
+ // At this point, mem.txs are being rechecked.
+ // mem.recheckCursor re-scans mem.txs and possibly removes some txs.
+ // Before mem.Reap(), we should wait for mem.recheckCursor to be nil.
+ } else {
+ mem.notifyTxsAvailable()
+ }
+ }
+
+ // Update metrics
+ mem.metrics.Size.Set(float64(mem.Size()))
+
+ return nil
+}
+
+func (mem *CListMempool) recheckTxs() {
+ if mem.Size() == 0 {
+ panic("recheckTxs is called, but the mempool is empty")
+ }
+
+ atomic.StoreInt32(&mem.rechecking, 1)
+ mem.recheckCursor = mem.txs.Front()
+ mem.recheckEnd = mem.txs.Back()
+
+ // Push txs to proxyAppConn
+ // NOTE: globalCb may be called concurrently.
+ for e := mem.txs.Front(); e != nil; e = e.Next() {
+ memTx := e.Value.(*mempoolTx)
+ mem.proxyAppConn.CheckTxAsync(memTx.tx)
+ }
+
+ mem.proxyAppConn.FlushAsync()
+}
+
+//--------------------------------------------------------------------------------
+
+// mempoolTx is a transaction that successfully ran CheckTx
+type mempoolTx struct {
+ height int64 // height that this tx had been validated in
+ gasWanted int64 // amount of gas this tx states it will require
+ tx types.Tx //
+
+ // ids of peers who've sent us this tx (as a map for quick lookups).
+ // senders: PeerID -> bool
+ senders sync.Map
+}
+
+// Height returns the height for this transaction
+func (memTx *mempoolTx) Height() int64 {
+ return atomic.LoadInt64(&memTx.height)
+}
+
+//--------------------------------------------------------------------------------
+
+type txCache interface {
+ Reset()
+ Push(tx types.Tx) bool
+ Remove(tx types.Tx)
+}
+
+// mapTxCache maintains an LRU cache of transactions. This only stores the hash
+// of the tx, due to memory concerns.
+type mapTxCache struct {
+ mtx sync.Mutex
+ size int
+ map_ map[[sha256.Size]byte]*list.Element
+ list *list.List
+}
+
+var _ txCache = (*mapTxCache)(nil)
+
+// newMapTxCache returns a new mapTxCache.
+func newMapTxCache(cacheSize int) *mapTxCache {
+ return &mapTxCache{
+ size: cacheSize,
+ map_: make(map[[sha256.Size]byte]*list.Element, cacheSize),
+ list: list.New(),
+ }
+}
+
+// Reset resets the cache to an empty state.
+func (cache *mapTxCache) Reset() {
+ cache.mtx.Lock()
+ cache.map_ = make(map[[sha256.Size]byte]*list.Element, cache.size)
+ cache.list.Init()
+ cache.mtx.Unlock()
+}
+
+// Push adds the given tx to the cache and returns true. It returns
+// false if tx is already in the cache.
+func (cache *mapTxCache) Push(tx types.Tx) bool {
+ cache.mtx.Lock()
+ defer cache.mtx.Unlock()
+
+ // Use the tx hash in the cache
+ txHash := txKey(tx)
+ if moved, exists := cache.map_[txHash]; exists {
+ cache.list.MoveToBack(moved)
+ return false
+ }
+
+ if cache.list.Len() >= cache.size {
+ popped := cache.list.Front()
+ if popped != nil {
+ poppedTxHash := popped.Value.([sha256.Size]byte)
+ delete(cache.map_, poppedTxHash)
+ cache.list.Remove(popped)
+ }
+ }
+ e := cache.list.PushBack(txHash)
+ cache.map_[txHash] = e
+ return true
+}
+
+// Remove removes the given tx from the cache.
+func (cache *mapTxCache) Remove(tx types.Tx) { + cache.mtx.Lock() + txHash := txKey(tx) + popped := cache.map_[txHash] + delete(cache.map_, txHash) + if popped != nil { + cache.list.Remove(popped) + } + + cache.mtx.Unlock() +} + +type nopTxCache struct{} + +var _ txCache = (*nopTxCache)(nil) + +func (nopTxCache) Reset() {} +func (nopTxCache) Push(types.Tx) bool { return true } +func (nopTxCache) Remove(types.Tx) {} + +//-------------------------------------------------------------------------------- + +// txKey is the fixed length array sha256 hash used as the key in maps. +func txKey(tx types.Tx) [sha256.Size]byte { + return sha256.Sum256(tx) +} + +// txID is the hex encoded hash of the bytes as a types.Tx. +func txID(tx []byte) string { + return fmt.Sprintf("%X", types.Tx(tx).Hash()) +} diff --git a/mempool/mempool_test.go b/mempool/clist_mempool_test.go similarity index 88% rename from mempool/mempool_test.go rename to mempool/clist_mempool_test.go index d5f25396..bf2c61dd 100644 --- a/mempool/mempool_test.go +++ b/mempool/clist_mempool_test.go @@ -32,18 +32,18 @@ import ( // test. type cleanupFunc func() -func newMempoolWithApp(cc proxy.ClientCreator) (*Mempool, cleanupFunc) { +func newMempoolWithApp(cc proxy.ClientCreator) (*CListMempool, cleanupFunc) { return newMempoolWithAppAndConfig(cc, cfg.ResetTestRoot("mempool_test")) } -func newMempoolWithAppAndConfig(cc proxy.ClientCreator, config *cfg.Config) (*Mempool, cleanupFunc) { +func newMempoolWithAppAndConfig(cc proxy.ClientCreator, config *cfg.Config) (*CListMempool, cleanupFunc) { appConnMem, _ := cc.NewABCIClient() appConnMem.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "mempool")) err := appConnMem.Start() if err != nil { panic(err) } - mempool := NewMempool(config.Mempool, appConnMem, 0) + mempool := NewCListMempool(config.Mempool, appConnMem, 0) mempool.SetLogger(log.TestingLogger()) return mempool, func() { os.RemoveAll(config.RootDir) } } @@ -66,9 +66,9 @@ func ensureFire(t *testing.T, ch <-chan struct{}, timeoutMS int) { } } -func checkTxs(t *testing.T, mempool *Mempool, count int, peerID uint16) types.Txs { +func checkTxs(t *testing.T, mempool Mempool, count int, peerID uint16) types.Txs { txs := make(types.Txs, count) - txInfo := TxInfo{PeerID: peerID} + txInfo := TxInfo{SenderID: peerID} for i := 0; i < count; i++ { txBytes := make([]byte, 20) txs[i] = txBytes @@ -170,22 +170,45 @@ func TestMempoolFilters(t *testing.T) { {10, PreCheckAminoMaxBytes(22), PostCheckMaxGas(0), 0}, } for tcIndex, tt := range tests { - mempool.Update(1, emptyTxArr, tt.preFilter, tt.postFilter) + mempool.Update(1, emptyTxArr, abciResponses(len(emptyTxArr), abci.CodeTypeOK), tt.preFilter, tt.postFilter) checkTxs(t, mempool, tt.numTxsToCreate, UnknownPeerID) require.Equal(t, tt.expectedNumTxs, mempool.Size(), "mempool had the incorrect size, on test case %d", tcIndex) mempool.Flush() } } -func TestMempoolUpdateAddsTxsToCache(t *testing.T) { +func TestMempoolUpdate(t *testing.T) { app := kvstore.NewKVStoreApplication() cc := proxy.NewLocalClientCreator(app) mempool, cleanup := newMempoolWithApp(cc) defer cleanup() - mempool.Update(1, []types.Tx{[]byte{0x01}}, nil, nil) - err := mempool.CheckTx([]byte{0x01}, nil) - if assert.Error(t, err) { - assert.Equal(t, ErrTxInCache, err) + + // 1. 
Adds valid txs to the cache + { + mempool.Update(1, []types.Tx{[]byte{0x01}}, abciResponses(1, abci.CodeTypeOK), nil, nil) + err := mempool.CheckTx([]byte{0x01}, nil) + if assert.Error(t, err) { + assert.Equal(t, ErrTxInCache, err) + } + } + + // 2. Removes valid txs from the mempool + { + err := mempool.CheckTx([]byte{0x02}, nil) + require.NoError(t, err) + mempool.Update(1, []types.Tx{[]byte{0x02}}, abciResponses(1, abci.CodeTypeOK), nil, nil) + assert.Zero(t, mempool.Size()) + } + + // 3. Removes invalid transactions from the cache and the mempool (if present) + { + err := mempool.CheckTx([]byte{0x03}, nil) + require.NoError(t, err) + mempool.Update(1, []types.Tx{[]byte{0x03}}, abciResponses(1, 1), nil, nil) + assert.Zero(t, mempool.Size()) + + err = mempool.CheckTx([]byte{0x03}, nil) + assert.NoError(t, err) } } @@ -210,7 +233,7 @@ func TestTxsAvailable(t *testing.T) { // it should fire once now for the new height // since there are still txs left committedTxs, txs := txs[:50], txs[50:] - if err := mempool.Update(1, committedTxs, nil, nil); err != nil { + if err := mempool.Update(1, committedTxs, abciResponses(len(committedTxs), abci.CodeTypeOK), nil, nil); err != nil { t.Error(err) } ensureFire(t, mempool.TxsAvailable(), timeoutMS) @@ -222,7 +245,7 @@ func TestTxsAvailable(t *testing.T) { // now call update with all the txs. it should not fire as there are no txs left committedTxs = append(txs, moreTxs...) - if err := mempool.Update(2, committedTxs, nil, nil); err != nil { + if err := mempool.Update(2, committedTxs, abciResponses(len(committedTxs), abci.CodeTypeOK), nil, nil); err != nil { t.Error(err) } ensureNoFire(t, mempool.TxsAvailable(), timeoutMS) @@ -281,7 +304,7 @@ func TestSerialReap(t *testing.T) { binary.BigEndian.PutUint64(txBytes, uint64(i)) txs = append(txs, txBytes) } - if err := mempool.Update(0, txs, nil, nil); err != nil { + if err := mempool.Update(0, txs, abciResponses(len(txs), abci.CodeTypeOK), nil, nil); err != nil { t.Error(err) } } @@ -348,7 +371,6 @@ func TestMempoolCloseWAL(t *testing.T) { // 1. Create the temporary directory for mempool and WAL testing. rootDir, err := ioutil.TempDir("", "mempool-test") require.Nil(t, err, "expecting successful tmpdir creation") - defer os.RemoveAll(rootDir) // 2. Ensure that it doesn't contain any elements -- Sanity check m1, err := filepath.Glob(filepath.Join(rootDir, "*")) @@ -356,13 +378,13 @@ func TestMempoolCloseWAL(t *testing.T) { require.Equal(t, 0, len(m1), "no matches yet") // 3. Create the mempool - wcfg := cfg.DefaultMempoolConfig() - wcfg.RootDir = rootDir - defer os.RemoveAll(wcfg.RootDir) + wcfg := cfg.DefaultConfig() + wcfg.Mempool.RootDir = rootDir app := kvstore.NewKVStoreApplication() cc := proxy.NewLocalClientCreator(app) - appConnMem, _ := cc.NewABCIClient() - mempool := NewMempool(wcfg, appConnMem, 10) + mempool, cleanup := newMempoolWithAppAndConfig(cc, wcfg) + defer cleanup() + mempool.height = 10 mempool.InitWAL() // 4. Ensure that the directory contains the WAL file @@ -463,7 +485,7 @@ func TestMempoolTxsBytes(t *testing.T) { assert.EqualValues(t, 1, mempool.TxsBytes()) // 3. zero again after tx is removed by Update - mempool.Update(1, []types.Tx{[]byte{0x01}}, nil, nil) + mempool.Update(1, []types.Tx{[]byte{0x01}}, abciResponses(1, abci.CodeTypeOK), nil, nil) assert.EqualValues(t, 0, mempool.TxsBytes()) // 4. zero after Flush @@ -508,7 +530,7 @@ func TestMempoolTxsBytes(t *testing.T) { require.NotEmpty(t, res2.Data) // Pretend like we committed nothing so txBytes gets rechecked and removed. 
- mempool.Update(1, []types.Tx{}, nil, nil)
+ mempool.Update(1, []types.Tx{}, abciResponses(0, abci.CodeTypeOK), nil, nil)
 assert.EqualValues(t, 0, mempool.TxsBytes())
 }
 
@@ -542,7 +564,7 @@ func TestMempoolRemoteAppConcurrency(t *testing.T) {
 tx := txs[int(txNum)]
 
 // this will err with ErrTxInCache many times ...
- mempool.CheckTxWithInfo(tx, nil, TxInfo{PeerID: uint16(peerID)})
+ mempool.CheckTxWithInfo(tx, nil, TxInfo{SenderID: uint16(peerID)})
 }
 err := mempool.FlushAppConn()
 require.NoError(t, err)
@@ -571,3 +593,11 @@ func checksumFile(p string, t *testing.T) string {
 require.Nil(t, err, "expecting successful read of %q", p)
 return checksumIt(data)
 }
+
+func abciResponses(n int, code uint32) []*abci.ResponseDeliverTx {
+ responses := make([]*abci.ResponseDeliverTx, 0, n)
+ for i := 0; i < n; i++ {
+ responses = append(responses, &abci.ResponseDeliverTx{Code: code})
+ }
+ return responses
+}
diff --git a/mempool/doc.go b/mempool/doc.go
new file mode 100644
index 00000000..ddd47aa2
--- /dev/null
+++ b/mempool/doc.go
@@ -0,0 +1,24 @@
+// The mempool pushes new txs onto the proxyAppConn.
+// It gets a stream of (req, res) tuples from the proxy.
+// The mempool stores good txs in a concurrent linked-list.
+
+// Multiple concurrent go-routines can traverse this linked-list
+// safely by calling .NextWait() on each element.
+
+// So we have several go-routines:
+// 1. Consensus calling Update() and Reap() synchronously
+// 2. Many mempool reactor's peer routines calling CheckTx()
+// 3. Many mempool reactor's peer routines traversing the txs linked list
+// 4. Another goroutine calling GarbageCollectTxs() periodically
+
+// To manage these goroutines, there are three methods of locking.
+// 1. Mutations to the linked-list are protected by an internal mtx (CList is goroutine-safe)
+// 2. Mutations to the linked-list elements are atomic
+// 3. CheckTx() calls can be paused upon Update() and Reap(), protected by .proxyMtx
+
+// Garbage collection of old elements from mempool.txs is handled via
+// the DetachPrev() call, which makes old elements unreachable by the
+// peer broadcastTxRoutine(), so they are automatically garbage collected.
+
+// TODO: Better handle abci client errors. (make it automatically handle connection errors)
+package mempool
diff --git a/mempool/errors.go b/mempool/errors.go
new file mode 100644
index 00000000..ac2a9b3c
--- /dev/null
+++ b/mempool/errors.go
@@ -0,0 +1,46 @@
+package mempool
+
+import (
+ "fmt"
+
+ "github.com/pkg/errors"
+)
+
+var (
+ // ErrTxInCache is returned to the client if we saw the tx earlier
+ ErrTxInCache = errors.New("Tx already exists in cache")
+
+ // ErrTxTooLarge means the tx is too big to be sent in a message to other peers
+ ErrTxTooLarge = fmt.Errorf("Tx too large. Max size is %d", maxTxSize)
+)
+
+// ErrMempoolIsFull means Tendermint & an application can't handle that much load
+type ErrMempoolIsFull struct {
+ numTxs int
+ maxTxs int
+
+ txsBytes int64
+ maxTxsBytes int64
+}
+
+func (e ErrMempoolIsFull) Error() string {
+ return fmt.Sprintf(
+ "mempool is full: number of txs %d (max: %d), total txs bytes %d (max: %d)",
+ e.numTxs, e.maxTxs,
+ e.txsBytes, e.maxTxsBytes)
+}
+
+// ErrPreCheck is returned when a tx fails the mempool's pre-check filter
+type ErrPreCheck struct {
+ Reason error
+}
+
+func (e ErrPreCheck) Error() string {
+ return e.Reason.Error()
+}
+
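Callers of `CheckTx` can branch on these values. A short sketch, illustrative only, with an invented `classifyCheckTxErr` helper, showing how the sentinel errors, the `ErrPreCheck` wrapper, and `ErrMempoolIsFull` might be told apart:

```go
package example

import (
	"github.com/tendermint/tendermint/mempool"
)

// classifyCheckTxErr maps the mempool's error values to rough
// categories; the category names are illustrative only.
func classifyCheckTxErr(err error) string {
	switch {
	case err == nil:
		return "accepted"
	case err == mempool.ErrTxInCache:
		return "duplicate tx"
	case err == mempool.ErrTxTooLarge:
		return "too large to relay"
	case mempool.IsPreCheckError(err):
		return "failed pre-check filter"
	}
	if _, ok := err.(mempool.ErrMempoolIsFull); ok {
		return "mempool full, apply backpressure"
	}
	return "unexpected: " + err.Error()
}
```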
+// IsPreCheckError returns true if err is due to a pre-check failure.
+func IsPreCheckError(err error) bool {
+ _, ok := err.(ErrPreCheck)
+ return ok
+}
diff --git a/mempool/mempool.go b/mempool/mempool.go
index a5b14466..0995c734 100644
--- a/mempool/mempool.go
+++ b/mempool/mempool.go
@@ -1,26 +1,82 @@
 package mempool
 
 import (
- "bytes"
- "container/list"
- "crypto/sha256"
 "fmt"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/pkg/errors"
 
 abci "github.com/tendermint/tendermint/abci/types"
- cfg "github.com/tendermint/tendermint/config"
- auto "github.com/tendermint/tendermint/libs/autofile"
- "github.com/tendermint/tendermint/libs/clist"
- cmn "github.com/tendermint/tendermint/libs/common"
- "github.com/tendermint/tendermint/libs/log"
- "github.com/tendermint/tendermint/proxy"
 "github.com/tendermint/tendermint/types"
 )
 
+// Mempool defines the mempool interface.
+//
+// Updates to the mempool need to be synchronized with committing a block so
+// apps can reset their transient state on Commit.
+type Mempool interface {
+ // CheckTx executes a new transaction against the application to determine
+ // its validity and whether it should be added to the mempool.
+ CheckTx(tx types.Tx, callback func(*abci.Response)) error
+
+ // CheckTxWithInfo performs the same operation as CheckTx, but with extra
+ // metadata about the tx.
+ // Currently this metadata is the peer who sent it, used to prevent the tx
+ // from being gossiped back to them.
+ CheckTxWithInfo(tx types.Tx, callback func(*abci.Response), txInfo TxInfo) error
+
+ // ReapMaxBytesMaxGas reaps transactions from the mempool up to maxBytes
+ // bytes total with the condition that the total gasWanted must be less than
+ // maxGas.
+ // If both maxes are negative, there is no cap on the size of all returned
+ // transactions (~ all available transactions).
+ ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs
+
+ // ReapMaxTxs reaps up to max transactions from the mempool.
+ // If max is negative, there is no cap on the size of all returned
+ // transactions (~ all available transactions).
+ ReapMaxTxs(max int) types.Txs
+
+ // Lock locks the mempool. Consensus must be able to hold the lock to safely update.
+ Lock()
+
+ // Unlock unlocks the mempool.
+ Unlock()
+
+ // Update informs the mempool that the given txs were committed and can be discarded.
+ // NOTE: this should be called *after* block is committed by consensus.
+ // NOTE: unsafe; Lock/Unlock must be managed by caller
+ Update(blockHeight int64, blockTxs types.Txs, deliverTxResponses []*abci.ResponseDeliverTx, newPreFn PreCheckFunc, newPostFn PostCheckFunc) error
+
+ // FlushAppConn flushes the mempool connection to ensure async reqResCb calls are
+ // done. E.g. from CheckTx.
+ FlushAppConn() error
+
+ // Flush removes all transactions from the mempool and cache
+ Flush()
+
+ // TxsAvailable returns a channel which fires once for every height,
+ // and only when transactions are available in the mempool.
+ // NOTE: the returned channel may be nil if EnableTxsAvailable was not called.
+ TxsAvailable() <-chan struct{}
+
+ // EnableTxsAvailable initializes the TxsAvailable channel, ensuring it will
+ // trigger once every height when transactions are available.
+ EnableTxsAvailable()
+
+ // Size returns the number of transactions in the mempool.
+ Size() int
+
+ // TxsBytes returns the total size of all txs in the mempool.
+ TxsBytes() int64
+
+ // InitWAL creates a directory for the WAL file and opens the file itself.
+ InitWAL()
+
+ // CloseWAL closes and discards the underlying WAL file.
+ // Any further writes will not be relayed to disk.
+ CloseWAL() +} + +//-------------------------------------------------------------------------------- + // PreCheckFunc is an optional filter executed before CheckTx and rejects // transaction if false is returned. An example would be to ensure that a // transaction doesn't exceeded the block size. @@ -36,75 +92,10 @@ type PostCheckFunc func(types.Tx, *abci.ResponseCheckTx) error type TxInfo struct { // We don't use p2p.ID here because it's too big. The gain is to store max 2 // bytes with each tx to identify the sender rather than 20 bytes. - PeerID uint16 + SenderID uint16 } -/* - -The mempool pushes new txs onto the proxyAppConn. -It gets a stream of (req, res) tuples from the proxy. -The mempool stores good txs in a concurrent linked-list. - -Multiple concurrent go-routines can traverse this linked-list -safely by calling .NextWait() on each element. - -So we have several go-routines: -1. Consensus calling Update() and Reap() synchronously -2. Many mempool reactor's peer routines calling CheckTx() -3. Many mempool reactor's peer routines traversing the txs linked list -4. Another goroutine calling GarbageCollectTxs() periodically - -To manage these goroutines, there are three methods of locking. -1. Mutations to the linked-list is protected by an internal mtx (CList is goroutine-safe) -2. Mutations to the linked-list elements are atomic -3. CheckTx() calls can be paused upon Update() and Reap(), protected by .proxyMtx - -Garbage collection of old elements from mempool.txs is handlde via -the DetachPrev() call, which makes old elements not reachable by -peer broadcastTxRoutine() automatically garbage collected. - -TODO: Better handle abci client errors. (make it automatically handle connection errors) - -*/ - -var ( - // ErrTxInCache is returned to the client if we saw tx earlier - ErrTxInCache = errors.New("Tx already exists in cache") - - // ErrTxTooLarge means the tx is too big to be sent in a message to other peers - ErrTxTooLarge = fmt.Errorf("Tx too large. Max size is %d", maxTxSize) -) - -// ErrMempoolIsFull means Tendermint & an application can't handle that much load -type ErrMempoolIsFull struct { - numTxs int - maxTxs int - - txsBytes int64 - maxTxsBytes int64 -} - -func (e ErrMempoolIsFull) Error() string { - return fmt.Sprintf( - "Mempool is full: number of txs %d (max: %d), total txs bytes %d (max: %d)", - e.numTxs, e.maxTxs, - e.txsBytes, e.maxTxsBytes) -} - -// ErrPreCheck is returned when tx is too big -type ErrPreCheck struct { - Reason error -} - -func (e ErrPreCheck) Error() string { - return e.Reason.Error() -} - -// IsPreCheckError returns true if err is due to pre check failure. -func IsPreCheckError(err error) bool { - _, ok := err.(ErrPreCheck) - return ok -} +//-------------------------------------------------------------------------------- // PreCheckAminoMaxBytes checks that the size of the transaction plus the amino // overhead is smaller or equal to the expected maxBytes. @@ -143,718 +134,3 @@ func PostCheckMaxGas(maxGas int64) PostCheckFunc { return nil } } - -// TxID is the hex encoded hash of the bytes as a types.Tx. -func TxID(tx []byte) string { - return fmt.Sprintf("%X", types.Tx(tx).Hash()) -} - -// txKey is the fixed length array sha256 hash used as the key in maps. -func txKey(tx types.Tx) [sha256.Size]byte { - return sha256.Sum256(tx) -} - -// Mempool is an ordered in-memory pool for transactions before they are proposed in a consensus -// round. 
Transaction validity is checked using the CheckTx abci message before the transaction is -// added to the pool. The Mempool uses a concurrent list structure for storing transactions that -// can be efficiently accessed by multiple concurrent readers. -type Mempool struct { - config *cfg.MempoolConfig - - proxyMtx sync.Mutex - proxyAppConn proxy.AppConnMempool - txs *clist.CList // concurrent linked-list of good txs - preCheck PreCheckFunc - postCheck PostCheckFunc - - // Track whether we're rechecking txs. - // These are not protected by a mutex and are expected to be mutated - // in serial (ie. by abci responses which are called in serial). - recheckCursor *clist.CElement // next expected response - recheckEnd *clist.CElement // re-checking stops here - - // notify listeners (ie. consensus) when txs are available - notifiedTxsAvailable bool - txsAvailable chan struct{} // fires once for each height, when the mempool is not empty - - // Map for quick access to txs to record sender in CheckTx. - // txsMap: txKey -> CElement - txsMap sync.Map - - // Atomic integers - height int64 // the last block Update()'d to - rechecking int32 // for re-checking filtered txs on Update() - txsBytes int64 // total size of mempool, in bytes - - // Keep a cache of already-seen txs. - // This reduces the pressure on the proxyApp. - cache txCache - - // A log of mempool txs - wal *auto.AutoFile - - logger log.Logger - - metrics *Metrics -} - -// MempoolOption sets an optional parameter on the Mempool. -type MempoolOption func(*Mempool) - -// NewMempool returns a new Mempool with the given configuration and connection to an application. -func NewMempool( - config *cfg.MempoolConfig, - proxyAppConn proxy.AppConnMempool, - height int64, - options ...MempoolOption, -) *Mempool { - mempool := &Mempool{ - config: config, - proxyAppConn: proxyAppConn, - txs: clist.New(), - height: height, - rechecking: 0, - recheckCursor: nil, - recheckEnd: nil, - logger: log.NewNopLogger(), - metrics: NopMetrics(), - } - if config.CacheSize > 0 { - mempool.cache = newMapTxCache(config.CacheSize) - } else { - mempool.cache = nopTxCache{} - } - proxyAppConn.SetResponseCallback(mempool.globalCb) - for _, option := range options { - option(mempool) - } - return mempool -} - -// EnableTxsAvailable initializes the TxsAvailable channel, -// ensuring it will trigger once every height when transactions are available. -// NOTE: not thread safe - should only be called once, on startup -func (mem *Mempool) EnableTxsAvailable() { - mem.txsAvailable = make(chan struct{}, 1) -} - -// SetLogger sets the Logger. -func (mem *Mempool) SetLogger(l log.Logger) { - mem.logger = l -} - -// WithPreCheck sets a filter for the mempool to reject a tx if f(tx) returns -// false. This is ran before CheckTx. -func WithPreCheck(f PreCheckFunc) MempoolOption { - return func(mem *Mempool) { mem.preCheck = f } -} - -// WithPostCheck sets a filter for the mempool to reject a tx if f(tx) returns -// false. This is ran after CheckTx. -func WithPostCheck(f PostCheckFunc) MempoolOption { - return func(mem *Mempool) { mem.postCheck = f } -} - -// WithMetrics sets the metrics. -func WithMetrics(metrics *Metrics) MempoolOption { - return func(mem *Mempool) { mem.metrics = metrics } -} - -// InitWAL creates a directory for the WAL file and opens a file itself. -// -// *panics* if can't create directory or open file. 
-// *not thread safe* -func (mem *Mempool) InitWAL() { - walDir := mem.config.WalDir() - err := cmn.EnsureDir(walDir, 0700) - if err != nil { - panic(errors.Wrap(err, "Error ensuring Mempool WAL dir")) - } - af, err := auto.OpenAutoFile(walDir + "/wal") - if err != nil { - panic(errors.Wrap(err, "Error opening Mempool WAL file")) - } - mem.wal = af -} - -// CloseWAL closes and discards the underlying WAL file. -// Any further writes will not be relayed to disk. -func (mem *Mempool) CloseWAL() { - mem.proxyMtx.Lock() - defer mem.proxyMtx.Unlock() - - if err := mem.wal.Close(); err != nil { - mem.logger.Error("Error closing WAL", "err", err) - } - mem.wal = nil -} - -// Lock locks the mempool. The consensus must be able to hold lock to safely update. -func (mem *Mempool) Lock() { - mem.proxyMtx.Lock() -} - -// Unlock unlocks the mempool. -func (mem *Mempool) Unlock() { - mem.proxyMtx.Unlock() -} - -// Size returns the number of transactions in the mempool. -func (mem *Mempool) Size() int { - return mem.txs.Len() -} - -// TxsBytes returns the total size of all txs in the mempool. -func (mem *Mempool) TxsBytes() int64 { - return atomic.LoadInt64(&mem.txsBytes) -} - -// FlushAppConn flushes the mempool connection to ensure async reqResCb calls are -// done. E.g. from CheckTx. -func (mem *Mempool) FlushAppConn() error { - return mem.proxyAppConn.FlushSync() -} - -// Flush removes all transactions from the mempool and cache -func (mem *Mempool) Flush() { - mem.proxyMtx.Lock() - defer mem.proxyMtx.Unlock() - - mem.cache.Reset() - - for e := mem.txs.Front(); e != nil; e = e.Next() { - mem.txs.Remove(e) - e.DetachPrev() - } - - mem.txsMap = sync.Map{} - _ = atomic.SwapInt64(&mem.txsBytes, 0) -} - -// TxsFront returns the first transaction in the ordered list for peer -// goroutines to call .NextWait() on. -func (mem *Mempool) TxsFront() *clist.CElement { - return mem.txs.Front() -} - -// TxsWaitChan returns a channel to wait on transactions. It will be closed -// once the mempool is not empty (ie. the internal `mem.txs` has at least one -// element) -func (mem *Mempool) TxsWaitChan() <-chan struct{} { - return mem.txs.WaitChan() -} - -// CheckTx executes a new transaction against the application to determine its validity -// and whether it should be added to the mempool. -// It blocks if we're waiting on Update() or Reap(). -// cb: A callback from the CheckTx command. -// It gets called from another goroutine. -// CONTRACT: Either cb will get called, or err returned. -func (mem *Mempool) CheckTx(tx types.Tx, cb func(*abci.Response)) (err error) { - return mem.CheckTxWithInfo(tx, cb, TxInfo{PeerID: UnknownPeerID}) -} - -// CheckTxWithInfo performs the same operation as CheckTx, but with extra meta data about the tx. -// Currently this metadata is the peer who sent it, -// used to prevent the tx from being gossiped back to them. -func (mem *Mempool) CheckTxWithInfo(tx types.Tx, cb func(*abci.Response), txInfo TxInfo) (err error) { - mem.proxyMtx.Lock() - // use defer to unlock mutex because application (*local client*) might panic - defer mem.proxyMtx.Unlock() - - var ( - memSize = mem.Size() - txsBytes = mem.TxsBytes() - ) - if memSize >= mem.config.Size || - int64(len(tx))+txsBytes > mem.config.MaxTxsBytes { - return ErrMempoolIsFull{ - memSize, mem.config.Size, - txsBytes, mem.config.MaxTxsBytes} - } - - // The size of the corresponding amino-encoded TxMessage - // can't be larger than the maxMsgSize, otherwise we can't - // relay it to peers. 
- if len(tx) > maxTxSize { - return ErrTxTooLarge - } - - if mem.preCheck != nil { - if err := mem.preCheck(tx); err != nil { - return ErrPreCheck{err} - } - } - - // CACHE - if !mem.cache.Push(tx) { - // Record a new sender for a tx we've already seen. - // Note it's possible a tx is still in the cache but no longer in the mempool - // (eg. after committing a block, txs are removed from mempool but not cache), - // so we only record the sender for txs still in the mempool. - if e, ok := mem.txsMap.Load(txKey(tx)); ok { - memTx := e.(*clist.CElement).Value.(*mempoolTx) - if _, loaded := memTx.senders.LoadOrStore(txInfo.PeerID, true); loaded { - // TODO: consider punishing peer for dups, - // its non-trivial since invalid txs can become valid, - // but they can spam the same tx with little cost to them atm. - } - } - - return ErrTxInCache - } - // END CACHE - - // WAL - if mem.wal != nil { - // TODO: Notify administrators when WAL fails - _, err := mem.wal.Write([]byte(tx)) - if err != nil { - mem.logger.Error("Error writing to WAL", "err", err) - } - _, err = mem.wal.Write([]byte("\n")) - if err != nil { - mem.logger.Error("Error writing to WAL", "err", err) - } - } - // END WAL - - // NOTE: proxyAppConn may error if tx buffer is full - if err = mem.proxyAppConn.Error(); err != nil { - return err - } - - reqRes := mem.proxyAppConn.CheckTxAsync(tx) - reqRes.SetCallback(mem.reqResCb(tx, txInfo.PeerID, cb)) - - return nil -} - -// Global callback that will be called after every ABCI response. -// Having a single global callback avoids needing to set a callback for each request. -// However, processing the checkTx response requires the peerID (so we can track which txs we heard from who), -// and peerID is not included in the ABCI request, so we have to set request-specific callbacks that -// include this information. If we're not in the midst of a recheck, this function will just return, -// so the request specific callback can do the work. -// When rechecking, we don't need the peerID, so the recheck callback happens here. -func (mem *Mempool) globalCb(req *abci.Request, res *abci.Response) { - if mem.recheckCursor == nil { - return - } - - mem.metrics.RecheckTimes.Add(1) - mem.resCbRecheck(req, res) - - // update metrics - mem.metrics.Size.Set(float64(mem.Size())) -} - -// Request specific callback that should be set on individual reqRes objects -// to incorporate local information when processing the response. -// This allows us to track the peer that sent us this tx, so we can avoid sending it back to them. -// NOTE: alternatively, we could include this information in the ABCI request itself. -// -// External callers of CheckTx, like the RPC, can also pass an externalCb through here that is called -// when all other response processing is complete. -// -// Used in CheckTxWithInfo to record PeerID who sent us the tx. -func (mem *Mempool) reqResCb(tx []byte, peerID uint16, externalCb func(*abci.Response)) func(res *abci.Response) { - return func(res *abci.Response) { - if mem.recheckCursor != nil { - // this should never happen - panic("recheck cursor is not nil in reqResCb") - } - - mem.resCbFirstTime(tx, peerID, res) - - // update metrics - mem.metrics.Size.Set(float64(mem.Size())) - - // passed in by the caller of CheckTx, eg. 
the RPC - if externalCb != nil { - externalCb(res) - } - } -} - -// Called from: -// - resCbFirstTime (lock not held) if tx is valid -func (mem *Mempool) addTx(memTx *mempoolTx) { - e := mem.txs.PushBack(memTx) - mem.txsMap.Store(txKey(memTx.tx), e) - atomic.AddInt64(&mem.txsBytes, int64(len(memTx.tx))) - mem.metrics.TxSizeBytes.Observe(float64(len(memTx.tx))) -} - -// Called from: -// - Update (lock held) if tx was committed -// - resCbRecheck (lock not held) if tx was invalidated -func (mem *Mempool) removeTx(tx types.Tx, elem *clist.CElement, removeFromCache bool) { - mem.txs.Remove(elem) - elem.DetachPrev() - mem.txsMap.Delete(txKey(tx)) - atomic.AddInt64(&mem.txsBytes, int64(-len(tx))) - - if removeFromCache { - mem.cache.Remove(tx) - } -} - -// callback, which is called after the app checked the tx for the first time. -// -// The case where the app checks the tx for the second and subsequent times is -// handled by the resCbRecheck callback. -func (mem *Mempool) resCbFirstTime(tx []byte, peerID uint16, res *abci.Response) { - switch r := res.Value.(type) { - case *abci.Response_CheckTx: - var postCheckErr error - if mem.postCheck != nil { - postCheckErr = mem.postCheck(tx, r.CheckTx) - } - if (r.CheckTx.Code == abci.CodeTypeOK) && postCheckErr == nil { - memTx := &mempoolTx{ - height: mem.height, - gasWanted: r.CheckTx.GasWanted, - tx: tx, - } - memTx.senders.Store(peerID, true) - mem.addTx(memTx) - mem.logger.Info("Added good transaction", - "tx", TxID(tx), - "res", r, - "height", memTx.height, - "total", mem.Size(), - ) - mem.notifyTxsAvailable() - } else { - // ignore bad transaction - mem.logger.Info("Rejected bad transaction", "tx", TxID(tx), "res", r, "err", postCheckErr) - mem.metrics.FailedTxs.Add(1) - // remove from cache (it might be good later) - mem.cache.Remove(tx) - } - default: - // ignore other messages - } -} - -// callback, which is called after the app rechecked the tx. -// -// The case where the app checks the tx for the first time is handled by the -// resCbFirstTime callback. -func (mem *Mempool) resCbRecheck(req *abci.Request, res *abci.Response) { - switch r := res.Value.(type) { - case *abci.Response_CheckTx: - tx := req.GetCheckTx().Tx - memTx := mem.recheckCursor.Value.(*mempoolTx) - if !bytes.Equal(tx, memTx.tx) { - panic(fmt.Sprintf( - "Unexpected tx response from proxy during recheck\nExpected %X, got %X", - memTx.tx, - tx)) - } - var postCheckErr error - if mem.postCheck != nil { - postCheckErr = mem.postCheck(tx, r.CheckTx) - } - if (r.CheckTx.Code == abci.CodeTypeOK) && postCheckErr == nil { - // Good, nothing to do. - } else { - // Tx became invalidated due to newly committed block. - mem.logger.Info("Tx is no longer valid", "tx", TxID(tx), "res", r, "err", postCheckErr) - // NOTE: we remove tx from the cache because it might be good later - mem.removeTx(tx, mem.recheckCursor, true) - } - if mem.recheckCursor == mem.recheckEnd { - mem.recheckCursor = nil - } else { - mem.recheckCursor = mem.recheckCursor.Next() - } - if mem.recheckCursor == nil { - // Done! - atomic.StoreInt32(&mem.rechecking, 0) - mem.logger.Info("Done rechecking txs") - - // incase the recheck removed all txs - if mem.Size() > 0 { - mem.notifyTxsAvailable() - } - } - default: - // ignore other messages - } -} - -// TxsAvailable returns a channel which fires once for every height, -// and only when transactions are available in the mempool. -// NOTE: the returned channel may be nil if EnableTxsAvailable was not called. 
-func (mem *Mempool) TxsAvailable() <-chan struct{} { - return mem.txsAvailable -} - -func (mem *Mempool) notifyTxsAvailable() { - if mem.Size() == 0 { - panic("notified txs available but mempool is empty!") - } - if mem.txsAvailable != nil && !mem.notifiedTxsAvailable { - // channel cap is 1, so this will send once - mem.notifiedTxsAvailable = true - select { - case mem.txsAvailable <- struct{}{}: - default: - } - } -} - -// ReapMaxBytesMaxGas reaps transactions from the mempool up to maxBytes bytes total -// with the condition that the total gasWanted must be less than maxGas. -// If both maxes are negative, there is no cap on the size of all returned -// transactions (~ all available transactions). -func (mem *Mempool) ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs { - mem.proxyMtx.Lock() - defer mem.proxyMtx.Unlock() - - for atomic.LoadInt32(&mem.rechecking) > 0 { - // TODO: Something better? - time.Sleep(time.Millisecond * 10) - } - - var totalBytes int64 - var totalGas int64 - // TODO: we will get a performance boost if we have a good estimate of avg - // size per tx, and set the initial capacity based off of that. - // txs := make([]types.Tx, 0, cmn.MinInt(mem.txs.Len(), max/mem.avgTxSize)) - txs := make([]types.Tx, 0, mem.txs.Len()) - for e := mem.txs.Front(); e != nil; e = e.Next() { - memTx := e.Value.(*mempoolTx) - // Check total size requirement - aminoOverhead := types.ComputeAminoOverhead(memTx.tx, 1) - if maxBytes > -1 && totalBytes+int64(len(memTx.tx))+aminoOverhead > maxBytes { - return txs - } - totalBytes += int64(len(memTx.tx)) + aminoOverhead - // Check total gas requirement. - // If maxGas is negative, skip this check. - // Since newTotalGas < masGas, which - // must be non-negative, it follows that this won't overflow. - newTotalGas := totalGas + memTx.gasWanted - if maxGas > -1 && newTotalGas > maxGas { - return txs - } - totalGas = newTotalGas - txs = append(txs, memTx.tx) - } - return txs -} - -// ReapMaxTxs reaps up to max transactions from the mempool. -// If max is negative, there is no cap on the size of all returned -// transactions (~ all available transactions). -func (mem *Mempool) ReapMaxTxs(max int) types.Txs { - mem.proxyMtx.Lock() - defer mem.proxyMtx.Unlock() - - if max < 0 { - max = mem.txs.Len() - } - - for atomic.LoadInt32(&mem.rechecking) > 0 { - // TODO: Something better? - time.Sleep(time.Millisecond * 10) - } - - txs := make([]types.Tx, 0, cmn.MinInt(mem.txs.Len(), max)) - for e := mem.txs.Front(); e != nil && len(txs) <= max; e = e.Next() { - memTx := e.Value.(*mempoolTx) - txs = append(txs, memTx.tx) - } - return txs -} - -// Update informs the mempool that the given txs were committed and can be discarded. -// NOTE: this should be called *after* block is committed by consensus. -// NOTE: unsafe; Lock/Unlock must be managed by caller -func (mem *Mempool) Update( - height int64, - txs types.Txs, - preCheck PreCheckFunc, - postCheck PostCheckFunc, -) error { - // Set height - mem.height = height - mem.notifiedTxsAvailable = false - - if preCheck != nil { - mem.preCheck = preCheck - } - if postCheck != nil { - mem.postCheck = postCheck - } - - // Add committed transactions to cache (if missing). - for _, tx := range txs { - _ = mem.cache.Push(tx) - } - - // Remove committed transactions. - txsLeft := mem.removeTxs(txs) - - // Either recheck non-committed txs to see if they became invalid - // or just notify there're some txs left. 
-
-// Update informs the mempool that the given txs were committed and can be discarded.
-// NOTE: this should be called *after* block is committed by consensus.
-// NOTE: unsafe; Lock/Unlock must be managed by caller
-func (mem *Mempool) Update(
-	height int64,
-	txs types.Txs,
-	preCheck PreCheckFunc,
-	postCheck PostCheckFunc,
-) error {
-	// Set height
-	mem.height = height
-	mem.notifiedTxsAvailable = false
-
-	if preCheck != nil {
-		mem.preCheck = preCheck
-	}
-	if postCheck != nil {
-		mem.postCheck = postCheck
-	}
-
-	// Add committed transactions to cache (if missing).
-	for _, tx := range txs {
-		_ = mem.cache.Push(tx)
-	}
-
-	// Remove committed transactions.
-	txsLeft := mem.removeTxs(txs)
-
-	// Either recheck non-committed txs to see if they became invalid
-	// or just notify there're some txs left.
-	if len(txsLeft) > 0 {
-		if mem.config.Recheck {
-			mem.logger.Info("Recheck txs", "numtxs", len(txsLeft), "height", height)
-			mem.recheckTxs(txsLeft)
-			// At this point, mem.txs are being rechecked.
-			// mem.recheckCursor re-scans mem.txs and possibly removes some txs.
-			// Before mem.Reap(), we should wait for mem.recheckCursor to be nil.
-		} else {
-			mem.notifyTxsAvailable()
-		}
-	}
-
-	// Update metrics
-	mem.metrics.Size.Set(float64(mem.Size()))
-
-	return nil
-}
-
-func (mem *Mempool) removeTxs(txs types.Txs) []types.Tx {
-	// Build a map for faster lookups.
-	txsMap := make(map[string]struct{}, len(txs))
-	for _, tx := range txs {
-		txsMap[string(tx)] = struct{}{}
-	}
-
-	txsLeft := make([]types.Tx, 0, mem.txs.Len())
-	for e := mem.txs.Front(); e != nil; e = e.Next() {
-		memTx := e.Value.(*mempoolTx)
-		// Remove the tx if it's already in a block.
-		if _, ok := txsMap[string(memTx.tx)]; ok {
-			// NOTE: we don't remove committed txs from the cache.
-			mem.removeTx(memTx.tx, e, false)
-
-			continue
-		}
-		txsLeft = append(txsLeft, memTx.tx)
-	}
-	return txsLeft
-}
-
-// NOTE: pass in txs because mem.txs can mutate concurrently.
-func (mem *Mempool) recheckTxs(txs []types.Tx) {
-	if len(txs) == 0 {
-		return
-	}
-	atomic.StoreInt32(&mem.rechecking, 1)
-	mem.recheckCursor = mem.txs.Front()
-	mem.recheckEnd = mem.txs.Back()
-
-	// Push txs to proxyAppConn
-	// NOTE: globalCb may be called concurrently.
-	for _, tx := range txs {
-		mem.proxyAppConn.CheckTxAsync(tx)
-	}
-	mem.proxyAppConn.FlushAsync()
-}
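Because `Update` is documented as unsafe, the locking discipline falls entirely on the caller. A sketch of the expected call pattern after a block commits, against the four-argument signature of the removed implementation above (caller name is hypothetical):

```go
package example

import (
	mempl "github.com/tendermint/tendermint/mempool"
	"github.com/tendermint/tendermint/types"
)

// applyCommittedBlock sketches the caller-managed locking around Update.
// Passing nil for the pre/post-checks keeps whatever filters the mempool
// saw last.
func applyCommittedBlock(mem *mempl.Mempool, height int64, txs types.Txs) error {
	mem.Lock()
	defer mem.Unlock()
	return mem.Update(height, txs, nil, nil)
}
```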
-
-//--------------------------------------------------------------------------------
-
-// mempoolTx is a transaction that successfully ran
-type mempoolTx struct {
-	height    int64    // height that this tx had been validated in
-	gasWanted int64    // amount of gas this tx states it will require
-	tx        types.Tx //
-
-	// ids of peers who've sent us this tx (as a map for quick lookups).
-	// senders: PeerID -> bool
-	senders sync.Map
-}
-
-// Height returns the height for this transaction
-func (memTx *mempoolTx) Height() int64 {
-	return atomic.LoadInt64(&memTx.height)
-}
-
-//--------------------------------------------------------------------------------
-
-type txCache interface {
-	Reset()
-	Push(tx types.Tx) bool
-	Remove(tx types.Tx)
-}
-
-// mapTxCache maintains a LRU cache of transactions. This only stores the hash
-// of the tx, due to memory concerns.
-type mapTxCache struct {
-	mtx  sync.Mutex
-	size int
-	map_ map[[sha256.Size]byte]*list.Element
-	list *list.List
-}
-
-var _ txCache = (*mapTxCache)(nil)
-
-// newMapTxCache returns a new mapTxCache.
-func newMapTxCache(cacheSize int) *mapTxCache {
-	return &mapTxCache{
-		size: cacheSize,
-		map_: make(map[[sha256.Size]byte]*list.Element, cacheSize),
-		list: list.New(),
-	}
-}
-
-// Reset resets the cache to an empty state.
-func (cache *mapTxCache) Reset() {
-	cache.mtx.Lock()
-	cache.map_ = make(map[[sha256.Size]byte]*list.Element, cache.size)
-	cache.list.Init()
-	cache.mtx.Unlock()
-}
-
-// Push adds the given tx to the cache and returns true. It returns
-// false if tx is already in the cache.
-func (cache *mapTxCache) Push(tx types.Tx) bool {
-	cache.mtx.Lock()
-	defer cache.mtx.Unlock()
-
-	// Use the tx hash in the cache
-	txHash := txKey(tx)
-	if moved, exists := cache.map_[txHash]; exists {
-		cache.list.MoveToBack(moved)
-		return false
-	}
-
-	if cache.list.Len() >= cache.size {
-		popped := cache.list.Front()
-		poppedTxHash := popped.Value.([sha256.Size]byte)
-		delete(cache.map_, poppedTxHash)
-		if popped != nil {
-			cache.list.Remove(popped)
-		}
-	}
-	e := cache.list.PushBack(txHash)
-	cache.map_[txHash] = e
-	return true
-}
-
-// Remove removes the given tx from the cache.
-func (cache *mapTxCache) Remove(tx types.Tx) {
-	cache.mtx.Lock()
-	txHash := txKey(tx)
-	popped := cache.map_[txHash]
-	delete(cache.map_, txHash)
-	if popped != nil {
-		cache.list.Remove(popped)
-	}
-
-	cache.mtx.Unlock()
-}
-
-type nopTxCache struct{}
-
-var _ txCache = (*nopTxCache)(nil)
-
-func (nopTxCache) Reset()             {}
-func (nopTxCache) Push(types.Tx) bool { return true }
-func (nopTxCache) Remove(types.Tx)    {}
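The eviction rule in `mapTxCache` is easy to check in isolation. A self-contained sketch that mirrors its behavior with the standard library (illustrative only, not the package's API): fixed capacity, a hit moves the entry to the back, and overflow pops the front (oldest) entry.

```go
package main

import (
	"container/list"
	"crypto/sha256"
	"fmt"
)

func main() {
	const size = 2
	keys := map[[sha256.Size]byte]*list.Element{}
	order := list.New()

	push := func(tx []byte) bool {
		h := sha256.Sum256(tx)
		if e, ok := keys[h]; ok {
			order.MoveToBack(e) // cache hit: refresh recency
			return false
		}
		if order.Len() >= size {
			oldest := order.Front() // evict the least recently seen hash
			delete(keys, oldest.Value.([sha256.Size]byte))
			order.Remove(oldest)
		}
		keys[h] = order.PushBack(h)
		return true
	}

	fmt.Println(push([]byte("a"))) // true
	fmt.Println(push([]byte("b"))) // true
	fmt.Println(push([]byte("a"))) // false: hit, "a" refreshed
	fmt.Println(push([]byte("c"))) // true: evicts "b", not "a"
}
```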
diff --git a/mempool/reactor.go b/mempool/reactor.go
index e1376b28..65ccd7df 100644
--- a/mempool/reactor.go
+++ b/mempool/reactor.go
@@ -31,13 +31,13 @@ const (
 	maxActiveIDs = math.MaxUint16
 )
 
-// MempoolReactor handles mempool tx broadcasting amongst peers.
+// Reactor handles mempool tx broadcasting amongst peers.
 // It maintains a map from peer ID to counter, to prevent gossiping txs to the
 // peers you received it from.
-type MempoolReactor struct {
+type Reactor struct {
 	p2p.BaseReactor
 	config  *cfg.MempoolConfig
-	Mempool *Mempool
+	mempool *CListMempool
 	ids     *mempoolIDs
 }
@@ -104,25 +104,25 @@ func newMempoolIDs() *mempoolIDs {
 	}
 }
 
-// NewMempoolReactor returns a new MempoolReactor with the given config and mempool.
-func NewMempoolReactor(config *cfg.MempoolConfig, mempool *Mempool) *MempoolReactor {
-	memR := &MempoolReactor{
+// NewReactor returns a new Reactor with the given config and mempool.
+func NewReactor(config *cfg.MempoolConfig, mempool *CListMempool) *Reactor {
+	memR := &Reactor{
 		config:  config,
-		Mempool: mempool,
+		mempool: mempool,
 		ids:     newMempoolIDs(),
 	}
-	memR.BaseReactor = *p2p.NewBaseReactor("MempoolReactor", memR)
+	memR.BaseReactor = *p2p.NewBaseReactor("Reactor", memR)
 	return memR
 }
 
-// SetLogger sets the Logger on the reactor and the underlying Mempool.
-func (memR *MempoolReactor) SetLogger(l log.Logger) {
+// SetLogger sets the Logger on the reactor and the underlying mempool.
+func (memR *Reactor) SetLogger(l log.Logger) {
 	memR.Logger = l
-	memR.Mempool.SetLogger(l)
+	memR.mempool.SetLogger(l)
 }
 
 // OnStart implements p2p.BaseReactor.
-func (memR *MempoolReactor) OnStart() error {
+func (memR *Reactor) OnStart() error {
 	if !memR.config.Broadcast {
 		memR.Logger.Info("Tx broadcasting is disabled")
 	}
@@ -131,7 +131,7 @@ func (memR *MempoolReactor) OnStart() error {
 
 // GetChannels implements Reactor.
 // It returns the list of channels for this reactor.
-func (memR *MempoolReactor) GetChannels() []*p2p.ChannelDescriptor {
+func (memR *Reactor) GetChannels() []*p2p.ChannelDescriptor {
 	return []*p2p.ChannelDescriptor{
 		{
 			ID: MempoolChannel,
@@ -142,20 +142,20 @@
 
 // AddPeer implements Reactor.
 // It starts a broadcast routine ensuring all txs are forwarded to the given peer.
-func (memR *MempoolReactor) AddPeer(peer p2p.Peer) {
+func (memR *Reactor) AddPeer(peer p2p.Peer) {
 	memR.ids.ReserveForPeer(peer)
 	go memR.broadcastTxRoutine(peer)
 }
 
 // RemovePeer implements Reactor.
-func (memR *MempoolReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
+func (memR *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) {
 	memR.ids.Reclaim(peer)
 	// broadcast routine checks if peer is gone and returns
 }
 
 // Receive implements Reactor.
 // It adds any received transactions to the mempool.
-func (memR *MempoolReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
+func (memR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
 	msg, err := decodeMsg(msgBytes)
 	if err != nil {
 		memR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes)
@@ -167,9 +167,9 @@ func (memR *MempoolReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
 	switch msg := msg.(type) {
 	case *TxMessage:
 		peerID := memR.ids.GetForPeer(src)
-		err := memR.Mempool.CheckTxWithInfo(msg.Tx, nil, TxInfo{PeerID: peerID})
+		err := memR.mempool.CheckTxWithInfo(msg.Tx, nil, TxInfo{SenderID: peerID})
 		if err != nil {
-			memR.Logger.Info("Could not check tx", "tx", TxID(msg.Tx), "err", err)
+			memR.Logger.Info("Could not check tx", "tx", txID(msg.Tx), "err", err)
 		}
 		// broadcasting happens from go routines per peer
 	default:
@@ -183,7 +183,7 @@ type PeerState interface {
 }
 
 // Send new mempool txs to peer.
-func (memR *MempoolReactor) broadcastTxRoutine(peer p2p.Peer) {
+func (memR *Reactor) broadcastTxRoutine(peer p2p.Peer) {
 	if !memR.config.Broadcast {
 		return
 	}
@@ -200,8 +200,8 @@ func (memR *MempoolReactor) broadcastTxRoutine(peer p2p.Peer) {
 		// start from the beginning.
 		if next == nil {
 			select {
-			case <-memR.Mempool.TxsWaitChan(): // Wait until a tx is available
-				if next = memR.Mempool.TxsFront(); next == nil {
+			case <-memR.mempool.TxsWaitChan(): // Wait until a tx is available
+				if next = memR.mempool.TxsFront(); next == nil {
 					continue
 				}
 			case <-peer.Quit():
@@ -255,7 +255,7 @@ func (memR *MempoolReactor) broadcastTxRoutine(peer p2p.Peer) {
 //-----------------------------------------------------------------------------
 // Messages
 
-// MempoolMessage is a message sent or received by the MempoolReactor.
+// MempoolMessage is a message sent or received by the Reactor.
 type MempoolMessage interface{}
 
 func RegisterMempoolMessages(cdc *amino.Codec) {
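Downstream wiring changes accordingly with the rename. A sketch of constructing and registering the renamed reactor (the helper name is hypothetical; `"MEMPOOL"` is the name node.go registers the reactor under):

```go
package example

import (
	cfg "github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/libs/log"
	mempl "github.com/tendermint/tendermint/mempool"
	"github.com/tendermint/tendermint/p2p"
)

// registerMempoolReactor sketches the post-rename wiring.
func registerMempoolReactor(config *cfg.MempoolConfig, mp *mempl.CListMempool,
	sw *p2p.Switch, logger log.Logger) *mempl.Reactor {

	memR := mempl.NewReactor(config, mp)
	memR.SetLogger(logger.With("module", "mempool"))
	sw.AddReactor("MEMPOOL", memR)
	return memR
}
```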
diff --git a/mempool/reactor_test.go b/mempool/reactor_test.go
index c9cf4980..94c0d190 100644
--- a/mempool/reactor_test.go
+++ b/mempool/reactor_test.go
@@ -1,7 +1,6 @@
 package mempool
 
 import (
-	"fmt"
 	"net"
 	"sync"
 	"testing"
@@ -43,8 +42,8 @@ func mempoolLogger() log.Logger {
 }
 
 // connect N mempool reactors through N switches
-func makeAndConnectMempoolReactors(config *cfg.Config, N int) []*MempoolReactor {
-	reactors := make([]*MempoolReactor, N)
+func makeAndConnectReactors(config *cfg.Config, N int) []*Reactor {
+	reactors := make([]*Reactor, N)
 	logger := mempoolLogger()
 	for i := 0; i < N; i++ {
 		app := kvstore.NewKVStoreApplication()
@@ -52,7 +51,7 @@ func makeAndConnectMempoolReactors(config *cfg.Config, N int) []*MempoolReactor
 		mempool, cleanup := newMempoolWithApp(cc)
 		defer cleanup()
 
-		reactors[i] = NewMempoolReactor(config.Mempool, mempool) // so we dont start the consensus states
+		reactors[i] = NewReactor(config.Mempool, mempool) // so we don't start the consensus states
 		reactors[i].SetLogger(logger.With("validator", i))
 	}
 
@@ -64,13 +63,15 @@ func makeAndConnectMempoolReactors(config *cfg.Config, N int) []*MempoolReactor
 	return reactors
 }
 
-// wait for all txs on all reactors
-func waitForTxs(t *testing.T, txs types.Txs, reactors []*MempoolReactor) {
+func waitForTxsOnReactors(t *testing.T, txs types.Txs, reactors []*Reactor) {
 	// wait for the txs in all mempools
 	wg := new(sync.WaitGroup)
-	for i := 0; i < len(reactors); i++ {
+	for i, reactor := range reactors {
 		wg.Add(1)
-		go _waitForTxs(t, wg, txs, i, reactors)
+		go func(r *Reactor, reactorIndex int) {
+			defer wg.Done()
+			waitForTxsOnReactor(t, txs, r, reactorIndex)
+		}(reactor, i)
 	}
 
 	done := make(chan struct{})
@@ -87,25 +88,23 @@ func waitForTxs(t *testing.T, txs types.Txs, reactors []*MempoolReactor) {
 	}
 }
 
-// wait for all txs on a single mempool
-func _waitForTxs(t *testing.T, wg *sync.WaitGroup, txs types.Txs, reactorIdx int, reactors []*MempoolReactor) {
-
-	mempool := reactors[reactorIdx].Mempool
-	for mempool.Size() != len(txs) {
+func waitForTxsOnReactor(t *testing.T, txs types.Txs, reactor *Reactor, reactorIndex int) {
+	mempool := reactor.mempool
+	for mempool.Size() < len(txs) {
 		time.Sleep(time.Millisecond * 100)
 	}
 
 	reapedTxs := mempool.ReapMaxTxs(len(txs))
 	for i, tx := range txs {
-		assert.Equal(t, tx, reapedTxs[i], fmt.Sprintf("txs at index %d on reactor %d don't match: %v vs %v", i, reactorIdx, tx, reapedTxs[i]))
+		assert.Equalf(t, tx, reapedTxs[i],
+			"txs at index %d on reactor %d don't match: %v vs %v", i, reactorIndex, tx, reapedTxs[i])
 	}
-	wg.Done()
 }
 
 // ensure no txs on reactor after some timeout
-func ensureNoTxs(t *testing.T, reactor *MempoolReactor, timeout time.Duration) {
+func ensureNoTxs(t *testing.T, reactor *Reactor, timeout time.Duration) {
 	time.Sleep(timeout) // wait for the txs in all mempools
-	assert.Zero(t, reactor.Mempool.Size())
+	assert.Zero(t, reactor.mempool.Size())
 }
 
 const (
@@ -116,7 +115,7 @@ const (
 func TestReactorBroadcastTxMessage(t *testing.T) {
 	config := cfg.TestConfig()
 	const N = 4
-	reactors := makeAndConnectMempoolReactors(config, N)
+	reactors := makeAndConnectReactors(config, N)
 	defer func() {
 		for _, r := range reactors {
 			r.Stop()
@@ -130,14 +129,14 @@ func TestReactorBroadcastTxMessage(t *testing.T) {
 
 	// send a bunch of txs to the first reactor's mempool
 	// and wait for them all to be received in the others
-	txs := checkTxs(t, reactors[0].Mempool, NUM_TXS, UnknownPeerID)
-	waitForTxs(t, txs, reactors)
+	txs := checkTxs(t, reactors[0].mempool, NUM_TXS, UnknownPeerID)
+	waitForTxsOnReactors(t, txs, reactors)
 }
 
 func TestReactorNoBroadcastToSender(t *testing.T) {
 	config := cfg.TestConfig()
 	const N = 2
-	reactors := makeAndConnectMempoolReactors(config, N)
+	reactors := makeAndConnectReactors(config, N)
 	defer func() {
 		for _, r := range reactors {
 			r.Stop()
@@ -146,7 +145,7 @@ func TestReactorNoBroadcastToSender(t *testing.T) {
 
 	// send a bunch of txs to the first reactor's mempool, claiming it came from peer
 	// ensure peer gets no txs
-	checkTxs(t, reactors[0].Mempool, NUM_TXS, 1)
+	checkTxs(t, reactors[0].mempool, NUM_TXS, 1)
 	ensureNoTxs(t, reactors[1], 100*time.Millisecond)
 }
 
@@ -157,7 +156,7 @@ func TestBroadcastTxForPeerStopsWhenPeerStops(t *testing.T) {
 	config := cfg.TestConfig()
 	const N = 2
-	reactors := makeAndConnectMempoolReactors(config, N)
+	reactors := makeAndConnectReactors(config, N)
 	defer func() {
 		for _, r := range reactors {
 			r.Stop()
@@ -180,7 +179,7 @@ func TestBroadcastTxForPeerStopsWhenReactorStops(t *testing.T) {
 	config := cfg.TestConfig()
 	const N = 2
-	reactors := makeAndConnectMempoolReactors(config, N)
+	reactors := makeAndConnectReactors(config, N)
 
 	// stop reactors
 	for _, r := range reactors {
diff --git a/mock/mempool.go b/mock/mempool.go
new file mode 100644
index 00000000..cebe156b
--- /dev/null
+++ b/mock/mempool.go
@@ -0,0 +1,46 @@
+package mock
+
+import (
+	abci "github.com/tendermint/tendermint/abci/types"
+	"github.com/tendermint/tendermint/libs/clist"
+	mempl "github.com/tendermint/tendermint/mempool"
+	"github.com/tendermint/tendermint/types"
+)
+
+// Mempool is an empty implementation of a Mempool, useful for testing.
+type Mempool struct{}
+
+var _ mempl.Mempool = Mempool{}
+
+func (Mempool) Lock()     {}
+func (Mempool) Unlock()   {}
+func (Mempool) Size() int { return 0 }
+func (Mempool) CheckTx(_ types.Tx, _ func(*abci.Response)) error {
+	return nil
+}
+func (Mempool) CheckTxWithInfo(_ types.Tx, _ func(*abci.Response),
+	_ mempl.TxInfo) error {
+	return nil
+}
+func (Mempool) ReapMaxBytesMaxGas(_, _ int64) types.Txs { return types.Txs{} }
+func (Mempool) ReapMaxTxs(n int) types.Txs              { return types.Txs{} }
+func (Mempool) Update(
+	_ int64,
+	_ types.Txs,
+	_ []*abci.ResponseDeliverTx,
+	_ mempl.PreCheckFunc,
+	_ mempl.PostCheckFunc,
+) error {
+	return nil
+}
+func (Mempool) Flush()                        {}
+func (Mempool) FlushAppConn() error           { return nil }
+func (Mempool) TxsAvailable() <-chan struct{} { return make(chan struct{}) }
+func (Mempool) EnableTxsAvailable()           {}
+func (Mempool) TxsBytes() int64               { return 0 }
+
+func (Mempool) TxsFront() *clist.CElement    { return nil }
+func (Mempool) TxsWaitChan() <-chan struct{} { return nil }
+
+func (Mempool) InitWAL() {}
+func (Mempool) CloseWAL() {}
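With `Mempool` now an interface, the mock drops into any consumer that previously demanded a concrete mempool. A hypothetical test wiring, mirroring the `sm.NewBlockExecutor` call visible later in this diff (argument types are assumptions based on that call site):

```go
package example

import (
	"github.com/tendermint/tendermint/evidence"
	dbm "github.com/tendermint/tendermint/libs/db"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/mock"
	"github.com/tendermint/tendermint/proxy"
	sm "github.com/tendermint/tendermint/state"
)

// newTestBlockExec shows mock.Mempool standing in for a real mempool in a
// state-layer test, since it satisfies the mempl.Mempool interface.
func newTestBlockExec(stateDB dbm.DB, logger log.Logger,
	proxyApp proxy.AppConns, evpool *evidence.EvidencePool) *sm.BlockExecutor {

	return sm.NewBlockExecutor(
		stateDB,
		logger.With("module", "state"),
		proxyApp.Consensus(),
		mock.Mempool{}, // no running app required
		evpool,
	)
}
```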
diff --git a/networks/remote/integration.sh b/networks/remote/integration.sh
index 8150aad4..c2d7c3a3 100644
--- a/networks/remote/integration.sh
+++ b/networks/remote/integration.sh
@@ -30,9 +30,7 @@ go get $REPO
 cd $GOPATH/src/$REPO
 
 ## build
-git checkout zach/ansible
 make get_tools
-make get_vendor_deps
 make build
 
 # generate an ssh key
@@ -84,8 +82,11 @@ ip3=$(strip $ip3)
 # all the ansible commands are also directory specific
 cd $GOPATH/src/github.com/tendermint/tendermint/networks/remote/ansible
 
+# create config dirs
+tendermint testnet
+
 ansible-playbook -i inventory/digital_ocean.py -l sentrynet install.yml
-ansible-playbook -i inventory/digital_ocean.py -l sentrynet config.yml -e BINARY=$GOPATH/src/github.com/tendermint/tendermint/build/tendermint -e CONFIGDIR=$GOPATH/src/github.com/tendermint/tendermint/docs/examples
+ansible-playbook -i inventory/digital_ocean.py -l sentrynet config.yml -e BINARY=$GOPATH/src/github.com/tendermint/tendermint/build/tendermint -e CONFIGDIR=$GOPATH/src/github.com/tendermint/tendermint/networks/remote/ansible/mytestnet
 
 sleep 10
diff --git a/node/node.go b/node/node.go
index e0d4924a..b24fa43b 100644
--- a/node/node.go
+++ b/node/node.go
@@ -21,6 +21,7 @@ import (
 	bc "github.com/tendermint/tendermint/blockchain"
 	bcexp "github.com/tendermint/tendermint/blockchainexp"
 	cfg "github.com/tendermint/tendermint/config"
+	"github.com/tendermint/tendermint/consensus"
 	cs "github.com/tendermint/tendermint/consensus"
 	"github.com/tendermint/tendermint/crypto/ed25519"
 	"github.com/tendermint/tendermint/evidence"
@@ -98,7 +99,7 @@ func DefaultNewNode(config *cfg.Config, logger log.Logger) (*Node, error) {
 	if _, err := os.Stat(oldPrivVal); !os.IsNotExist(err) {
 		oldPV, err := privval.LoadOldFilePV(oldPrivVal)
 		if err != nil {
-			return nil, fmt.Errorf("Error reading OldPrivValidator from %v: %v\n", oldPrivVal, err)
+			return nil, fmt.Errorf("error reading OldPrivValidator from %v: %v\n", oldPrivVal, err)
 		}
 		logger.Info("Upgrading PrivValidator file",
 			"old", oldPrivVal,
@@ -159,11 +160,13 @@ type Node struct {
 	// services
 	eventBus         *types.EventBus // pub/sub for services
 	stateDB          dbm.DB
-	blockStore       *tmstore.BlockStore   // store the blockchain to disk
-	bcReactor        p2p.Reactor           // for fast-syncing
-	mempoolReactor   *mempl.MempoolReactor // for gossipping transactions
+	blockStore       *tmstore.BlockStore // store the blockchain to disk
+	bcReactor        p2p.Reactor         // for fast-syncing
+	mempoolReactor   *mempl.Reactor      // for gossipping transactions
+	mempool          mempl.Mempool
 	consensusState   *cs.ConsensusState   // latest consensus state
 	consensusReactor *cs.ConsensusReactor // for participating in the consensus
+	pexReactor       *pex.PEXReactor      // for exchanging peer addresses
 	evidencePool     *evidence.EvidencePool // tracking evidence
 	proxyApp         proxy.AppConns       // connection to the application
 	rpcListeners     []net.Listener       // rpc servers
@@ -172,73 +175,49 @@ type Node struct {
 	prometheusSrv    *http.Server
 }
 
-// NewNode returns a new, ready to go, Tendermint Node.
-func NewNode(config *cfg.Config,
-	privValidator types.PrivValidator,
-	nodeKey *p2p.NodeKey,
-	clientCreator proxy.ClientCreator,
-	genesisDocProvider GenesisDocProvider,
-	dbProvider DBProvider,
-	metricsProvider MetricsProvider,
-	logger log.Logger) (*Node, error) {
-
-	// Get BlockStore
-	blockStoreDB, err := dbProvider(&DBContext{"blockstore", config})
+func initDBs(config *cfg.Config, dbProvider DBProvider) (blockStore *tmstore.BlockStore, stateDB dbm.DB, err error) {
+	var blockStoreDB dbm.DB
+	blockStoreDB, err = dbProvider(&DBContext{"blockstore", config})
 	if err != nil {
-		return nil, err
+		return
 	}
-	blockStore := tmstore.NewBlockStore(blockStoreDB)
+	blockStore = tmstore.NewBlockStore(blockStoreDB)
 
-	// Get State
-	stateDB, err := dbProvider(&DBContext{"state", config})
+	stateDB, err = dbProvider(&DBContext{"state", config})
 	if err != nil {
-		return nil, err
+		return
 	}
 
-	// Get genesis doc
-	// TODO: move to state package?
-	genDoc, err := loadGenesisDoc(stateDB)
-	if err != nil {
-		genDoc, err = genesisDocProvider()
-		if err != nil {
-			return nil, err
-		}
-		// save genesis doc to prevent a certain class of user errors (e.g. when it
-		// was changed, accidentally or not). Also good for audit trail.
-		saveGenesisDoc(stateDB, genDoc)
-	}
+	return
+}
 
-	state, err := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc)
-	if err != nil {
-		return nil, err
-	}
-
-	// Create the proxyApp and establish connections to the ABCI app (consensus, mempool, query).
+func createAndStartProxyAppConns(clientCreator proxy.ClientCreator, logger log.Logger) (proxy.AppConns, error) {
 	proxyApp := proxy.NewAppConns(clientCreator)
 	proxyApp.SetLogger(logger.With("module", "proxy"))
 	if err := proxyApp.Start(); err != nil {
-		return nil, fmt.Errorf("Error starting proxy app connections: %v", err)
+		return nil, fmt.Errorf("error starting proxy app connections: %v", err)
 	}
+	return proxyApp, nil
+}
 
-	// EventBus and IndexerService must be started before the handshake because
-	// we might need to index the txs of the replayed block as this might not have happened
-	// when the node stopped last time (i.e. the node stopped after it saved the block
-	// but before it indexed the txs, or, endblocker panicked)
+func createAndStartEventBus(logger log.Logger) (*types.EventBus, error) {
 	eventBus := types.NewEventBus()
 	eventBus.SetLogger(logger.With("module", "events"))
-
-	err = eventBus.Start()
-	if err != nil {
+	if err := eventBus.Start(); err != nil {
 		return nil, err
 	}
+	return eventBus, nil
+}
+
+func createAndStartIndexerService(config *cfg.Config, dbProvider DBProvider,
+	eventBus *types.EventBus, logger log.Logger) (*txindex.IndexerService, txindex.TxIndexer, error) {
 
-	// Transaction indexing
 	var txIndexer txindex.TxIndexer
 	switch config.TxIndex.Indexer {
 	case "kv":
 		store, err := dbProvider(&DBContext{"tx_index", config})
 		if err != nil {
-			return nil, err
+			return nil, nil, err
 		}
 		if config.TxIndex.IndexTags != "" {
 			txIndexer = kv.NewTxIndex(store, kv.IndexTags(splitAndTrimEmpty(config.TxIndex.IndexTags, ",", " ")))
@@ -253,26 +232,26 @@ func NewNode(config *cfg.Config,
 
 	indexerService := txindex.NewIndexerService(txIndexer, eventBus)
 	indexerService.SetLogger(logger.With("module", "txindex"))
-
-	err = indexerService.Start()
-	if err != nil {
-		return nil, err
+	if err := indexerService.Start(); err != nil {
+		return nil, nil, err
 	}
+	return indexerService, txIndexer, nil
+}
+
+func doHandshake(stateDB dbm.DB, state sm.State, blockStore sm.BlockStore,
+	genDoc *types.GenesisDoc, eventBus *types.EventBus, proxyApp proxy.AppConns, consensusLogger log.Logger) error {
 
-	// Create the handshaker, which calls RequestInfo, sets the AppVersion on the state,
-	// and replays any blocks as necessary to sync tendermint with the app.
-	consensusLogger := logger.With("module", "consensus")
 	handshaker := cs.NewHandshaker(stateDB, state, blockStore, genDoc)
 	handshaker.SetLogger(consensusLogger)
 	handshaker.SetEventBus(eventBus)
 	if err := handshaker.Handshake(proxyApp); err != nil {
-		return nil, fmt.Errorf("Error during handshake: %v", err)
+		return fmt.Errorf("error during handshake: %v", err)
 	}
+	return nil
+}
 
-	// Reload the state. It will have the Version.Consensus.App set by the
-	// Handshake, and may have other modifications as well (ie. depending on
-	// what happened during block replay).
-	state = sm.LoadState(stateDB)
+func logNodeStartupInfo(state sm.State, privValidator types.PrivValidator, logger,
+	consensusLogger log.Logger) {
 
 	// Log the version info.
 	logger.Info("Version info",
@@ -289,27 +268,6 @@ func NewNode(config *cfg.Config,
 		)
 	}
 
-	if config.PrivValidatorListenAddr != "" {
-		// If an address is provided, listen on the socket for a connection from an
-		// external signing process.
-		// FIXME: we should start services inside OnStart
-		privValidator, err = createAndStartPrivValidatorSocketClient(config.PrivValidatorListenAddr, logger)
-		if err != nil {
-			return nil, errors.Wrap(err, "Error with private validator socket client")
-		}
-	}
-
-	// Decide whether to fast-sync or not
-	// We don't fast-sync when the only validator is us.
-	fastSync := config.FastSync
-	if state.Validators.Size() == 1 {
-		addr, _ := state.Validators.GetByIndex(0)
-		privValAddr := privValidator.GetPubKey().Address()
-		if bytes.Equal(privValAddr, addr) {
-			fastSync = false
-		}
-	}
-
 	pubKey := privValidator.GetPubKey()
 	addr := pubKey.Address()
 	// Log whether this node is a validator or an observer
@@ -318,11 +276,20 @@ func NewNode(config *cfg.Config,
 	} else {
 		consensusLogger.Info("This node is not a validator", "addr", addr, "pubKey", pubKey)
 	}
+}
 
-	csMetrics, p2pMetrics, memplMetrics, smMetrics := metricsProvider(genDoc.ChainID)
+func onlyValidatorIsUs(state sm.State, privVal types.PrivValidator) bool {
+	if state.Validators.Size() > 1 {
+		return false
+	}
+	addr, _ := state.Validators.GetByIndex(0)
+	return bytes.Equal(privVal.GetPubKey().Address(), addr)
+}
 
-	// Make MempoolReactor
-	mempool := mempl.NewMempool(
+func createMempoolAndMempoolReactor(config *cfg.Config, proxyApp proxy.AppConns,
+	state sm.State, memplMetrics *mempl.Metrics, logger log.Logger) (*mempl.Reactor, *mempl.CListMempool) {
+
+	mempool := mempl.NewCListMempool(
 		config.Mempool,
 		proxyApp.Mempool(),
 		state.LastBlockHeight,
@@ -331,51 +298,42 @@ func NewNode(config *cfg.Config,
 		mempl.WithPostCheck(sm.TxPostCheck(state)),
 	)
 	mempoolLogger := logger.With("module", "mempool")
-	mempool.SetLogger(mempoolLogger)
-	if config.Mempool.WalEnabled() {
-		mempool.InitWAL() // no need to have the mempool wal during tests
-	}
-	mempoolReactor := mempl.NewMempoolReactor(config.Mempool, mempool)
+	mempoolReactor := mempl.NewReactor(config.Mempool, mempool)
 	mempoolReactor.SetLogger(mempoolLogger)
 
 	if config.Consensus.WaitForTxs() {
 		mempool.EnableTxsAvailable()
 	}
+	return mempoolReactor, mempool
+}
+
+func createEvidenceReactor(config *cfg.Config, dbProvider DBProvider,
+	stateDB dbm.DB, logger log.Logger) (*evidence.EvidenceReactor, *evidence.EvidencePool, error) {
 
-	// Make Evidence Reactor
 	evidenceDB, err := dbProvider(&DBContext{"evidence", config})
 	if err != nil {
-		return nil, err
+		return nil, nil, err
 	}
 	evidenceLogger := logger.With("module", "evidence")
 	evidencePool := evidence.NewEvidencePool(stateDB, evidenceDB)
 	evidencePool.SetLogger(evidenceLogger)
 	evidenceReactor := evidence.NewEvidenceReactor(evidencePool)
 	evidenceReactor.SetLogger(evidenceLogger)
+	return evidenceReactor, evidencePool, nil
+}
 
-	blockExecLogger := logger.With("module", "state")
-	// make block executor for consensus and blockchain reactors to execute blocks
-	blockExec := sm.NewBlockExecutor(
-		stateDB,
-		blockExecLogger,
-		proxyApp.Consensus(),
-		mempool,
-		evidencePool,
-		sm.BlockExecutorWithMetrics(smMetrics),
-	)
+func createConsensusReactor(config *cfg.Config,
+	state sm.State,
+	blockExec *sm.BlockExecutor,
+	blockStore sm.BlockStore,
+	mempool *mempl.CListMempool,
+	evidencePool *evidence.EvidencePool,
+	privValidator types.PrivValidator,
+	csMetrics *cs.Metrics,
+	fastSync bool,
+	eventBus *types.EventBus,
+	consensusLogger log.Logger) (*consensus.ConsensusReactor, *consensus.ConsensusState) {
 
-	var bcReactor p2p.Reactor
-	// Make BlockchainReactor
-	switch config.FastSyncParams.Version {
-	case "experimental":
-		bcReactor = bcexp.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync)
-	default:
-		bcReactor = bc.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync)
-	}
-
-	bcReactor.SetLogger(logger.With("module", "blockchain"))
-
-	// Make ConsensusReactor
 	consensusState := cs.NewConsensusState(
 		config.Consensus,
 		state.Copy(),
@@ -391,28 +349,13 @@ func NewNode(config *cfg.Config,
 	}
 	consensusReactor := cs.NewConsensusReactor(consensusState, fastSync, cs.ReactorMetrics(csMetrics))
 	consensusReactor.SetLogger(consensusLogger)
-
 	// services which will be publishing and/or subscribing for messages (events)
 	// consensusReactor will set it on consensusState and blockExecutor
 	consensusReactor.SetEventBus(eventBus)
+	return consensusReactor, consensusState
+}
 
-	p2pLogger := logger.With("module", "p2p")
-	nodeInfo, err := makeNodeInfo(
-		config,
-		nodeKey.ID(),
-		txIndexer,
-		genDoc.ChainID,
-		p2p.NewProtocolVersion(
-			version.P2PProtocol, // global
-			state.Version.Consensus.Block,
-			state.Version.Consensus.App,
-		),
-	)
-	if err != nil {
-		return nil, err
-	}
-
-	// Setup Transport.
+func createTransport(config *cfg.Config, nodeInfo p2p.NodeInfo, nodeKey *p2p.NodeKey, proxyApp proxy.AppConns) (*p2p.MultiplexTransport, []p2p.PeerFilterFunc) {
 	var (
 		mConnConfig = p2p.MConnConfig(config.P2P)
 		transport   = p2p.NewMultiplexTransport(nodeInfo, *nodeKey, mConnConfig)
@@ -438,7 +381,7 @@
 				return err
 			}
 			if res.IsErr() {
-				return fmt.Errorf("Error querying abci app: %v", res)
+				return fmt.Errorf("error querying abci app: %v", res)
 			}
 
 			return nil
@@ -456,7 +399,7 @@
 				return err
 			}
 			if res.IsErr() {
-				return fmt.Errorf("Error querying abci app: %v", res)
+				return fmt.Errorf("error querying abci app: %v", res)
 			}
 
 			return nil
@@ -465,8 +408,21 @@
 	}
 
 	p2p.MultiplexTransportConnFilters(connFilters...)(transport)
+	return transport, peerFilters
+}
+
+func createSwitch(config *cfg.Config,
+	transport *p2p.MultiplexTransport,
+	p2pMetrics *p2p.Metrics,
+	peerFilters []p2p.PeerFilterFunc,
+	mempoolReactor *mempl.Reactor,
+	bcReactor p2p.Reactor,
+	consensusReactor *consensus.ConsensusReactor,
+	evidenceReactor *evidence.EvidenceReactor,
+	nodeInfo p2p.NodeInfo,
+	nodeKey *p2p.NodeKey,
+	p2pLogger log.Logger) *p2p.Switch {
 
-	// Setup Switch.
 	sw := p2p.NewSwitch(
 		config.P2P,
 		transport,
@@ -482,6 +438,172 @@
 	sw.SetNodeKey(nodeKey)
 
 	p2pLogger.Info("P2P Node ID", "ID", nodeKey.ID(), "file", config.NodeKeyFile())
+	return sw
+}
+
+func createAddrBookAndSetOnSwitch(config *cfg.Config, sw *p2p.Switch,
+	p2pLogger log.Logger) pex.AddrBook {
+
+	addrBook := pex.NewAddrBook(config.P2P.AddrBookFile(), config.P2P.AddrBookStrict)
+	addrBook.SetLogger(p2pLogger.With("book", config.P2P.AddrBookFile()))
+
+	// Add ourselves to addrbook to prevent dialing ourselves
+	addrBook.AddOurAddress(sw.NetAddress())
+
+	sw.SetAddrBook(addrBook)
+
+	return addrBook
+}
+
+func createPEXReactorAndAddToSwitch(addrBook pex.AddrBook, config *cfg.Config,
+	sw *p2p.Switch, logger log.Logger) *pex.PEXReactor {
+
+	// TODO persistent peers ? so we can have their DNS addrs saved
+	pexReactor := pex.NewPEXReactor(addrBook,
+		&pex.PEXReactorConfig{
+			Seeds:    splitAndTrimEmpty(config.P2P.Seeds, ",", " "),
+			SeedMode: config.P2P.SeedMode,
+			// See consensus/reactor.go: blocksToContributeToBecomeGoodPeer 10000
+			// blocks assuming 10s blocks ~ 28 hours.
+			// TODO (melekes): make it dynamic based on the actual block latencies
+			// from the live network.
+			// https://github.com/tendermint/tendermint/issues/3523
+			SeedDisconnectWaitPeriod: 28 * time.Hour,
+		})
+	pexReactor.SetLogger(logger.With("module", "pex"))
+	sw.AddReactor("PEX", pexReactor)
+	return pexReactor
+}
+
+// NewNode returns a new, ready to go, Tendermint Node.
+func NewNode(config *cfg.Config,
+	privValidator types.PrivValidator,
+	nodeKey *p2p.NodeKey,
+	clientCreator proxy.ClientCreator,
+	genesisDocProvider GenesisDocProvider,
+	dbProvider DBProvider,
+	metricsProvider MetricsProvider,
+	logger log.Logger) (*Node, error) {
+
+	blockStore, stateDB, err := initDBs(config, dbProvider)
+	if err != nil {
+		return nil, err
+	}
+
+	state, genDoc, err := LoadStateFromDBOrGenesisDocProvider(stateDB, genesisDocProvider)
+	if err != nil {
+		return nil, err
+	}
+
+	// Create the proxyApp and establish connections to the ABCI app (consensus, mempool, query).
+	proxyApp, err := createAndStartProxyAppConns(clientCreator, logger)
+	if err != nil {
+		return nil, err
+	}
+
+	// EventBus and IndexerService must be started before the handshake because
+	// we might need to index the txs of the replayed block as this might not have happened
+	// when the node stopped last time (i.e. the node stopped after it saved the block
+	// but before it indexed the txs, or, endblocker panicked)
+	eventBus, err := createAndStartEventBus(logger)
+	if err != nil {
+		return nil, err
+	}
+
+	// Transaction indexing
+	indexerService, txIndexer, err := createAndStartIndexerService(config, dbProvider, eventBus, logger)
+	if err != nil {
+		return nil, err
+	}
+
+	// Create the handshaker, which calls RequestInfo, sets the AppVersion on the state,
+	// and replays any blocks as necessary to sync tendermint with the app.
+	consensusLogger := logger.With("module", "consensus")
+	if err := doHandshake(stateDB, state, blockStore, genDoc, eventBus, proxyApp, consensusLogger); err != nil {
+		return nil, err
+	}
+
+	// Reload the state. It will have the Version.Consensus.App set by the
+	// Handshake, and may have other modifications as well (ie. depending on
+	// what happened during block replay).
+	state = sm.LoadState(stateDB)
+
+	// If an address is provided, listen on the socket for a connection from an
+	// external signing process.
+	if config.PrivValidatorListenAddr != "" {
+		// FIXME: we should start services inside OnStart
+		privValidator, err = createAndStartPrivValidatorSocketClient(config.PrivValidatorListenAddr, logger)
+		if err != nil {
+			return nil, errors.Wrap(err, "error with private validator socket client")
+		}
+	}
+
+	logNodeStartupInfo(state, privValidator, logger, consensusLogger)
+
+	// Decide whether to fast-sync or not
+	// We don't fast-sync when the only validator is us.
+	fastSync := config.FastSync && !onlyValidatorIsUs(state, privValidator)
+
+	csMetrics, p2pMetrics, memplMetrics, smMetrics := metricsProvider(genDoc.ChainID)
+
+	// Make MempoolReactor
+	mempoolReactor, mempool := createMempoolAndMempoolReactor(config, proxyApp, state, memplMetrics, logger)
+
+	// Make Evidence Reactor
+	evidenceReactor, evidencePool, err := createEvidenceReactor(config, dbProvider, stateDB, logger)
+	if err != nil {
+		return nil, err
+	}
+
+	// make block executor for consensus and blockchain reactors to execute blocks
+	blockExec := sm.NewBlockExecutor(
+		stateDB,
+		logger.With("module", "state"),
+		proxyApp.Consensus(),
+		mempool,
+		evidencePool,
+		sm.BlockExecutorWithMetrics(smMetrics),
+	)
+
+	// Make BlockchainReactor
+	var bcReactor p2p.Reactor
+	// Make BlockchainReactor
+	switch config.FastSyncParams.Version {
+	case "experimental":
+		bcReactor = bcexp.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync)
+	default:
+		bcReactor = bc.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync)
+	}
+
+	bcReactor.SetLogger(logger.With("module", "blockchain"))
+
+	// Make ConsensusReactor
+	consensusReactor, consensusState := createConsensusReactor(
+		config, state, blockExec, blockStore, mempool, evidencePool,
+		privValidator, csMetrics, fastSync, eventBus, consensusLogger,
+	)
+
+	nodeInfo, err := makeNodeInfo(config, nodeKey, txIndexer, genDoc, state)
+	if err != nil {
+		return nil, err
+	}
+
+	// Setup Transport.
+	transport, peerFilters := createTransport(config, nodeInfo, nodeKey, proxyApp)
+
+	// Setup Switch.
+	p2pLogger := logger.With("module", "p2p")
+	sw := createSwitch(
+		config, transport, p2pMetrics, peerFilters, mempoolReactor, bcReactor,
+		consensusReactor, evidenceReactor, nodeInfo, nodeKey, p2pLogger,
+	)
+
+	err = sw.AddPersistentPeers(splitAndTrimEmpty(config.P2P.PersistentPeers, ",", " "))
+	if err != nil {
+		return nil, errors.Wrap(err, "could not add peers from persistent_peers field")
+	}
+
+	addrBook := createAddrBookAndSetOnSwitch(config, sw, p2pLogger)
 
 	// Optionally, start the pex reactor
 	//
@@ -495,37 +617,13 @@ func NewNode(config *cfg.Config,
 	//
 	// If PEX is on, it should handle dialing the seeds. Otherwise the switch does it.
 	// Note we currently use the addrBook regardless at least for AddOurAddress
-	addrBook := pex.NewAddrBook(config.P2P.AddrBookFile(), config.P2P.AddrBookStrict)
-
-	// Add ourselves to addrbook to prevent dialing ourselves
-	addrBook.AddOurAddress(sw.NetAddress())
-
-	addrBook.SetLogger(p2pLogger.With("book", config.P2P.AddrBookFile()))
+	var pexReactor *pex.PEXReactor
 	if config.P2P.PexReactor {
-		// TODO persistent peers ? so we can have their DNS addrs saved
-		pexReactor := pex.NewPEXReactor(addrBook,
-			&pex.PEXReactorConfig{
-				Seeds:    splitAndTrimEmpty(config.P2P.Seeds, ",", " "),
-				SeedMode: config.P2P.SeedMode,
-				// See consensus/reactor.go: blocksToContributeToBecomeGoodPeer 10000
-				// blocks assuming 10s blocks ~ 28 hours.
-				// TODO (melekes): make it dynamic based on the actual block latencies
-				// from the live network.
-				// https://github.com/tendermint/tendermint/issues/3523
-				SeedDisconnectWaitPeriod: 28 * time.Hour,
-			})
-		pexReactor.SetLogger(logger.With("module", "pex"))
-		sw.AddReactor("PEX", pexReactor)
+		pexReactor = createPEXReactorAndAddToSwitch(addrBook, config, sw, logger)
 	}
 
-	sw.SetAddrBook(addrBook)
-
-	// run the profile server
-	profileHost := config.ProfListenAddress
-	if profileHost != "" {
-		go func() {
-			logger.Error("Profile server", "err", http.ListenAndServe(profileHost, nil))
-		}()
+	if config.ProfListenAddress != "" {
+		go logger.Error("Profile server", "err", http.ListenAndServe(config.ProfListenAddress, nil))
 	}
 
 	node := &Node{
@@ -543,8 +641,10 @@ func NewNode(config *cfg.Config,
 		blockStore:       blockStore,
 		bcReactor:        bcReactor,
 		mempoolReactor:   mempoolReactor,
+		mempool:          mempool,
 		consensusState:   consensusState,
 		consensusReactor: consensusReactor,
+		pexReactor:       pexReactor,
 		evidencePool:     evidencePool,
 		proxyApp:         proxyApp,
 		txIndexer:        txIndexer,
@@ -583,7 +683,7 @@ func (n *Node) OnStart() error {
 	}
 
 	// Start the transport.
-	addr, err := p2p.NewNetAddressStringWithOptionalID(n.config.P2P.ListenAddress)
+	addr, err := p2p.NewNetAddressString(p2p.IDAddressString(n.nodeKey.ID(), n.config.P2P.ListenAddress))
 	if err != nil {
 		return err
 	}
@@ -593,6 +693,10 @@ func (n *Node) OnStart() error {
 
 	n.isListening = true
 
+	if n.config.Mempool.WalEnabled() {
+		n.mempool.InitWAL() // no need to have the mempool wal during tests
+	}
+
 	// Start the switch (the P2P server).
 	err = n.sw.Start()
 	if err != nil {
@@ -600,11 +704,9 @@ func (n *Node) OnStart() error {
 	}
 
 	// Always connect to persistent peers
-	if n.config.P2P.PersistentPeers != "" {
-		err = n.sw.DialPeersAsync(n.addrBook, splitAndTrimEmpty(n.config.P2P.PersistentPeers, ",", " "), true)
-		if err != nil {
-			return err
-		}
+	err = n.sw.DialPeersAsync(splitAndTrimEmpty(n.config.P2P.PersistentPeers, ",", " "))
+	if err != nil {
+		return errors.Wrap(err, "could not dial peers from persistent_peers field")
 	}
 
 	return nil
@@ -621,12 +723,11 @@ func (n *Node) OnStop() {
 	n.indexerService.Stop()
 
 	// now stop the reactors
-	// TODO: gracefully disconnect from peers.
 	n.sw.Stop()
 
 	// stop mempool WAL
 	if n.config.Mempool.WalEnabled() {
-		n.mempoolReactor.Mempool.CloseWAL()
+		n.mempool.CloseWAL()
 	}
 
 	if err := n.transport.Close(); err != nil {
@@ -661,7 +762,7 @@ func (n *Node) ConfigureRPC() {
 	rpccore.SetStateDB(n.stateDB)
 	rpccore.SetBlockStore(n.blockStore)
 	rpccore.SetConsensusState(n.consensusState)
-	rpccore.SetMempool(n.mempoolReactor.Mempool)
+	rpccore.SetMempool(n.mempool)
 	rpccore.SetEvidencePool(n.evidencePool)
 	rpccore.SetP2PPeers(n.sw)
 	rpccore.SetP2PTransport(n)
@@ -808,11 +909,21 @@ func (n *Node) ConsensusReactor() *cs.ConsensusReactor {
 	return n.consensusReactor
 }
 
-// MempoolReactor returns the Node's MempoolReactor.
-func (n *Node) MempoolReactor() *mempl.MempoolReactor {
+// MempoolReactor returns the Node's mempool reactor.
+func (n *Node) MempoolReactor() *mempl.Reactor {
 	return n.mempoolReactor
 }
 
+// Mempool returns the Node's mempool.
+func (n *Node) Mempool() mempl.Mempool {
+	return n.mempool
+}
+
+// PEXReactor returns the Node's PEXReactor. It returns nil if PEX is disabled.
+func (n *Node) PEXReactor() *pex.PEXReactor {
+	return n.pexReactor
+}
+
 // EvidencePool returns the Node's EvidencePool.
 func (n *Node) EvidencePool() *evidence.EvidencePool {
 	return n.evidencePool
@@ -863,10 +974,10 @@ func (n *Node) NodeInfo() p2p.NodeInfo {
 
 func makeNodeInfo(
 	config *cfg.Config,
-	nodeID p2p.ID,
+	nodeKey *p2p.NodeKey,
 	txIndexer txindex.TxIndexer,
-	chainID string,
-	protocolVersion p2p.ProtocolVersion,
+	genDoc *types.GenesisDoc,
+	state sm.State,
 ) (p2p.NodeInfo, error) {
 	txIndexerStatus := "on"
 	if _, ok := txIndexer.(*null.TxIndex); ok {
@@ -882,10 +993,14 @@ func makeNodeInfo(
 	}
 
 	nodeInfo := p2p.DefaultNodeInfo{
-		ProtocolVersion: protocolVersion,
-		ID_:             nodeID,
-		Network:         chainID,
-		Version:         version.TMCoreSemVer,
+		ProtocolVersion: p2p.NewProtocolVersion(
+			version.P2PProtocol, // global
+			state.Version.Consensus.Block,
+			state.Version.Consensus.App,
+		),
+		ID_:     nodeKey.ID(),
+		Network: genDoc.ChainID,
+		Version: version.TMCoreSemVer,
 		Channels: []byte{
 			bcChannel,
 			cs.StateChannel, cs.DataChannel, cs.VoteChannel, cs.VoteSetBitsChannel,
@@ -921,27 +1036,50 @@ var (
 	genesisDocKey = []byte("genesisDoc")
 )
 
+// LoadStateFromDBOrGenesisDocProvider attempts to load the state from the
+// database, or creates one using the given genesisDocProvider and persists the
+// result to the database. On success this also returns the genesis doc loaded
+// through the given provider.
+func LoadStateFromDBOrGenesisDocProvider(stateDB dbm.DB, genesisDocProvider GenesisDocProvider) (sm.State, *types.GenesisDoc, error) {
+	// Get genesis doc
+	genDoc, err := loadGenesisDoc(stateDB)
+	if err != nil {
+		genDoc, err = genesisDocProvider()
+		if err != nil {
+			return sm.State{}, nil, err
+		}
+		// save genesis doc to prevent a certain class of user errors (e.g. when it
+		// was changed, accidentally or not). Also good for audit trail.
+		saveGenesisDoc(stateDB, genDoc)
+	}
+	state, err := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc)
+	if err != nil {
+		return sm.State{}, nil, err
+	}
+	return state, genDoc, nil
+}
+
 // panics if failed to unmarshal bytes
 func loadGenesisDoc(db dbm.DB) (*types.GenesisDoc, error) {
-	bytes := db.Get(genesisDocKey)
-	if len(bytes) == 0 {
+	b := db.Get(genesisDocKey)
+	if len(b) == 0 {
 		return nil, errors.New("Genesis doc not found")
 	}
 	var genDoc *types.GenesisDoc
-	err := cdc.UnmarshalJSON(bytes, &genDoc)
+	err := cdc.UnmarshalJSON(b, &genDoc)
 	if err != nil {
-		cmn.PanicCrisis(fmt.Sprintf("Failed to load genesis doc due to unmarshaling error: %v (bytes: %X)", err, bytes))
+		panic(fmt.Sprintf("Failed to load genesis doc due to unmarshaling error: %v (bytes: %X)", err, b))
 	}
 	return genDoc, nil
 }
 
 // panics if failed to marshal the given genesis document
 func saveGenesisDoc(db dbm.DB, genDoc *types.GenesisDoc) {
-	bytes, err := cdc.MarshalJSON(genDoc)
+	b, err := cdc.MarshalJSON(genDoc)
 	if err != nil {
-		cmn.PanicCrisis(fmt.Sprintf("Failed to save genesis doc due to marshaling error: %v", err))
+		panic(fmt.Sprintf("Failed to save genesis doc due to marshaling error: %v", err))
 	}
-	db.SetSync(genesisDocKey, bytes)
+	db.SetSync(genesisDocKey, b)
 }
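A usage sketch for the newly exported helper, assuming `types.GenesisDocFromFile` as the provider (names like `loadOrInitState`/`genFile` are illustrative; the node package's own `GenesisDocProvider` values would also fit):

```go
package example

import (
	dbm "github.com/tendermint/tendermint/libs/db"
	"github.com/tendermint/tendermint/node"
	sm "github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/types"
)

// loadOrInitState sketches the intended call: the first run falls through
// to the provider and persists the genesis doc; later runs hit the DB.
func loadOrInitState(stateDB dbm.DB, genFile string) (sm.State, *types.GenesisDoc, error) {
	return node.LoadStateFromDBOrGenesisDocProvider(
		stateDB,
		func() (*types.GenesisDoc, error) { return types.GenesisDocFromFile(genFile) },
	)
}
```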
@@ -964,7 +1102,7 @@ func createAndStartPrivValidatorSocketClient(
 		listener = privval.NewTCPListener(ln, ed25519.GenPrivKey())
 	default:
 		return nil, fmt.Errorf(
-			"Wrong listen address: expected either 'tcp' or 'unix' protocols, got %s",
+			"wrong listen address: expected either 'tcp' or 'unix' protocols, got %s",
 			protocol,
 		)
 	}
diff --git a/node/node_test.go b/node/node_test.go
index a2725d84..6971ddd3 100644
--- a/node/node_test.go
+++ b/node/node_test.go
@@ -224,7 +224,7 @@ func TestCreateProposalBlock(t *testing.T) {
 
 	// Make Mempool
 	memplMetrics := mempl.PrometheusMetrics("node_test")
-	mempool := mempl.NewMempool(
+	mempool := mempl.NewCListMempool(
 		config.Mempool,
 		proxyApp.Mempool(),
 		state.LastBlockHeight,
diff --git a/p2p/base_reactor.go b/p2p/base_reactor.go
index be65d2f1..3bccabd6 100644
--- a/p2p/base_reactor.go
+++ b/p2p/base_reactor.go
@@ -5,23 +5,40 @@ import (
 	"github.com/tendermint/tendermint/p2p/conn"
 )
 
+// Reactor is responsible for handling incoming messages on one or more
+// Channel. Switch calls GetChannels when reactor is added to it. When a new
+// peer joins our node, InitPeer and AddPeer are called. RemovePeer is called
+// when the peer is stopped. Receive is called when a message is received on a
+// channel associated with this reactor.
+//
+// Peer#Send or Peer#TrySend should be used to send the message to a peer.
 type Reactor interface {
 	cmn.Service // Start, Stop
 
 	// SetSwitch allows setting a switch.
 	SetSwitch(*Switch)
 
-	// GetChannels returns the list of channel descriptors.
+	// GetChannels returns the list of MConnection.ChannelDescriptor. Make sure
+	// that each ID is unique across all the reactors added to the switch.
 	GetChannels() []*conn.ChannelDescriptor
 
-	// AddPeer is called by the switch when a new peer is added.
+	// InitPeer is called by the switch before the peer is started. Use it to
+	// initialize data for the peer (e.g. peer state).
+	//
+	// NOTE: The switch won't call AddPeer nor RemovePeer if it fails to start
+	// the peer. Do not store any data associated with the peer in the reactor
+	// itself unless you don't want to have a state, which is never cleaned up.
+	InitPeer(peer Peer) Peer
+
+	// AddPeer is called by the switch after the peer is added and successfully
+	// started. Use it to start goroutines communicating with the peer.
 	AddPeer(peer Peer)
 
 	// RemovePeer is called by the switch when the peer is stopped (due to error
 	// or other reason).
 	RemovePeer(peer Peer, reason interface{})
 
-	// Receive is called when msgBytes is received from peer.
+	// Receive is called by the switch when msgBytes is received from the peer.
 	//
 	// NOTE reactor can not keep msgBytes around after Receive completes without
 	// copying.
@@ -51,3 +68,4 @@ func (*BaseReactor) GetChannels() []*conn.ChannelDescriptor { return nil
 func (*BaseReactor) AddPeer(peer Peer)                             {}
 func (*BaseReactor) RemovePeer(peer Peer, reason interface{})      {}
 func (*BaseReactor) Receive(chID byte, peer Peer, msgBytes []byte) {}
+func (*BaseReactor) InitPeer(peer Peer) Peer                       { return peer }
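A minimal custom reactor against the documented lifecycle: allocate per-peer state in `InitPeer` (before the peer starts), spawn goroutines only in `AddPeer` (after it started), and clean up in `RemovePeer`. All names here are illustrative, not part of this change:

```go
package example

import (
	"sync"

	"github.com/tendermint/tendermint/p2p"
)

type peerData struct{} // per-peer bookkeeping would live here

type echoReactor struct {
	p2p.BaseReactor // provides default GetChannels/Receive/etc.
	peers           sync.Map // p2p.ID -> *peerData
}

// InitPeer runs before the peer is started: the safe place to allocate state.
func (r *echoReactor) InitPeer(peer p2p.Peer) p2p.Peer {
	r.peers.Store(peer.ID(), &peerData{})
	return peer
}

// AddPeer runs after the peer started: safe to spawn goroutines that talk to it.
func (r *echoReactor) AddPeer(peer p2p.Peer) {
	go func() {
		<-peer.Quit() // placeholder for a real send/receive routine
	}()
}

// RemovePeer cleans up whatever InitPeer allocated.
func (r *echoReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
	r.peers.Delete(peer.ID())
}
```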
diff --git a/p2p/conn/connection.go b/p2p/conn/connection.go
index e0ce062a..ee29fc85 100644
--- a/p2p/conn/connection.go
+++ b/p2p/conn/connection.go
@@ -707,7 +707,7 @@ type Channel struct {
 func newChannel(conn *MConnection, desc ChannelDescriptor) *Channel {
 	desc = desc.FillDefaults()
 	if desc.Priority <= 0 {
-		cmn.PanicSanity("Channel default priority must be a positive integer")
+		panic("Channel default priority must be a positive integer")
 	}
 	return &Channel{
 		conn: conn,
diff --git a/p2p/conn/secret_connection.go b/p2p/conn/secret_connection.go
index 36d6ee1b..7f76ac80 100644
--- a/p2p/conn/secret_connection.go
+++ b/p2p/conn/secret_connection.go
@@ -8,6 +8,7 @@ import (
 	"encoding/binary"
 	"errors"
 	"io"
+	"math"
 	"net"
 	"sync"
 	"time"
@@ -439,6 +440,11 @@ func shareAuthSignature(sc *SecretConnection, pubKey crypto.PubKey, signature []
 // (little-endian in nonce[4:]).
 func incrNonce(nonce *[aeadNonceSize]byte) {
 	counter := binary.LittleEndian.Uint64(nonce[4:])
+	if counter == math.MaxUint64 {
+		// Terminates the session and makes sure the nonce would not be re-used.
+		// See https://github.com/tendermint/tendermint/issues/3531
+		panic("can't increase nonce without overflow")
+	}
 	counter++
 	binary.LittleEndian.PutUint64(nonce[4:], counter)
 }
diff --git a/p2p/metrics.go b/p2p/metrics.go
index 3a6b9568..675dd9c7 100644
--- a/p2p/metrics.go
+++ b/p2p/metrics.go
@@ -47,13 +47,13 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {
 			Subsystem: MetricsSubsystem,
 			Name:      "peer_receive_bytes_total",
 			Help:      "Number of bytes received from a given peer.",
-		}, append(labels, "peer_id")).With(labelsAndValues...),
+		}, append(labels, "peer_id", "chID")).With(labelsAndValues...),
 		PeerSendBytesTotal: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
 			Namespace: namespace,
 			Subsystem: MetricsSubsystem,
 			Name:      "peer_send_bytes_total",
 			Help:      "Number of bytes sent to a given peer.",
-		}, append(labels, "peer_id")).With(labelsAndValues...),
+		}, append(labels, "peer_id", "chID")).With(labelsAndValues...),
 		PeerPendingSendBytes: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
 			Namespace: namespace,
 			Subsystem: MetricsSubsystem,
diff --git a/p2p/netaddress.go b/p2p/netaddress.go
index 5534ded9..d1150452 100644
--- a/p2p/netaddress.go
+++ b/p2p/netaddress.go
@@ -14,8 +14,6 @@ import (
 	"time"
 
 	"errors"
-
-	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
 // NetAddress defines information about a peer on the network
@@ -48,7 +46,7 @@ func NewNetAddress(id ID, addr net.Addr) *NetAddress {
 	tcpAddr, ok := addr.(*net.TCPAddr)
 	if !ok {
 		if flag.Lookup("test.v") == nil { // normal run
-			cmn.PanicSanity(fmt.Sprintf("Only TCPAddrs are supported. Got: %v", addr))
+			panic(fmt.Sprintf("Only TCPAddrs are supported. Got: %v", addr))
 		} else { // in testing
 			netAddr := NewNetAddressIPPort(net.IP("0.0.0.0"), 0)
 			netAddr.ID = id
@@ -67,36 +65,27 @@ func NewNetAddress(id ID, addr net.Addr) *NetAddress {
 // Also resolves the host if host is not an IP.
 // Errors are of type ErrNetAddressXxx where Xxx is in (NoID, Invalid, Lookup)
 func NewNetAddressString(addr string) (*NetAddress, error) {
-	spl := strings.Split(addr, "@")
-	if len(spl) < 2 {
+	addrWithoutProtocol := removeProtocolIfDefined(addr)
+	spl := strings.Split(addrWithoutProtocol, "@")
+	if len(spl) != 2 {
 		return nil, ErrNetAddressNoID{addr}
 	}
-	return NewNetAddressStringWithOptionalID(addr)
-}
 
-// NewNetAddressStringWithOptionalID returns a new NetAddress using the
-// provided address in the form of "ID@IP:Port", where the ID is optional.
-// Also resolves the host if host is not an IP.
-func NewNetAddressStringWithOptionalID(addr string) (*NetAddress, error) {
-	addrWithoutProtocol := removeProtocolIfDefined(addr)
-
-	var id ID
-	spl := strings.Split(addrWithoutProtocol, "@")
-	if len(spl) == 2 {
-		idStr := spl[0]
-		idBytes, err := hex.DecodeString(idStr)
-		if err != nil {
-			return nil, ErrNetAddressInvalid{addrWithoutProtocol, err}
-		}
-		if len(idBytes) != IDByteLength {
-			return nil, ErrNetAddressInvalid{
-				addrWithoutProtocol,
-				fmt.Errorf("invalid hex length - got %d, expected %d", len(idBytes), IDByteLength)}
-		}
-
-		id, addrWithoutProtocol = ID(idStr), spl[1]
+	// get ID
+	idStr := spl[0]
+	idBytes, err := hex.DecodeString(idStr)
+	if err != nil {
+		return nil, ErrNetAddressInvalid{addrWithoutProtocol, err}
 	}
+	if len(idBytes) != IDByteLength {
+		return nil, ErrNetAddressInvalid{
+			addrWithoutProtocol,
+			fmt.Errorf("invalid hex length - got %d, expected %d", len(idBytes), IDByteLength)}
+	}
+	var id ID
+	id, addrWithoutProtocol = ID(idStr), spl[1]
 
+	// get host and port
 	host, portStr, err := net.SplitHostPort(addrWithoutProtocol)
 	if err != nil {
 		return nil, ErrNetAddressInvalid{addrWithoutProtocol, err}
 	}
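The practical effect of making the node ID mandatory, sketched (the `deadbeef…` ID mirrors the test fixtures below):

```go
package example

import "github.com/tendermint/tendermint/p2p"

func addressFormats() {
	// Accepted: ID@host:port, with or without a protocol prefix.
	_, _ = p2p.NewNetAddressString("deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:26656")
	_, _ = p2p.NewNetAddressString("tcp://deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:26656")

	// Rejected now that the ID is mandatory (returns ErrNetAddressNoID);
	// previously accepted via the deleted NewNetAddressStringWithOptionalID.
	_, err := p2p.NewNetAddressString("127.0.0.1:26656")
	_ = err
}
```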
"tcp://xxxxbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "", false}, {"correct nodeId w/tcp", "tcp://deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", true}, - {"no node id when expected", "tcp://@127.0.0.1:8080", "", false}, + {"no node id", "tcp://@127.0.0.1:8080", "", false}, {"no node id or IP", "tcp://@", "", false}, {"tcp no host, w/ port", "tcp://:26656", "", false}, {"empty", "", "", false}, @@ -59,7 +64,7 @@ func TestNewNetAddressStringWithOptionalID(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - addr, err := NewNetAddressStringWithOptionalID(tc.addr) + addr, err := NewNetAddressString(tc.addr) if tc.correct { if assert.Nil(t, err, tc.addr) { assert.Equal(t, tc.expected, addr.String()) @@ -71,28 +76,6 @@ func TestNewNetAddressStringWithOptionalID(t *testing.T) { } } -func TestNewNetAddressString(t *testing.T) { - testCases := []struct { - addr string - expected string - correct bool - }{ - {"127.0.0.1:8080", "127.0.0.1:8080", false}, - {"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", true}, - } - - for _, tc := range testCases { - addr, err := NewNetAddressString(tc.addr) - if tc.correct { - if assert.Nil(t, err, tc.addr) { - assert.Equal(t, tc.expected, addr.String()) - } - } else { - assert.NotNil(t, err, tc.addr) - } - } -} - func TestNewNetAddressStrings(t *testing.T) { addrs, errs := NewNetAddressStrings([]string{ "127.0.0.1:8080", @@ -115,12 +98,12 @@ func TestNetAddressProperties(t *testing.T) { local bool routable bool }{ - {"127.0.0.1:8080", true, true, false}, - {"ya.ru:80", true, false, true}, + {"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", true, true, false}, + {"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@ya.ru:80", true, false, true}, } for _, tc := range testCases { - addr, err := NewNetAddressStringWithOptionalID(tc.addr) + addr, err := NewNetAddressString(tc.addr) require.Nil(t, err) assert.Equal(t, tc.valid, addr.Valid()) @@ -136,15 +119,15 @@ func TestNetAddressReachabilityTo(t *testing.T) { other string reachability int }{ - {"127.0.0.1:8080", "127.0.0.1:8081", 0}, - {"ya.ru:80", "127.0.0.1:8080", 1}, + {"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8081", 0}, + {"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@ya.ru:80", "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", 1}, } for _, tc := range testCases { - addr, err := NewNetAddressStringWithOptionalID(tc.addr) + addr, err := NewNetAddressString(tc.addr) require.Nil(t, err) - other, err := NewNetAddressStringWithOptionalID(tc.other) + other, err := NewNetAddressString(tc.other) require.Nil(t, err) assert.Equal(t, tc.reachability, addr.ReachabilityTo(other)) diff --git a/p2p/peer.go b/p2p/peer.go index fab3b42d..80be0db5 100644 --- a/p2p/peer.go +++ b/p2p/peer.go @@ -248,7 +248,11 @@ func (p *peer) Send(chID byte, msgBytes []byte) bool { } res := p.mconn.Send(chID, msgBytes) if res { - p.metrics.PeerSendBytesTotal.With("peer_id", string(p.ID())).Add(float64(len(msgBytes))) + labels := []string{ + "peer_id", string(p.ID()), + "chID", fmt.Sprintf("%#x", chID), + } + p.metrics.PeerSendBytesTotal.With(labels...).Add(float64(len(msgBytes))) } return res } @@ -263,7 +267,11 @@ func (p *peer) TrySend(chID byte, msgBytes []byte) bool { } res := p.mconn.TrySend(chID, msgBytes) if res { - p.metrics.PeerSendBytesTotal.With("peer_id", 
-		p.metrics.PeerSendBytesTotal.With("peer_id", string(p.ID())).Add(float64(len(msgBytes)))
+		labels := []string{
+			"peer_id", string(p.ID()),
+			"chID", fmt.Sprintf("%#x", chID),
+		}
+		p.metrics.PeerSendBytesTotal.With(labels...).Add(float64(len(msgBytes)))
 	}
 	return res
 }
@@ -369,7 +377,11 @@ func createMConnection(
 			// which does onPeerError.
 			panic(fmt.Sprintf("Unknown channel %X", chID))
 		}
-		p.metrics.PeerReceiveBytesTotal.With("peer_id", string(p.ID())).Add(float64(len(msgBytes)))
+		labels := []string{
+			"peer_id", string(p.ID()),
+			"chID", fmt.Sprintf("%#x", chID),
+		}
+		p.metrics.PeerReceiveBytesTotal.With(labels...).Add(float64(len(msgBytes)))
 		reactor.Receive(chID, p, msgBytes)
 	}
diff --git a/p2p/pex/file.go b/p2p/pex/file.go
index d4a51685..a42eddaf 100644
--- a/p2p/pex/file.go
+++ b/p2p/pex/file.go
@@ -53,14 +53,14 @@ func (a *addrBook) loadFromFile(filePath string) bool {
 	// Load addrBookJSON{}
 	r, err := os.Open(filePath)
 	if err != nil {
-		cmn.PanicCrisis(fmt.Sprintf("Error opening file %s: %v", filePath, err))
+		panic(fmt.Sprintf("Error opening file %s: %v", filePath, err))
 	}
 	defer r.Close() // nolint: errcheck
 	aJSON := &addrBookJSON{}
 	dec := json.NewDecoder(r)
 	err = dec.Decode(aJSON)
 	if err != nil {
-		cmn.PanicCrisis(fmt.Sprintf("Error reading file %s: %v", filePath, err))
+		panic(fmt.Sprintf("Error reading file %s: %v", filePath, err))
 	}
 
 	// Restore all the fields...
diff --git a/p2p/pex/pex_reactor.go b/p2p/pex/pex_reactor.go
index c24ee983..e77fa8ea 100644
--- a/p2p/pex/pex_reactor.go
+++ b/p2p/pex/pex_reactor.go
@@ -48,6 +48,24 @@ const (
 	biasToSelectNewPeers = 30 // 70 to select good peers
 )
 
+type errMaxAttemptsToDial struct {
+}
+
+func (e errMaxAttemptsToDial) Error() string {
+	return fmt.Sprintf("reached max attempts %d to dial", maxAttemptsToDial)
+}
+
+type errTooEarlyToDial struct {
+	backoffDuration time.Duration
+	lastDialed      time.Time
+}
+
+func (e errTooEarlyToDial) Error() string {
+	return fmt.Sprintf(
+		"too early to dial (backoff duration: %d, last dialed: %v, time since: %v)",
+		e.backoffDuration, e.lastDialed, time.Since(e.lastDialed))
+}
+
 // PEXReactor handles PEX (peer exchange) and ensures that an
 // adequate number of peers are connected to the switch.
 //
@@ -127,7 +145,7 @@ func (r *PEXReactor) OnStart() error {
 	if err != nil {
 		return err
 	} else if numOnline == 0 && r.book.Empty() {
-		return errors.New("Address book is empty, and could not connect to any seed nodes")
+		return errors.New("Address book is empty and couldn't resolve any seed nodes")
 	}
 
 	r.seedAddrs = seedAddrs
@@ -186,6 +204,13 @@ func (r *PEXReactor) AddPeer(p Peer) {
 	}
 }
 
+// RemovePeer implements Reactor by resetting peer's requests info.
+func (r *PEXReactor) RemovePeer(p Peer, reason interface{}) {
+	id := string(p.ID())
+	r.requestsSent.Delete(id)
+	r.lastReceivedRequests.Delete(id)
+}
+
 func (r *PEXReactor) logErrAddrBook(err error) {
 	if err != nil {
 		switch err.(type) {
@@ -198,13 +223,6 @@ func (r *PEXReactor) logErrAddrBook(err error) {
 	}
 }
 
-// RemovePeer implements Reactor.
-func (r *PEXReactor) RemovePeer(p Peer, reason interface{}) {
-	id := string(p.ID())
-	r.requestsSent.Delete(id)
-	r.lastReceivedRequests.Delete(id)
-}
-
 // Receive implements Reactor by handling incoming PEX messages.
 func (r *PEXReactor) Receive(chID byte, src Peer, msgBytes []byte) {
 	msg, err := decodeMsg(msgBytes)
lastReceived: %v, now: %v, minInterval: %v. Disconnecting", + return fmt.Errorf("peer (%v) sent next PEX request too soon. lastReceived: %v, now: %v, minInterval: %v. Disconnecting", src.ID(), lastReceived, now, @@ -296,14 +314,14 @@ func (r *PEXReactor) receiveRequest(src Peer) error { return nil } -// RequestAddrs asks peer for more addresses if we do not already -// have a request out for this peer. +// RequestAddrs asks peer for more addresses if we do not already have a +// request out for this peer. func (r *PEXReactor) RequestAddrs(p Peer) { - r.Logger.Debug("Request addrs", "from", p) id := string(p.ID()) if r.requestsSent.Has(id) { return } + r.Logger.Debug("Request addrs", "from", p) r.requestsSent.Set(id, struct{}{}) p.Send(PexChannel, cdc.MustMarshalBinaryBare(&pexRequestMessage{})) } @@ -314,7 +332,7 @@ func (r *PEXReactor) RequestAddrs(p Peer) { func (r *PEXReactor) ReceiveAddrs(addrs []*p2p.NetAddress, src Peer) error { id := string(src.ID()) if !r.requestsSent.Has(id) { - return errors.New("Unsolicited pexAddrsMessage") + return errors.New("unsolicited pexAddrsMessage") } r.requestsSent.Delete(id) @@ -327,7 +345,7 @@ func (r *PEXReactor) ReceiveAddrs(addrs []*p2p.NetAddress, src Peer) error { if netAddr == nil { return errors.New("nil address in pexAddrsMessage") } - // TODO: extract validating logic from NewNetAddressStringWithOptionalID + // TODO: extract validating logic from NewNetAddressString // and put it in netAddr#Valid (#2722) na, err := p2p.NewNetAddressString(netAddr.String()) if err != nil { @@ -449,11 +467,21 @@ func (r *PEXReactor) ensurePeers() { // Dial picked addresses for _, addr := range toDial { - go r.dialPeer(addr) + go func(addr *p2p.NetAddress) { + err := r.dialPeer(addr) + if err != nil { + switch err.(type) { + case errMaxAttemptsToDial, errTooEarlyToDial: + r.Logger.Debug(err.Error(), "addr", addr) + default: + r.Logger.Error(err.Error(), "addr", addr) + } + } + }(addr) } - // If we need more addresses, pick a random peer and ask for more. if r.book.NeedMoreAddrs() { + // 1) Pick a random peer and ask for more. peers := r.Switch.Peers().List() peersCount := len(peers) if peersCount > 0 { @@ -461,12 +489,14 @@ func (r *PEXReactor) ensurePeers() { r.Logger.Info("We need more addresses. Sending pexRequest to random peer", "peer", peer) r.RequestAddrs(peer) } - } - // If we are not connected to nor dialing anybody, fallback to dialing a seed. - if out+in+dial+len(toDial) == 0 { - r.Logger.Info("No addresses to dial nor connected peers. Falling back to seeds") - r.dialSeeds() + // 2) Dial seeds if we are not dialing anyone. + // This is done in addition to asking a peer for addresses to work-around + // peers not participating in PEX. + if len(toDial) == 0 { + r.Logger.Info("No addresses to dial. Falling back to seeds") + r.dialSeeds() + } } } @@ -479,17 +509,16 @@ func (r *PEXReactor) dialAttemptsInfo(addr *p2p.NetAddress) (attempts int, lastD return atd.number, atd.lastDialed } -func (r *PEXReactor) dialPeer(addr *p2p.NetAddress) { +func (r *PEXReactor) dialPeer(addr *p2p.NetAddress) error { attempts, lastDialed := r.dialAttemptsInfo(addr) if attempts > maxAttemptsToDial { - // Do not log the message if the addr gets readded. - if attempts+1 == maxAttemptsToDial { - r.Logger.Info("Reached max attempts to dial", "addr", addr, "attempts", attempts) - r.attemptsToDial.Store(addr.DialString(), _attemptsToDial{attempts + 1, time.Now()}) - } + // TODO(melekes): have a blacklist in the addrbook with peers whom we've + // failed to connect to. 
Then we can clean up attemptsToDial, which acts as + // a blacklist currently. + // https://github.com/tendermint/tendermint/issues/3572 r.book.MarkBad(addr) - return + return errMaxAttemptsToDial{} } // exponential backoff if it's not our first attempt to dial given address @@ -498,33 +527,30 @@ func (r *PEXReactor) dialPeer(addr *p2p.NetAddress) { backoffDuration := jitterSeconds + ((1 << uint(attempts)) * time.Second) sinceLastDialed := time.Since(lastDialed) if sinceLastDialed < backoffDuration { - r.Logger.Debug("Too early to dial", "addr", addr, "backoff_duration", backoffDuration, "last_dialed", lastDialed, "time_since", sinceLastDialed) - return + return errTooEarlyToDial{backoffDuration, lastDialed} } } - err := r.Switch.DialPeerWithAddress(addr, false) + err := r.Switch.DialPeerWithAddress(addr) if err != nil { if _, ok := err.(p2p.ErrCurrentlyDialingOrExistingAddress); ok { - return + return err } - r.Logger.Error("Dialing failed", "addr", addr, "err", err, "attempts", attempts) markAddrInBookBasedOnErr(addr, r.book, err) - if _, ok := err.(p2p.ErrSwitchAuthenticationFailure); ok { + switch err.(type) { + case p2p.ErrSwitchAuthenticationFailure: + // NOTE: addr is removed from addrbook in markAddrInBookBasedOnErr r.attemptsToDial.Delete(addr.DialString()) - } else { - // FIXME: if the addr is going to be removed from the addrbook (hard to - // tell at this point), we need to Delete it from attemptsToDial, not - // record another attempt. - // record attempt + default: r.attemptsToDial.Store(addr.DialString(), _attemptsToDial{attempts + 1, time.Now()}) } - return + return errors.Wrapf(err, "dialing failed (attempts: %d)", attempts+1) } // cleanup any history r.attemptsToDial.Delete(addr.DialString()) + return nil } // checkSeeds checks that addresses are well formed. @@ -547,7 +573,7 @@ func (r *PEXReactor) checkSeeds() (numOnline int, netAddrs []*p2p.NetAddress, er return 0, nil, errors.Wrap(e, "seed node configuration has error") } } - return + return numOnline, netAddrs, nil } // randomly dial seeds until we connect to one or exhaust them @@ -557,7 +583,7 @@ func (r *PEXReactor) dialSeeds() { for _, i := range perm { // dial a random seed seedAddr := r.seedAddrs[i] - err := r.Switch.DialPeerWithAddress(seedAddr, false) + err := r.Switch.DialPeerWithAddress(seedAddr) if err == nil { return } @@ -582,8 +608,13 @@ func (r *PEXReactor) AttemptsToDial(addr *p2p.NetAddress) int { // Seed/Crawler Mode causes this node to quickly disconnect // from peers, except other seed nodes. 
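Note: the dialPeer changes above keep the pre-existing backoff rule: a small random jitter (assumed here to be under a second) plus 2^attempts seconds must elapse between successive dials of the same address, and once attempts exceed maxAttemptsToDial the address is marked bad. A standalone sketch of that schedule, illustrative only, with names and bounds assumed rather than taken from the reactor:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// backoff mirrors the formula in dialPeer above: a random jitter in
// [0s, 1s) plus (1 << attempts) seconds.
func backoff(attempts int) time.Duration {
	jitter := time.Duration(rand.Float64() * float64(time.Second))
	return jitter + ((1 << uint(attempts)) * time.Second)
}

func main() {
	// attempt 0 waits ~1s, attempt 3 waits ~8s, and so on.
	for a := 0; a < 5; a++ {
		fmt.Printf("attempt %d: wait %v\n", a, backoff(a))
	}
}
```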
func (r *PEXReactor) crawlPeersRoutine() { - // Do an initial crawl - r.crawlPeers(r.book.GetSelection()) + // If we have any seed nodes, consult them first + if len(r.seedAddrs) > 0 { + r.dialSeeds() + } else { + // Do an initial crawl + r.crawlPeers(r.book.GetSelection()) + } // Fire periodically ticker := time.NewTicker(crawlPeerPeriod) @@ -633,14 +664,14 @@ func (r *PEXReactor) crawlPeers(addrs []*p2p.NetAddress) { LastCrawled: now, } - err := r.Switch.DialPeerWithAddress(addr, false) + err := r.dialPeer(addr) if err != nil { - if _, ok := err.(p2p.ErrCurrentlyDialingOrExistingAddress); ok { - continue + switch err.(type) { + case errMaxAttemptsToDial, errTooEarlyToDial: + r.Logger.Debug(err.Error(), "addr", addr) + default: + r.Logger.Error(err.Error(), "addr", addr) } - - r.Logger.Error("Dialing failed", "addr", addr, "err", err) - markAddrInBookBasedOnErr(addr, r.book, err) continue } diff --git a/p2p/pex/pex_reactor_test.go b/p2p/pex/pex_reactor_test.go index 077f07a6..f4b7cc26 100644 --- a/p2p/pex/pex_reactor_test.go +++ b/p2p/pex/pex_reactor_test.go @@ -144,7 +144,7 @@ func TestPEXReactorRequestMessageAbuse(t *testing.T) { sw.SetAddrBook(book) peer := mock.NewPeer(nil) - p2p.AddPeerToSwitch(sw, peer) + p2p.AddPeerToSwitchPeerSet(sw, peer) assert.True(t, sw.Peers().Has(peer.ID())) id := string(peer.ID()) @@ -174,7 +174,7 @@ func TestPEXReactorAddrsMessageAbuse(t *testing.T) { sw.SetAddrBook(book) peer := mock.NewPeer(nil) - p2p.AddPeerToSwitch(sw, peer) + p2p.AddPeerToSwitchPeerSet(sw, peer) assert.True(t, sw.Peers().Has(peer.ID())) id := string(peer.ID()) @@ -291,7 +291,8 @@ func TestPEXReactorSeedMode(t *testing.T) { require.Nil(t, err) defer os.RemoveAll(dir) // nolint: errcheck - pexR, book := createReactor(&PEXReactorConfig{SeedMode: true, SeedDisconnectWaitPeriod: 10 * time.Millisecond}) + pexRConfig := &PEXReactorConfig{SeedMode: true, SeedDisconnectWaitPeriod: 10 * time.Millisecond} + pexR, book := createReactor(pexRConfig) defer teardownReactor(book) sw := createSwitchAndAddReactors(pexR) @@ -315,13 +316,80 @@ func TestPEXReactorSeedMode(t *testing.T) { pexR.attemptDisconnects() assert.Equal(t, 1, sw.Peers().Size()) - time.Sleep(100 * time.Millisecond) + // sleep for SeedDisconnectWaitPeriod + time.Sleep(pexRConfig.SeedDisconnectWaitPeriod + 1*time.Millisecond) // 3. attemptDisconnects should disconnect after wait period pexR.attemptDisconnects() assert.Equal(t, 0, sw.Peers().Size()) } +func TestPEXReactorDoesNotDisconnectFromPersistentPeerInSeedMode(t *testing.T) { + // directory to store address books + dir, err := ioutil.TempDir("", "pex_reactor") + require.Nil(t, err) + defer os.RemoveAll(dir) // nolint: errcheck + + pexRConfig := &PEXReactorConfig{SeedMode: true, SeedDisconnectWaitPeriod: 1 * time.Millisecond} + pexR, book := createReactor(pexRConfig) + defer teardownReactor(book) + + sw := createSwitchAndAddReactors(pexR) + sw.SetAddrBook(book) + err = sw.Start() + require.NoError(t, err) + defer sw.Stop() + + assert.Zero(t, sw.Peers().Size()) + + peerSwitch := testCreateDefaultPeer(dir, 1) + require.NoError(t, peerSwitch.Start()) + defer peerSwitch.Stop() + + err = sw.AddPersistentPeers([]string{peerSwitch.NetAddress().String()}) + require.NoError(t, err) + + // 1. 
Test crawlPeers dials the peer + pexR.crawlPeers([]*p2p.NetAddress{peerSwitch.NetAddress()}) + assert.Equal(t, 1, sw.Peers().Size()) + assert.True(t, sw.Peers().Has(peerSwitch.NodeInfo().ID())) + + // sleep for SeedDisconnectWaitPeriod + time.Sleep(pexRConfig.SeedDisconnectWaitPeriod + 1*time.Millisecond) + + // 2. attemptDisconnects should not disconnect because the peer is persistent + pexR.attemptDisconnects() + assert.Equal(t, 1, sw.Peers().Size()) +} + +func TestPEXReactorDialsPeerUpToMaxAttemptsInSeedMode(t *testing.T) { + // directory to store address books + dir, err := ioutil.TempDir("", "pex_reactor") + require.Nil(t, err) + defer os.RemoveAll(dir) // nolint: errcheck + + pexR, book := createReactor(&PEXReactorConfig{SeedMode: true}) + defer teardownReactor(book) + + sw := createSwitchAndAddReactors(pexR) + sw.SetAddrBook(book) + err = sw.Start() + require.NoError(t, err) + defer sw.Stop() + + peer := mock.NewPeer(nil) + addr := peer.SocketAddr() + + err = book.AddAddress(addr, addr) + require.NoError(t, err) + + assert.True(t, book.HasAddress(addr)) + // imitate maxAttemptsToDial reached + pexR.attemptsToDial.Store(addr.DialString(), _attemptsToDial{maxAttemptsToDial + 1, time.Now()}) + pexR.crawlPeers([]*p2p.NetAddress{addr}) + assert.False(t, book.HasAddress(addr)) +} + // connect a peer to a seed, wait a bit, then stop it. // this should give it time to request addrs and for the seed // to call FlushStop, and allows us to test calling Stop concurrently @@ -370,7 +438,7 @@ func TestPEXReactorSeedModeFlushStop(t *testing.T) { reactor := switches[0].Reactors()["pex"].(*PEXReactor) peerID := switches[1].NodeInfo().ID() - err = switches[1].DialPeerWithAddress(switches[0].NetAddress(), false) + err = switches[1].DialPeerWithAddress(switches[0].NetAddress()) assert.NoError(t, err) // sleep up to a second while waiting for the peer to send us a message. diff --git a/p2p/switch.go b/p2p/switch.go index afd7d965..7e681d67 100644 --- a/p2p/switch.go +++ b/p2p/switch.go @@ -77,6 +77,8 @@ type Switch struct { nodeInfo NodeInfo // our node info nodeKey *NodeKey // our node privkey addrBook AddrBook + // peers addresses with whom we'll maintain constant connection + persistentPeersAddrs []*NetAddress transport Transport @@ -104,16 +106,17 @@ func NewSwitch( options ...SwitchOption, ) *Switch { sw := &Switch{ - config: cfg, - reactors: make(map[string]Reactor), - chDescs: make([]*conn.ChannelDescriptor, 0), - reactorsByCh: make(map[byte]Reactor), - peers: NewPeerSet(), - dialing: cmn.NewCMap(), - reconnecting: cmn.NewCMap(), - metrics: NopMetrics(), - transport: transport, - filterTimeout: defaultFilterTimeout, + config: cfg, + reactors: make(map[string]Reactor), + chDescs: make([]*conn.ChannelDescriptor, 0), + reactorsByCh: make(map[byte]Reactor), + peers: NewPeerSet(), + dialing: cmn.NewCMap(), + reconnecting: cmn.NewCMap(), + metrics: NopMetrics(), + transport: transport, + filterTimeout: defaultFilterTimeout, + persistentPeersAddrs: make([]*NetAddress, 0), } // Ensure we have a completely undeterministic PRNG. 
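Note: dialPeer now returns typed errors (errMaxAttemptsToDial and errTooEarlyToDial above) instead of logging internally, so each caller (ensurePeers, crawlPeers) picks its own log level with a type switch. A minimal, self-contained sketch of that pattern using a stand-in type, not the reactor's own:

```go
package main

import (
	"fmt"
	"time"
)

// Stand-in for the reactor's typed dial errors above.
type errTooEarlyToDial struct {
	backoff    time.Duration
	lastDialed time.Time
}

func (e errTooEarlyToDial) Error() string {
	return fmt.Sprintf("too early to dial (backoff: %v, last dialed: %v)",
		e.backoff, e.lastDialed)
}

// logDialErr chooses severity the way ensurePeers and crawlPeers now do:
// expected conditions get a debug line, anything else is a real error.
func logDialErr(err error) {
	switch err.(type) {
	case errTooEarlyToDial:
		fmt.Println("DEBUG:", err)
	default:
		fmt.Println("ERROR:", err)
	}
}

func main() {
	logDialErr(errTooEarlyToDial{5 * time.Second, time.Now()})
	logDialErr(fmt.Errorf("handshake failed"))
}
```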
@@ -155,7 +158,7 @@ func (sw *Switch) AddReactor(name string, reactor Reactor) Reactor { for _, chDesc := range reactorChannels { chID := chDesc.ID if sw.reactorsByCh[chID] != nil { - cmn.PanicSanity(fmt.Sprintf("Channel %X has multiple reactors %v & %v", chID, sw.reactorsByCh[chID], reactor)) + panic(fmt.Sprintf("Channel %X has multiple reactors %v & %v", chID, sw.reactorsByCh[chID], reactor)) } sw.chDescs = append(sw.chDescs, chDesc) sw.reactorsByCh[chID] = reactor @@ -218,11 +221,7 @@ func (sw *Switch) OnStart() error { func (sw *Switch) OnStop() { // Stop peers for _, p := range sw.peers.List() { - sw.transport.Cleanup(p) - p.Stop() - if sw.peers.Remove(p) { - sw.metrics.Peers.Add(float64(-1)) - } + sw.stopAndRemovePeer(p, nil) } // Stop reactors @@ -297,7 +296,19 @@ func (sw *Switch) StopPeerForError(peer Peer, reason interface{}) { sw.stopAndRemovePeer(peer, reason) if peer.IsPersistent() { - go sw.reconnectToPeer(peer.SocketAddr()) + var addr *NetAddress + if peer.IsOutbound() { // socket address for outbound peers + addr = peer.SocketAddr() + } else { // self-reported address for inbound peers + var err error + addr, err = peer.NodeInfo().NetAddress() + if err != nil { + sw.Logger.Error("Wanted to reconnect to inbound peer, but self-reported address is wrong", + "peer", peer, "err", err) + return + } + } + go sw.reconnectToPeer(addr) } } @@ -309,14 +320,20 @@ func (sw *Switch) StopPeerGracefully(peer Peer) { } func (sw *Switch) stopAndRemovePeer(peer Peer, reason interface{}) { - if sw.peers.Remove(peer) { - sw.metrics.Peers.Add(float64(-1)) - } sw.transport.Cleanup(peer) peer.Stop() + for _, reactor := range sw.reactors { reactor.RemovePeer(peer, reason) } + + // Removing a peer should go last to avoid a situation where a peer + // reconnect to our node and the switch calls InitPeer before + // RemovePeer is finished. + // https://github.com/tendermint/tendermint/issues/3338 + if sw.peers.Remove(peer) { + sw.metrics.Peers.Add(float64(-1)) + } } // reconnectToPeer tries to reconnect to the addr, first repeatedly @@ -341,7 +358,7 @@ func (sw *Switch) reconnectToPeer(addr *NetAddress) { return } - err := sw.DialPeerWithAddress(addr, true) + err := sw.DialPeerWithAddress(addr) if err == nil { return // success } else if _, ok := err.(ErrCurrentlyDialingOrExistingAddress); ok { @@ -365,7 +382,7 @@ func (sw *Switch) reconnectToPeer(addr *NetAddress) { sleepIntervalSeconds := math.Pow(reconnectBackOffBaseSeconds, float64(i)) sw.randomSleep(time.Duration(sleepIntervalSeconds) * time.Second) - err := sw.DialPeerWithAddress(addr, true) + err := sw.DialPeerWithAddress(addr) if err == nil { return // success } else if _, ok := err.(ErrCurrentlyDialingOrExistingAddress); ok { @@ -401,28 +418,41 @@ func isPrivateAddr(err error) bool { return ok && te.PrivateAddr() } -// DialPeersAsync dials a list of peers asynchronously in random order (optionally, making them persistent). +// DialPeersAsync dials a list of peers asynchronously in random order. // Used to dial peers from config on startup or from unsafe-RPC (trusted sources). -// TODO: remove addrBook arg since it's now set on the switch -func (sw *Switch) DialPeersAsync(addrBook AddrBook, peers []string, persistent bool) error { +// It ignores ErrNetAddressLookup. However, if there are other errors, first +// encounter is returned. +// Nop if there are no peers. 
+func (sw *Switch) DialPeersAsync(peers []string) error { netAddrs, errs := NewNetAddressStrings(peers) - // only log errors, dial correct addresses + // report all the errors for _, err := range errs { sw.Logger.Error("Error in peer's address", "err", err) } + // return first non-ErrNetAddressLookup error + for _, err := range errs { + if _, ok := err.(ErrNetAddressLookup); ok { + continue + } + return err + } + sw.dialPeersAsync(netAddrs) + return nil +} +func (sw *Switch) dialPeersAsync(netAddrs []*NetAddress) { ourAddr := sw.NetAddress() // TODO: this code feels like it's in the wrong place. // The integration tests depend on the addrBook being saved // right away but maybe we can change that. Recall that // the addrBook is only written to disk every 2min - if addrBook != nil { + if sw.addrBook != nil { // add peers to `addrBook` for _, netAddr := range netAddrs { // do not add our address or ID if !netAddr.Same(ourAddr) { - if err := addrBook.AddAddress(netAddr, ourAddr); err != nil { + if err := sw.addrBook.AddAddress(netAddr, ourAddr); err != nil { if isPrivateAddr(err) { sw.Logger.Debug("Won't add peer's address to addrbook", "err", err) } else { @@ -433,7 +463,7 @@ func (sw *Switch) DialPeersAsync(addrBook AddrBook, peers []string, persistent b } // Persist some peers to disk right away. // NOTE: integration tests depend on this - addrBook.Save() + sw.addrBook.Save() } // permute the list, dial them in random order. @@ -450,7 +480,7 @@ func (sw *Switch) DialPeersAsync(addrBook AddrBook, peers []string, persistent b sw.randomSleep(0) - err := sw.DialPeerWithAddress(addr, persistent) + err := sw.DialPeerWithAddress(addr) if err != nil { switch err.(type) { case ErrSwitchConnectToSelf, ErrSwitchDuplicatePeerID, ErrCurrentlyDialingOrExistingAddress: @@ -461,16 +491,13 @@ func (sw *Switch) DialPeersAsync(addrBook AddrBook, peers []string, persistent b } }(i) } - return nil } // DialPeerWithAddress dials the given peer and runs sw.addPeer if it connects // and authenticates successfully. -// If `persistent == true`, the switch will always try to reconnect to this -// peer if the connection ever fails. // If we're currently dialing this address or it belongs to an existing peer, // ErrCurrentlyDialingOrExistingAddress is returned. -func (sw *Switch) DialPeerWithAddress(addr *NetAddress, persistent bool) error { +func (sw *Switch) DialPeerWithAddress(addr *NetAddress) error { if sw.IsDialingOrExistingAddress(addr) { return ErrCurrentlyDialingOrExistingAddress{addr.String()} } @@ -478,7 +505,7 @@ func (sw *Switch) DialPeerWithAddress(addr *NetAddress, persistent bool) error { sw.dialing.Set(string(addr.ID), addr) defer sw.dialing.Delete(string(addr.ID)) - return sw.addOutboundPeerWithConfig(addr, sw.config, persistent) + return sw.addOutboundPeerWithConfig(addr, sw.config) } // sleep for interval plus some random amount of ms on [0, dialRandomizerIntervalMilliseconds] @@ -495,6 +522,38 @@ func (sw *Switch) IsDialingOrExistingAddress(addr *NetAddress) bool { (!sw.config.AllowDuplicateIP && sw.peers.HasIP(addr.IP)) } +// AddPersistentPeers allows you to set persistent peers. It ignores +// ErrNetAddressLookup. However, if there are other errors, first encounter is +// returned. 
+func (sw *Switch) AddPersistentPeers(addrs []string) error { + sw.Logger.Info("Adding persistent peers", "addrs", addrs) + netAddrs, errs := NewNetAddressStrings(addrs) + // report all the errors + for _, err := range errs { + sw.Logger.Error("Error in peer's address", "err", err) + } + // return first non-ErrNetAddressLookup error + for _, err := range errs { + if _, ok := err.(ErrNetAddressLookup); ok { + continue + } + return err + } + sw.persistentPeersAddrs = netAddrs + return nil +} + +func (sw *Switch) isPeerPersistentFn() func(*NetAddress) bool { + return func(na *NetAddress) bool { + for _, pa := range sw.persistentPeersAddrs { + if pa.Equals(na) { + return true + } + } + return false + } +} + func (sw *Switch) acceptRoutine() { for { p, err := sw.transport.Accept(peerConfig{ @@ -502,6 +561,7 @@ func (sw *Switch) acceptRoutine() { onPeerError: sw.StopPeerForError, reactorsByCh: sw.reactorsByCh, metrics: sw.metrics, + isPersistent: sw.isPeerPersistentFn(), }) if err != nil { switch err := err.(type) { @@ -581,13 +641,12 @@ func (sw *Switch) acceptRoutine() { // dial the peer; make secret connection; authenticate against the dialed ID; // add the peer. -// if dialing fails, start the reconnect loop. If handhsake fails, its over. -// If peer is started succesffuly, reconnectLoop will start when -// StopPeerForError is called +// if dialing fails, start the reconnect loop. If handshake fails, it's over. +// If peer is started successfully, reconnectLoop will start when +// StopPeerForError is called. func (sw *Switch) addOutboundPeerWithConfig( addr *NetAddress, cfg *config.P2PConfig, - persistent bool, ) error { sw.Logger.Info("Dialing peer", "address", addr) @@ -600,7 +659,7 @@ func (sw *Switch) addOutboundPeerWithConfig( p, err := sw.transport.Dial(*addr, peerConfig{ chDescs: sw.chDescs, onPeerError: sw.StopPeerForError, - persistent: persistent, + isPersistent: sw.isPeerPersistentFn(), reactorsByCh: sw.reactorsByCh, metrics: sw.metrics, }) @@ -619,7 +678,7 @@ func (sw *Switch) addOutboundPeerWithConfig( // retry persistent peers after // any dial error besides IsSelf() - if persistent { + if sw.isPeerPersistentFn()(addr) { go sw.reconnectToPeer(addr) } @@ -682,6 +741,11 @@ func (sw *Switch) addPeer(p Peer) error { return nil } + // Add some data to the peer, which is required by reactors. + for _, reactor := range sw.reactors { + p = reactor.InitPeer(p) + } + // Start the peer's send/recv routines. // Must start it before adding it to the peer set // to prevent Start and Stop from being called concurrently. 
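Note: taken together, the switch.go changes above move "is this peer persistent?" from a per-dial boolean into switch-level state. A rough sketch of the new call order under this API (the config, transport and node-key wiring that a real Switch needs is elided):

```go
// Sketch only; assumes just the exported switch API added or changed in
// this diff.
package p2pexample

import "github.com/tendermint/tendermint/p2p"

// startWithPersistentPeers shows the new call order: persistent peers are
// registered on the switch up front, and DialPeersAsync no longer takes an
// address book or a persistent flag.
func startWithPersistentPeers(sw *p2p.Switch, peers []string) error {
	// peers are ID@host:port strings; DNS lookup failures are only logged,
	// while any other malformed address aborts with the first error found.
	if err := sw.AddPersistentPeers(peers); err != nil {
		return err
	}
	if err := sw.Start(); err != nil {
		return err
	}
	// Persistence is now resolved per address inside the switch
	// (isPeerPersistentFn), for outbound and inbound peers alike.
	return sw.DialPeersAsync(peers)
}
```

Because persistence is resolved per address, an inbound peer whose self-reported address matches a persistent entry is now reconnected to as well (see the StopPeerForError hunk above).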
diff --git a/p2p/switch_test.go b/p2p/switch_test.go index bf105e0f..aa5ca78b 100644 --- a/p2p/switch_test.go +++ b/p2p/switch_test.go @@ -12,6 +12,7 @@ import ( "regexp" "strconv" "sync" + "sync/atomic" "testing" "time" @@ -167,7 +168,7 @@ func TestSwitchFiltersOutItself(t *testing.T) { rp.Start() // addr should be rejected in addPeer based on the same ID - err := s1.DialPeerWithAddress(rp.Addr(), false) + err := s1.DialPeerWithAddress(rp.Addr()) if assert.Error(t, err) { if err, ok := err.(ErrRejected); ok { if !err.IsSelf() { @@ -212,6 +213,7 @@ func TestSwitchPeerFilter(t *testing.T) { p, err := sw.transport.Dial(*rp.Addr(), peerConfig{ chDescs: sw.chDescs, onPeerError: sw.StopPeerForError, + isPersistent: sw.isPeerPersistentFn(), reactorsByCh: sw.reactorsByCh, }) if err != nil { @@ -256,6 +258,7 @@ func TestSwitchPeerFilterTimeout(t *testing.T) { p, err := sw.transport.Dial(*rp.Addr(), peerConfig{ chDescs: sw.chDescs, onPeerError: sw.StopPeerForError, + isPersistent: sw.isPeerPersistentFn(), reactorsByCh: sw.reactorsByCh, }) if err != nil { @@ -281,6 +284,7 @@ func TestSwitchPeerFilterDuplicate(t *testing.T) { p, err := sw.transport.Dial(*rp.Addr(), peerConfig{ chDescs: sw.chDescs, onPeerError: sw.StopPeerForError, + isPersistent: sw.isPeerPersistentFn(), reactorsByCh: sw.reactorsByCh, }) if err != nil { @@ -326,6 +330,7 @@ func TestSwitchStopsNonPersistentPeerOnError(t *testing.T) { p, err := sw.transport.Dial(*rp.Addr(), peerConfig{ chDescs: sw.chDescs, onPeerError: sw.StopPeerForError, + isPersistent: sw.isPeerPersistentFn(), reactorsByCh: sw.reactorsByCh, }) require.Nil(err) @@ -390,49 +395,32 @@ func TestSwitchStopPeerForError(t *testing.T) { assert.EqualValues(t, 0, peersMetricValue()) } -func TestSwitchReconnectsToPersistentPeer(t *testing.T) { - assert, require := assert.New(t), require.New(t) - +func TestSwitchReconnectsToOutboundPersistentPeer(t *testing.T) { sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc) err := sw.Start() - if err != nil { - t.Error(err) - } + require.NoError(t, err) defer sw.Stop() - // simulate remote peer + // 1. simulate failure by closing connection rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} rp.Start() defer rp.Stop() - p, err := sw.transport.Dial(*rp.Addr(), peerConfig{ - chDescs: sw.chDescs, - onPeerError: sw.StopPeerForError, - persistent: true, - reactorsByCh: sw.reactorsByCh, - }) - require.Nil(err) + err = sw.AddPersistentPeers([]string{rp.Addr().String()}) + require.NoError(t, err) - require.Nil(sw.addPeer(p)) + err = sw.DialPeerWithAddress(rp.Addr()) + require.Nil(t, err) + require.NotNil(t, sw.Peers().Get(rp.ID())) - require.NotNil(sw.Peers().Get(rp.ID())) - - // simulate failure by closing connection + p := sw.Peers().List()[0] p.(*peer).CloseConn() - // TODO: remove sleep, detect the disconnection, wait for reconnect - npeers := sw.Peers().Size() - for i := 0; i < 20; i++ { - time.Sleep(250 * time.Millisecond) - npeers = sw.Peers().Size() - if npeers > 0 { - break - } - } - assert.NotZero(npeers) - assert.False(p.IsRunning()) + waitUntilSwitchHasAtLeastNPeers(sw, 1) + assert.False(t, p.IsRunning()) // old peer instance + assert.Equal(t, 1, sw.Peers().Size()) // new peer instance - // simulate another remote peer + // 2. 
simulate first time dial failure rp = &remotePeer{ PrivKey: ed25519.GenPrivKey(), Config: cfg, @@ -443,23 +431,68 @@ rp.Start() defer rp.Stop() - // simulate first time dial failure conf := config.DefaultP2PConfig() - conf.TestDialFail = true - err = sw.addOutboundPeerWithConfig(rp.Addr(), conf, true) - require.NotNil(err) - + conf.TestDialFail = true // will trigger a reconnect + err = sw.addOutboundPeerWithConfig(rp.Addr(), conf) + require.NotNil(t, err) // DialPeerWithAddress - sw.peerConfig resets the dialer + waitUntilSwitchHasAtLeastNPeers(sw, 2) + assert.Equal(t, 2, sw.Peers().Size()) +} - // TODO: same as above +func TestSwitchReconnectsToInboundPersistentPeer(t *testing.T) { + sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc) + err := sw.Start() + require.NoError(t, err) + defer sw.Stop() + + // 1. simulate failure by closing the connection + rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} + rp.Start() + defer rp.Stop() + + err = sw.AddPersistentPeers([]string{rp.Addr().String()}) + require.NoError(t, err) + + conn, err := rp.Dial(sw.NetAddress()) + require.NoError(t, err) + time.Sleep(50 * time.Millisecond) + require.NotNil(t, sw.Peers().Get(rp.ID())) + + conn.Close() + + waitUntilSwitchHasAtLeastNPeers(sw, 1) + assert.Equal(t, 1, sw.Peers().Size()) +} + +func TestSwitchDialPeersAsync(t *testing.T) { + if testing.Short() { + return + } + + sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc) + err := sw.Start() + require.NoError(t, err) + defer sw.Stop() + + rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} + rp.Start() + defer rp.Stop() + + err = sw.DialPeersAsync([]string{rp.Addr().String()}) + require.NoError(t, err) + time.Sleep(dialRandomizerIntervalMilliseconds * time.Millisecond) + require.NotNil(t, sw.Peers().Get(rp.ID())) +} + +func waitUntilSwitchHasAtLeastNPeers(sw *Switch, n int) { for i := 0; i < 20; i++ { time.Sleep(250 * time.Millisecond) - npeers = sw.Peers().Size() - if npeers > 1 { + has := sw.Peers().Size() + if has >= n { break } } - assert.EqualValues(2, npeers) } func TestSwitchFullConnectivity(t *testing.T) { @@ -571,6 +604,71 @@ func TestSwitchAcceptRoutineErrorCases(t *testing.T) { }) } +// mockReactor checks that InitPeer is never called before RemovePeer. If that's +// not true, InitCalledBeforeRemoveFinished will return true. 
+type mockReactor struct { + *BaseReactor + + // atomic + removePeerInProgress uint32 + initCalledBeforeRemoveFinished uint32 +} + +func (r *mockReactor) RemovePeer(peer Peer, reason interface{}) { + atomic.StoreUint32(&r.removePeerInProgress, 1) + defer atomic.StoreUint32(&r.removePeerInProgress, 0) + time.Sleep(100 * time.Millisecond) +} + +func (r *mockReactor) InitPeer(peer Peer) Peer { + if atomic.LoadUint32(&r.removePeerInProgress) == 1 { + atomic.StoreUint32(&r.initCalledBeforeRemoveFinished, 1) + } + + return peer +} + +func (r *mockReactor) InitCalledBeforeRemoveFinished() bool { + return atomic.LoadUint32(&r.initCalledBeforeRemoveFinished) == 1 +} + +// see stopAndRemovePeer +func TestSwitchInitPeerIsNotCalledBeforeRemovePeer(t *testing.T) { + // make reactor + reactor := &mockReactor{} + reactor.BaseReactor = NewBaseReactor("mockReactor", reactor) + + // make switch + sw := MakeSwitch(cfg, 1, "testing", "123.123.123", func(i int, sw *Switch) *Switch { + sw.AddReactor("mock", reactor) + return sw + }) + err := sw.Start() + require.NoError(t, err) + defer sw.Stop() + + // add peer + rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} + rp.Start() + defer rp.Stop() + _, err = rp.Dial(sw.NetAddress()) + require.NoError(t, err) + // wait till the switch adds rp to the peer set + time.Sleep(50 * time.Millisecond) + + // stop peer asynchronously + go sw.StopPeerForError(sw.Peers().Get(rp.ID()), "test") + + // simulate peer reconnecting to us + _, err = rp.Dial(sw.NetAddress()) + require.NoError(t, err) + // wait till the switch adds rp to the peer set + time.Sleep(50 * time.Millisecond) + + // make sure reactor.RemovePeer is finished before InitPeer is called + assert.False(t, reactor.InitCalledBeforeRemoveFinished()) +} + func BenchmarkSwitchBroadcast(b *testing.B) { s1, s2 := MakeSwitchPair(b, func(i int, sw *Switch) *Switch { // Make bar reactors of bar channels each diff --git a/p2p/test_util.go b/p2p/test_util.go index f8020924..fa175aeb 100644 --- a/p2p/test_util.go +++ b/p2p/test_util.go @@ -27,7 +27,7 @@ func (ni mockNodeInfo) NetAddress() (*NetAddress, error) { return ni.addr, ni func (ni mockNodeInfo) Validate() error { return nil } func (ni mockNodeInfo) CompatibleWith(other NodeInfo) error { return nil } -func AddPeerToSwitch(sw *Switch, peer Peer) { +func AddPeerToSwitchPeerSet(sw *Switch, peer Peer) { sw.peers.Add(peer) } diff --git a/p2p/transport.go b/p2p/transport.go index ebf77c9f..8d6ea236 100644 --- a/p2p/transport.go +++ b/p2p/transport.go @@ -37,11 +37,15 @@ type accept struct { // events. // TODO(xla): Refactor out with more static Reactor setup and PeerBehaviour. type peerConfig struct { - chDescs []*conn.ChannelDescriptor - onPeerError func(Peer, interface{}) - outbound, persistent bool - reactorsByCh map[byte]Reactor - metrics *Metrics + chDescs []*conn.ChannelDescriptor + onPeerError func(Peer, interface{}) + outbound bool + // isPersistent allows you to set a function, which, given socket address + // (for outbound peers) OR self-reported address (for inbound peers), tells + // if the peer is persistent or not. + isPersistent func(*NetAddress) bool + reactorsByCh map[byte]Reactor + metrics *Metrics } // Transport emits and connects to Peers. 
The implementation of Peer is left to @@ -446,9 +450,21 @@ func (mt *MultiplexTransport) wrapPeer( socketAddr *NetAddress, ) Peer { + persistent := false + if cfg.isPersistent != nil { + if cfg.outbound { + persistent = cfg.isPersistent(socketAddr) + } else { + selfReportedAddr, err := ni.NetAddress() + if err == nil { + persistent = cfg.isPersistent(selfReportedAddr) + } + } + } + peerConn := newPeerConn( cfg.outbound, - cfg.persistent, + persistent, c, socketAddr, ) diff --git a/p2p/transport_test.go b/p2p/transport_test.go index 35fd9c66..7580f025 100644 --- a/p2p/transport_test.go +++ b/p2p/transport_test.go @@ -8,8 +8,6 @@ import ( "testing" "time" - "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/p2p/conn" ) @@ -39,6 +37,7 @@ func TestTransportMultiplexConnFilter(t *testing.T) { PrivKey: ed25519.GenPrivKey(), }, ) + id := mt.nodeKey.ID() MultiplexTransportConnFilters( func(_ ConnSet, _ net.Conn, _ []net.IP) error { return nil }, @@ -48,7 +47,7 @@ func TestTransportMultiplexConnFilter(t *testing.T) { }, )(mt) - addr, err := NewNetAddressStringWithOptionalID("127.0.0.1:0") + addr, err := NewNetAddressString(IDAddressString(id, "127.0.0.1:0")) if err != nil { t.Fatal(err) } @@ -60,13 +59,9 @@ func TestTransportMultiplexConnFilter(t *testing.T) { errc := make(chan error) go func() { - addr, err := NewNetAddressStringWithOptionalID(mt.listener.Addr().String()) - if err != nil { - errc <- err - return - } + addr := NewNetAddress(id, mt.listener.Addr()) - _, err = addr.Dial() + _, err := addr.Dial() if err != nil { errc <- err return @@ -96,6 +91,7 @@ func TestTransportMultiplexConnFilterTimeout(t *testing.T) { PrivKey: ed25519.GenPrivKey(), }, ) + id := mt.nodeKey.ID() MultiplexTransportFilterTimeout(5 * time.Millisecond)(mt) MultiplexTransportConnFilters( @@ -105,7 +101,7 @@ func TestTransportMultiplexConnFilterTimeout(t *testing.T) { }, )(mt) - addr, err := NewNetAddressStringWithOptionalID("127.0.0.1:0") + addr, err := NewNetAddressString(IDAddressString(id, "127.0.0.1:0")) if err != nil { t.Fatal(err) } @@ -117,13 +113,9 @@ func TestTransportMultiplexConnFilterTimeout(t *testing.T) { errc := make(chan error) go func() { - addr, err := NewNetAddressStringWithOptionalID(mt.listener.Addr().String()) - if err != nil { - errc <- err - return - } + addr := NewNetAddress(id, mt.listener.Addr()) - _, err = addr.Dial() + _, err := addr.Dial() if err != nil { errc <- err return @@ -144,9 +136,7 @@ func TestTransportMultiplexConnFilterTimeout(t *testing.T) { func TestTransportMultiplexAcceptMultiple(t *testing.T) { mt := testSetupMultiplexTransport(t) - id, addr := mt.nodeKey.ID(), mt.listener.Addr().String() - laddr, err := NewNetAddressStringWithOptionalID(IDAddressString(id, addr)) - require.NoError(t, err) + laddr := NewNetAddress(mt.nodeKey.ID(), mt.listener.Addr()) var ( seed = rand.New(rand.NewSource(time.Now().UnixNano())) @@ -232,11 +222,7 @@ func TestTransportMultiplexAcceptNonBlocking(t *testing.T) { // Simulate slow Peer. 
go func() { - addr, err := NewNetAddressStringWithOptionalID(IDAddressString(mt.nodeKey.ID(), mt.listener.Addr().String())) - if err != nil { - errc <- err - return - } + addr := NewNetAddress(mt.nodeKey.ID(), mt.listener.Addr()) c, err := addr.Dial() if err != nil { @@ -283,13 +269,9 @@ func TestTransportMultiplexAcceptNonBlocking(t *testing.T) { }, ) ) - addr, err := NewNetAddressStringWithOptionalID(IDAddressString(mt.nodeKey.ID(), mt.listener.Addr().String())) - if err != nil { - errc <- err - return - } + addr := NewNetAddress(mt.nodeKey.ID(), mt.listener.Addr()) - _, err = dialer.Dial(*addr, peerConfig{}) + _, err := dialer.Dial(*addr, peerConfig{}) if err != nil { errc <- err return @@ -329,13 +311,9 @@ func TestTransportMultiplexValidateNodeInfo(t *testing.T) { ) ) - addr, err := NewNetAddressStringWithOptionalID(IDAddressString(mt.nodeKey.ID(), mt.listener.Addr().String())) - if err != nil { - errc <- err - return - } + addr := NewNetAddress(mt.nodeKey.ID(), mt.listener.Addr()) - _, err = dialer.Dial(*addr, peerConfig{}) + _, err := dialer.Dial(*addr, peerConfig{}) if err != nil { errc <- err return @@ -372,13 +350,9 @@ func TestTransportMultiplexRejectMissmatchID(t *testing.T) { PrivKey: ed25519.GenPrivKey(), }, ) - addr, err := NewNetAddressStringWithOptionalID(IDAddressString(mt.nodeKey.ID(), mt.listener.Addr().String())) - if err != nil { - errc <- err - return - } + addr := NewNetAddress(mt.nodeKey.ID(), mt.listener.Addr()) - _, err = dialer.Dial(*addr, peerConfig{}) + _, err := dialer.Dial(*addr, peerConfig{}) if err != nil { errc <- err return @@ -415,12 +389,9 @@ func TestTransportMultiplexDialRejectWrongID(t *testing.T) { ) wrongID := PubKeyToID(ed25519.GenPrivKey().PubKey()) - addr, err := NewNetAddressStringWithOptionalID(IDAddressString(wrongID, mt.listener.Addr().String())) - if err != nil { - t.Fatalf("invalid address with ID: %v", err) - } + addr := NewNetAddress(wrongID, mt.listener.Addr()) - _, err = dialer.Dial(*addr, peerConfig{}) + _, err := dialer.Dial(*addr, peerConfig{}) if err != nil { t.Logf("connection failed: %v", err) if err, ok := err.(ErrRejected); ok { @@ -448,13 +419,9 @@ func TestTransportMultiplexRejectIncompatible(t *testing.T) { }, ) ) - addr, err := NewNetAddressStringWithOptionalID(IDAddressString(mt.nodeKey.ID(), mt.listener.Addr().String())) - if err != nil { - errc <- err - return - } + addr := NewNetAddress(mt.nodeKey.ID(), mt.listener.Addr()) - _, err = dialer.Dial(*addr, peerConfig{}) + _, err := dialer.Dial(*addr, peerConfig{}) if err != nil { errc <- err return @@ -479,13 +446,9 @@ func TestTransportMultiplexRejectSelf(t *testing.T) { errc := make(chan error) go func() { - addr, err := NewNetAddressStringWithOptionalID(IDAddressString(mt.nodeKey.ID(), mt.listener.Addr().String())) - if err != nil { - errc <- err - return - } + addr := NewNetAddress(mt.nodeKey.ID(), mt.listener.Addr()) - _, err = mt.Dial(*addr, peerConfig{}) + _, err := mt.Dial(*addr, peerConfig{}) if err != nil { errc <- err return @@ -609,7 +572,7 @@ func testSetupMultiplexTransport(t *testing.T) *MultiplexTransport { ) ) - addr, err := NewNetAddressStringWithOptionalID(IDAddressString(id, "127.0.0.1:0")) + addr, err := NewNetAddressString(IDAddressString(id, "127.0.0.1:0")) if err != nil { t.Fatal(err) } diff --git a/p2p/trust/store.go b/p2p/trust/store.go index d6b4c049..fc1ad399 100644 --- a/p2p/trust/store.go +++ b/p2p/trust/store.go @@ -156,7 +156,7 @@ func (tms *TrustMetricStore) loadFromDB() bool { peers := make(map[string]MetricHistoryJSON) err := 
json.Unmarshal(bytes, &peers) if err != nil { - cmn.PanicCrisis(fmt.Sprintf("Could not unmarshal Trust Metric Store DB data: %v", err)) + panic(fmt.Sprintf("Could not unmarshal Trust Metric Store DB data: %v", err)) } // If history data exists in the file, diff --git a/privval/socket_listeners_test.go b/privval/socket_listeners_test.go index 498ef79c..3c4cb858 100644 --- a/privval/socket_listeners_test.go +++ b/privval/socket_listeners_test.go @@ -97,7 +97,15 @@ func TestListenerTimeoutAccept(t *testing.T) { } func TestListenerTimeoutReadWrite(t *testing.T) { - for _, tc := range listenerTestCases(t, time.Second, time.Millisecond) { + const ( + // This needs to be long enough s.t. the Accept will definitely succeed: + timeoutAccept = time.Second + // This can be really short but in the TCP case, the accept can + // also trigger a timeoutReadWrite. Hence, we need to give it some time. + // Note: this controls how long this test actually runs. + timeoutReadWrite = 10 * time.Millisecond + ) + for _, tc := range listenerTestCases(t, timeoutAccept, timeoutReadWrite) { go func(dialer SocketDialer) { _, err := dialer() if err != nil { @@ -110,8 +118,7 @@ func TestListenerTimeoutReadWrite(t *testing.T) { t.Fatal(err) } - time.Sleep(2 * time.Millisecond) - + // this will timeout because we don't write anything: msg := make([]byte, 200) _, err = c.Read(msg) opErr, ok := err.(*net.OpError) @@ -122,5 +129,9 @@ func TestListenerTimeoutReadWrite(t *testing.T) { if have, want := opErr.Op, "read"; have != want { t.Errorf("for %s listener, have %v, want %v", tc.description, have, want) } + + if !opErr.Timeout() { + t.Errorf("for %s listener, got unexpected error: have %v, want Timeout error", tc.description, opErr) + } } } diff --git a/rpc/client/examples_test.go b/rpc/client/examples_test.go new file mode 100644 index 00000000..720e4849 --- /dev/null +++ b/rpc/client/examples_test.go @@ -0,0 +1,126 @@ +package client_test + +import ( + "bytes" + "fmt" + + "github.com/tendermint/tendermint/abci/example/kvstore" + "github.com/tendermint/tendermint/rpc/client" + ctypes "github.com/tendermint/tendermint/rpc/core/types" + rpctest "github.com/tendermint/tendermint/rpc/test" +) + +func ExampleHTTP_simple() { + // Start a tendermint node (and kvstore) in the background to test against + app := kvstore.NewKVStoreApplication() + node := rpctest.StartTendermint(app, rpctest.SuppressStdout, rpctest.RecreateConfig) + defer rpctest.StopTendermint(node) + + // Create our RPC client + rpcAddr := rpctest.GetConfig().RPC.ListenAddress + c := client.NewHTTP(rpcAddr, "/websocket") + + // Create a transaction + k := []byte("name") + v := []byte("satoshi") + tx := append(k, append([]byte("="), v...)...) 
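+	// i.e. tx is now the raw bytes of "name=satoshi"; the kvstore example
+	// app splits such transactions on "=" into a key and a value.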
+ + // Broadcast the transaction and wait for it to commit (rather use + // c.BroadcastTxSync though in production) + bres, err := c.BroadcastTxCommit(tx) + if err != nil { + panic(err) + } + if bres.CheckTx.IsErr() || bres.DeliverTx.IsErr() { + panic("BroadcastTxCommit transaction failed") + } + + // Now try to fetch the value for the key + qres, err := c.ABCIQuery("/key", k) + if err != nil { + panic(err) + } + if qres.Response.IsErr() { + panic("ABCIQuery failed") + } + if !bytes.Equal(qres.Response.Key, k) { + panic("returned key does not match queried key") + } + if !bytes.Equal(qres.Response.Value, v) { + panic("returned value does not match sent value") + } + + fmt.Println("Sent tx :", string(tx)) + fmt.Println("Queried for :", string(qres.Response.Key)) + fmt.Println("Got value :", string(qres.Response.Value)) + + // Output: + // Sent tx : name=satoshi + // Queried for : name + // Got value : satoshi +} + +func ExampleHTTP_batching() { + // Start a tendermint node (and kvstore) in the background to test against + app := kvstore.NewKVStoreApplication() + node := rpctest.StartTendermint(app, rpctest.SuppressStdout, rpctest.RecreateConfig) + defer rpctest.StopTendermint(node) + + // Create our RPC client + rpcAddr := rpctest.GetConfig().RPC.ListenAddress + c := client.NewHTTP(rpcAddr, "/websocket") + + // Create our two transactions + k1 := []byte("firstName") + v1 := []byte("satoshi") + tx1 := append(k1, append([]byte("="), v1...)...) + + k2 := []byte("lastName") + v2 := []byte("nakamoto") + tx2 := append(k2, append([]byte("="), v2...)...) + + txs := [][]byte{tx1, tx2} + + // Create a new batch + batch := c.NewBatch() + + // Queue up our transactions + for _, tx := range txs { + if _, err := batch.BroadcastTxCommit(tx); err != nil { + panic(err) + } + } + + // Send the batch of 2 transactions + if _, err := batch.Send(); err != nil { + panic(err) + } + + // Now let's query for the original results as a batch + keys := [][]byte{k1, k2} + for _, key := range keys { + if _, err := batch.ABCIQuery("/key", key); err != nil { + panic(err) + } + } + + // Send the 2 queries and keep the results + results, err := batch.Send() + if err != nil { + panic(err) + } + + // Each result in the returned list is the deserialized result of each + // respective ABCIQuery response + for _, result := range results { + qr, ok := result.(*ctypes.ResultABCIQuery) + if !ok { + panic("invalid result type from ABCIQuery request") + } + fmt.Println(string(qr.Response.Key), "=", string(qr.Response.Value)) + } + + // Output: + // firstName = satoshi + // lastName = nakamoto +} diff --git a/rpc/client/helpers.go b/rpc/client/helpers.go index 4889b074..756ba281 100644 --- a/rpc/client/helpers.go +++ b/rpc/client/helpers.go @@ -15,7 +15,7 @@ type Waiter func(delta int64) (abort error) // but you can plug in another one func DefaultWaitStrategy(delta int64) (abort error) { if delta > 10 { - return errors.Errorf("Waiting for %d blocks... aborting", delta) + return errors.Errorf("waiting for %d blocks... aborting", delta) } else if delta > 0 { // estimate of wait time.... // wait half a second for the next block (in progress) diff --git a/rpc/client/httpclient.go b/rpc/client/httpclient.go index 55c7b4f1..3fd13da3 100644 --- a/rpc/client/httpclient.go +++ b/rpc/client/httpclient.go @@ -18,27 +18,72 @@ import ( ) /* -HTTP is a Client implementation that communicates with a tendermint node over -json rpc and websockets. +HTTP is a Client implementation that communicates with a Tendermint node over +JSON RPC and WebSockets. 
This is the main implementation you probably want to use in production code. -There are other implementations when calling the tendermint node in-process +There are other implementations when calling the Tendermint node in-process (Local), or when you want to mock out the server for test code (mock). You can subscribe for any event published by Tendermint using Subscribe method. -Note delivery is best-effort. If you don't read events fast enough or network -is slow, Tendermint might cancel the subscription. The client will attempt to +Note delivery is best-effort. If you don't read events fast enough or network is +slow, Tendermint might cancel the subscription. The client will attempt to resubscribe (you don't need to do anything). It will keep trying every second indefinitely until successful. + +Request batching is available for JSON RPC requests over HTTP, which conforms to +the JSON RPC specification (https://www.jsonrpc.org/specification#batch). See +the example for more details. */ type HTTP struct { remote string rpc *rpcclient.JSONRPCClient + + *baseRPCClient *WSEvents } -// NewHTTP takes a remote endpoint in the form tcp://: -// and the websocket path (which always seems to be "/websocket") +// BatchHTTP provides the same interface as `HTTP`, but allows for batching of +// requests (as per https://www.jsonrpc.org/specification#batch). Do not +// instantiate directly - rather use the HTTP.NewBatch() method to create an +// instance of this struct. +// +// Batching of HTTP requests is thread-safe in the sense that multiple +// goroutines can each create their own batches and send them using the same +// HTTP client. Multiple goroutines could also enqueue transactions in a single +// batch, but ordering of transactions in the batch cannot be guaranteed in such +// an example. +type BatchHTTP struct { + rpcBatch *rpcclient.JSONRPCRequestBatch + *baseRPCClient +} + +// rpcClient is an internal interface to which our RPC clients (batch and +// non-batch) must conform. Acts as an additional code-level sanity check to +// make sure the implementations stay coherent. +type rpcClient interface { + ABCIClient + HistoryClient + NetworkClient + SignClient + StatusClient +} + +// baseRPCClient implements the basic RPC method logic without the actual +// underlying RPC call functionality, which is provided by `caller`. +type baseRPCClient struct { + caller rpcclient.JSONRPCCaller +} + +var _ rpcClient = (*HTTP)(nil) +var _ rpcClient = (*BatchHTTP)(nil) +var _ rpcClient = (*baseRPCClient)(nil) + +//----------------------------------------------------------------------------- +// HTTP + +// NewHTTP takes a remote endpoint in the form ://: and +// the websocket path (which always seems to be "/websocket") func NewHTTP(remote, wsEndpoint string) *HTTP { rc := rpcclient.NewJSONRPCClient(remote) cdc := rc.Codec() @@ -46,39 +91,76 @@ func NewHTTP(remote, wsEndpoint string) *HTTP { rc.SetCodec(cdc) return &HTTP{ - rpc: rc, - remote: remote, - WSEvents: newWSEvents(cdc, remote, wsEndpoint), + rpc: rc, + remote: remote, + baseRPCClient: &baseRPCClient{caller: rc}, + WSEvents: newWSEvents(cdc, remote, wsEndpoint), } } var _ Client = (*HTTP)(nil) -func (c *HTTP) Status() (*ctypes.ResultStatus, error) { +// NewBatch creates a new batch client for this HTTP client. 
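Note: per the BatchHTTP comment above, the intended concurrency pattern is one batch per goroutine on top of a shared HTTP client. A compact sketch (the endpoint is a placeholder and error handling is minimal):

```go
package main

import (
	"fmt"
	"sync"

	"github.com/tendermint/tendermint/rpc/client"
)

func main() {
	// One shared client, one private batch per goroutine.
	c := client.NewHTTP("tcp://localhost:26657", "/websocket") // assumed local node
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			b := c.NewBatch()
			if _, err := b.Status(); err != nil { // enqueues only
				fmt.Println("enqueue:", err)
				return
			}
			if _, err := b.Send(); err != nil { // one HTTP round trip per batch
				fmt.Println("send:", err)
			}
		}()
	}
	wg.Wait()
}
```

Enqueued calls return result pointers immediately; as the batching tests later in this diff rely on, those pointers are only populated once Send returns.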
+func (c *HTTP) NewBatch() *BatchHTTP { + rpcBatch := c.rpc.NewRequestBatch() + return &BatchHTTP{ + rpcBatch: rpcBatch, + baseRPCClient: &baseRPCClient{ + caller: rpcBatch, + }, + } +} + +//----------------------------------------------------------------------------- +// BatchHTTP + +// Send is a convenience function for an HTTP batch that will trigger the +// compilation of the batched requests and send them off using the client as a +// single request. On success, this returns a list of the deserialized results +// from each request in the sent batch. +func (b *BatchHTTP) Send() ([]interface{}, error) { + return b.rpcBatch.Send() +} + +// Clear will empty out this batch of requests and return the number of requests +// that were cleared out. +func (b *BatchHTTP) Clear() int { + return b.rpcBatch.Clear() +} + +// Count returns the number of enqueued requests waiting to be sent. +func (b *BatchHTTP) Count() int { + return b.rpcBatch.Count() +} + +//----------------------------------------------------------------------------- +// baseRPCClient + +func (c *baseRPCClient) Status() (*ctypes.ResultStatus, error) { result := new(ctypes.ResultStatus) - _, err := c.rpc.Call("status", map[string]interface{}{}, result) + _, err := c.caller.Call("status", map[string]interface{}{}, result) if err != nil { return nil, errors.Wrap(err, "Status") } return result, nil } -func (c *HTTP) ABCIInfo() (*ctypes.ResultABCIInfo, error) { +func (c *baseRPCClient) ABCIInfo() (*ctypes.ResultABCIInfo, error) { result := new(ctypes.ResultABCIInfo) - _, err := c.rpc.Call("abci_info", map[string]interface{}{}, result) + _, err := c.caller.Call("abci_info", map[string]interface{}{}, result) if err != nil { return nil, errors.Wrap(err, "ABCIInfo") } return result, nil } -func (c *HTTP) ABCIQuery(path string, data cmn.HexBytes) (*ctypes.ResultABCIQuery, error) { +func (c *baseRPCClient) ABCIQuery(path string, data cmn.HexBytes) (*ctypes.ResultABCIQuery, error) { return c.ABCIQueryWithOptions(path, data, DefaultABCIQueryOptions) } -func (c *HTTP) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { +func (c *baseRPCClient) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { result := new(ctypes.ResultABCIQuery) - _, err := c.rpc.Call("abci_query", + _, err := c.caller.Call("abci_query", map[string]interface{}{"path": path, "data": data, "height": opts.Height, "prove": opts.Prove}, result) if err != nil { @@ -87,89 +169,89 @@ func (c *HTTP) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts ABCIQue return result, nil } -func (c *HTTP) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { +func (c *baseRPCClient) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { result := new(ctypes.ResultBroadcastTxCommit) - _, err := c.rpc.Call("broadcast_tx_commit", map[string]interface{}{"tx": tx}, result) + _, err := c.caller.Call("broadcast_tx_commit", map[string]interface{}{"tx": tx}, result) if err != nil { return nil, errors.Wrap(err, "broadcast_tx_commit") } return result, nil } -func (c *HTTP) BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { +func (c *baseRPCClient) BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { return c.broadcastTX("broadcast_tx_async", tx) } -func (c *HTTP) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { +func (c *baseRPCClient) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) 
{ return c.broadcastTX("broadcast_tx_sync", tx) } -func (c *HTTP) broadcastTX(route string, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { +func (c *baseRPCClient) broadcastTX(route string, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { result := new(ctypes.ResultBroadcastTx) - _, err := c.rpc.Call(route, map[string]interface{}{"tx": tx}, result) + _, err := c.caller.Call(route, map[string]interface{}{"tx": tx}, result) if err != nil { return nil, errors.Wrap(err, route) } return result, nil } -func (c *HTTP) UnconfirmedTxs(limit int) (*ctypes.ResultUnconfirmedTxs, error) { +func (c *baseRPCClient) UnconfirmedTxs(limit int) (*ctypes.ResultUnconfirmedTxs, error) { result := new(ctypes.ResultUnconfirmedTxs) - _, err := c.rpc.Call("unconfirmed_txs", map[string]interface{}{"limit": limit}, result) + _, err := c.caller.Call("unconfirmed_txs", map[string]interface{}{"limit": limit}, result) if err != nil { return nil, errors.Wrap(err, "unconfirmed_txs") } return result, nil } -func (c *HTTP) NumUnconfirmedTxs() (*ctypes.ResultUnconfirmedTxs, error) { +func (c *baseRPCClient) NumUnconfirmedTxs() (*ctypes.ResultUnconfirmedTxs, error) { result := new(ctypes.ResultUnconfirmedTxs) - _, err := c.rpc.Call("num_unconfirmed_txs", map[string]interface{}{}, result) + _, err := c.caller.Call("num_unconfirmed_txs", map[string]interface{}{}, result) if err != nil { return nil, errors.Wrap(err, "num_unconfirmed_txs") } return result, nil } -func (c *HTTP) NetInfo() (*ctypes.ResultNetInfo, error) { +func (c *baseRPCClient) NetInfo() (*ctypes.ResultNetInfo, error) { result := new(ctypes.ResultNetInfo) - _, err := c.rpc.Call("net_info", map[string]interface{}{}, result) + _, err := c.caller.Call("net_info", map[string]interface{}{}, result) if err != nil { return nil, errors.Wrap(err, "NetInfo") } return result, nil } -func (c *HTTP) DumpConsensusState() (*ctypes.ResultDumpConsensusState, error) { +func (c *baseRPCClient) DumpConsensusState() (*ctypes.ResultDumpConsensusState, error) { result := new(ctypes.ResultDumpConsensusState) - _, err := c.rpc.Call("dump_consensus_state", map[string]interface{}{}, result) + _, err := c.caller.Call("dump_consensus_state", map[string]interface{}{}, result) if err != nil { return nil, errors.Wrap(err, "DumpConsensusState") } return result, nil } -func (c *HTTP) ConsensusState() (*ctypes.ResultConsensusState, error) { +func (c *baseRPCClient) ConsensusState() (*ctypes.ResultConsensusState, error) { result := new(ctypes.ResultConsensusState) - _, err := c.rpc.Call("consensus_state", map[string]interface{}{}, result) + _, err := c.caller.Call("consensus_state", map[string]interface{}{}, result) if err != nil { return nil, errors.Wrap(err, "ConsensusState") } return result, nil } -func (c *HTTP) Health() (*ctypes.ResultHealth, error) { +func (c *baseRPCClient) Health() (*ctypes.ResultHealth, error) { result := new(ctypes.ResultHealth) - _, err := c.rpc.Call("health", map[string]interface{}{}, result) + _, err := c.caller.Call("health", map[string]interface{}{}, result) if err != nil { return nil, errors.Wrap(err, "Health") } return result, nil } -func (c *HTTP) BlockchainInfo(minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { +func (c *baseRPCClient) BlockchainInfo(minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { result := new(ctypes.ResultBlockchainInfo) - _, err := c.rpc.Call("blockchain", + _, err := c.caller.Call("blockchain", map[string]interface{}{"minHeight": minHeight, "maxHeight": maxHeight}, result) if err != nil { @@ -178,56 
+260,56 @@ func (c *HTTP) BlockchainInfo(minHeight, maxHeight int64) (*ctypes.ResultBlockch return result, nil } -func (c *HTTP) Genesis() (*ctypes.ResultGenesis, error) { +func (c *baseRPCClient) Genesis() (*ctypes.ResultGenesis, error) { result := new(ctypes.ResultGenesis) - _, err := c.rpc.Call("genesis", map[string]interface{}{}, result) + _, err := c.caller.Call("genesis", map[string]interface{}{}, result) if err != nil { return nil, errors.Wrap(err, "Genesis") } return result, nil } -func (c *HTTP) Block(height *int64) (*ctypes.ResultBlock, error) { +func (c *baseRPCClient) Block(height *int64) (*ctypes.ResultBlock, error) { result := new(ctypes.ResultBlock) - _, err := c.rpc.Call("block", map[string]interface{}{"height": height}, result) + _, err := c.caller.Call("block", map[string]interface{}{"height": height}, result) if err != nil { return nil, errors.Wrap(err, "Block") } return result, nil } -func (c *HTTP) BlockResults(height *int64) (*ctypes.ResultBlockResults, error) { +func (c *baseRPCClient) BlockResults(height *int64) (*ctypes.ResultBlockResults, error) { result := new(ctypes.ResultBlockResults) - _, err := c.rpc.Call("block_results", map[string]interface{}{"height": height}, result) + _, err := c.caller.Call("block_results", map[string]interface{}{"height": height}, result) if err != nil { return nil, errors.Wrap(err, "Block Result") } return result, nil } -func (c *HTTP) Commit(height *int64) (*ctypes.ResultCommit, error) { +func (c *baseRPCClient) Commit(height *int64) (*ctypes.ResultCommit, error) { result := new(ctypes.ResultCommit) - _, err := c.rpc.Call("commit", map[string]interface{}{"height": height}, result) + _, err := c.caller.Call("commit", map[string]interface{}{"height": height}, result) if err != nil { return nil, errors.Wrap(err, "Commit") } return result, nil } -func (c *HTTP) Tx(hash []byte, prove bool) (*ctypes.ResultTx, error) { +func (c *baseRPCClient) Tx(hash []byte, prove bool) (*ctypes.ResultTx, error) { result := new(ctypes.ResultTx) params := map[string]interface{}{ "hash": hash, "prove": prove, } - _, err := c.rpc.Call("tx", params, result) + _, err := c.caller.Call("tx", params, result) if err != nil { return nil, errors.Wrap(err, "Tx") } return result, nil } -func (c *HTTP) TxSearch(query string, prove bool, page, perPage int) (*ctypes.ResultTxSearch, error) { +func (c *baseRPCClient) TxSearch(query string, prove bool, page, perPage int) (*ctypes.ResultTxSearch, error) { result := new(ctypes.ResultTxSearch) params := map[string]interface{}{ "query": query, @@ -235,23 +317,24 @@ func (c *HTTP) TxSearch(query string, prove bool, page, perPage int) (*ctypes.Re "page": page, "per_page": perPage, } - _, err := c.rpc.Call("tx_search", params, result) + _, err := c.caller.Call("tx_search", params, result) if err != nil { return nil, errors.Wrap(err, "TxSearch") } return result, nil } -func (c *HTTP) Validators(height *int64) (*ctypes.ResultValidators, error) { +func (c *baseRPCClient) Validators(height *int64) (*ctypes.ResultValidators, error) { result := new(ctypes.ResultValidators) - _, err := c.rpc.Call("validators", map[string]interface{}{"height": height}, result) + _, err := c.caller.Call("validators", map[string]interface{}{"height": height}, result) if err != nil { return nil, errors.Wrap(err, "Validators") } return result, nil } -/** websocket event stuff here... 
**/ +//----------------------------------------------------------------------------- +// WSEvents type WSEvents struct { cmn.BaseService diff --git a/rpc/client/localclient.go b/rpc/client/localclient.go index d57ced31..161f44fd 100644 --- a/rpc/client/localclient.go +++ b/rpc/client/localclient.go @@ -182,7 +182,7 @@ func (c *Local) eventsRoutine(sub types.Subscription, subscriber string, q tmpub for { select { case msg := <-sub.Out(): - result := ctypes.ResultEvent{Query: q.String(), Data: msg.Data(), Tags: msg.Tags()} + result := ctypes.ResultEvent{Query: q.String(), Data: msg.Data(), Events: msg.Events()} if cap(outc) == 0 { outc <- result } else { diff --git a/rpc/client/rpc_test.go b/rpc/client/rpc_test.go index ba9bc3af..a1a48abc 100644 --- a/rpc/client/rpc_test.go +++ b/rpc/client/rpc_test.go @@ -4,6 +4,7 @@ import ( "fmt" "net/http" "strings" + "sync" "testing" "github.com/stretchr/testify/assert" @@ -11,7 +12,9 @@ import ( abci "github.com/tendermint/tendermint/abci/types" + cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/rpc/client" + ctypes "github.com/tendermint/tendermint/rpc/core/types" rpctest "github.com/tendermint/tendermint/rpc/test" "github.com/tendermint/tendermint/types" ) @@ -249,7 +252,8 @@ func TestAppCalls(t *testing.T) { func TestBroadcastTxSync(t *testing.T) { require := require.New(t) - mempool := node.MempoolReactor().Mempool + // TODO (melekes): use mempool which is set on RPC rather than getting it from node + mempool := node.Mempool() initMempoolSize := mempool.Size() for i, c := range GetClients() { @@ -269,7 +273,7 @@ func TestBroadcastTxSync(t *testing.T) { func TestBroadcastTxCommit(t *testing.T) { require := require.New(t) - mempool := node.MempoolReactor().Mempool + mempool := node.Mempool() for i, c := range GetClients() { _, _, tx := MakeTxKV() bres, err := c.BroadcastTxCommit(tx) @@ -284,7 +288,7 @@ func TestBroadcastTxCommit(t *testing.T) { func TestUnconfirmedTxs(t *testing.T) { _, _, tx := MakeTxKV() - mempool := node.MempoolReactor().Mempool + mempool := node.Mempool() _ = mempool.CheckTx(tx, nil) for i, c := range GetClients() { @@ -305,7 +309,7 @@ func TestUnconfirmedTxs(t *testing.T) { func TestNumUnconfirmedTxs(t *testing.T) { _, _, tx := MakeTxKV() - mempool := node.MempoolReactor().Mempool + mempool := node.Mempool() _ = mempool.CheckTx(tx, nil) mempoolSize := mempool.Size() @@ -441,3 +445,100 @@ func TestTxSearch(t *testing.T) { require.Len(t, result.Txs, 0) } } + +func TestBatchedJSONRPCCalls(t *testing.T) { + c := getHTTPClient() + testBatchedJSONRPCCalls(t, c) +} + +func testBatchedJSONRPCCalls(t *testing.T, c *client.HTTP) { + k1, v1, tx1 := MakeTxKV() + k2, v2, tx2 := MakeTxKV() + + batch := c.NewBatch() + r1, err := batch.BroadcastTxCommit(tx1) + require.NoError(t, err) + r2, err := batch.BroadcastTxCommit(tx2) + require.NoError(t, err) + require.Equal(t, 2, batch.Count()) + bresults, err := batch.Send() + require.NoError(t, err) + require.Len(t, bresults, 2) + require.Equal(t, 0, batch.Count()) + + bresult1, ok := bresults[0].(*ctypes.ResultBroadcastTxCommit) + require.True(t, ok) + require.Equal(t, *bresult1, *r1) + bresult2, ok := bresults[1].(*ctypes.ResultBroadcastTxCommit) + require.True(t, ok) + require.Equal(t, *bresult2, *r2) + apph := cmn.MaxInt64(bresult1.Height, bresult2.Height) + 1 + + client.WaitForHeight(c, apph, nil) + + q1, err := batch.ABCIQuery("/key", k1) + require.NoError(t, err) + q2, err := batch.ABCIQuery("/key", k2) + require.NoError(t, err) + require.Equal(t, 2, 
batch.Count()) + qresults, err := batch.Send() + require.NoError(t, err) + require.Len(t, qresults, 2) + require.Equal(t, 0, batch.Count()) + + qresult1, ok := qresults[0].(*ctypes.ResultABCIQuery) + require.True(t, ok) + require.Equal(t, *qresult1, *q1) + qresult2, ok := qresults[1].(*ctypes.ResultABCIQuery) + require.True(t, ok) + require.Equal(t, *qresult2, *q2) + + require.Equal(t, qresult1.Response.Key, k1) + require.Equal(t, qresult2.Response.Key, k2) + require.Equal(t, qresult1.Response.Value, v1) + require.Equal(t, qresult2.Response.Value, v2) +} + +func TestBatchedJSONRPCCallsCancellation(t *testing.T) { + c := getHTTPClient() + _, _, tx1 := MakeTxKV() + _, _, tx2 := MakeTxKV() + + batch := c.NewBatch() + _, err := batch.BroadcastTxCommit(tx1) + require.NoError(t, err) + _, err = batch.BroadcastTxCommit(tx2) + require.NoError(t, err) + // we should have 2 requests waiting + require.Equal(t, 2, batch.Count()) + // we want to make sure we cleared 2 pending requests + require.Equal(t, 2, batch.Clear()) + // now there should be no batched requests + require.Equal(t, 0, batch.Count()) +} + +func TestSendingEmptyJSONRPCRequestBatch(t *testing.T) { + c := getHTTPClient() + batch := c.NewBatch() + _, err := batch.Send() + require.Error(t, err, "sending an empty batch of JSON RPC requests should result in an error") +} + +func TestClearingEmptyJSONRPCRequestBatch(t *testing.T) { + c := getHTTPClient() + batch := c.NewBatch() + require.Zero(t, batch.Clear(), "clearing an empty batch of JSON RPC requests should result in a 0 result") +} + +func TestConcurrentJSONRPCBatching(t *testing.T) { + var wg sync.WaitGroup + c := getHTTPClient() + for i := 0; i < 50; i++ { + wg.Add(1) + go func() { + defer wg.Done() + testBatchedJSONRPCCalls(t, c) + }() + } + wg.Wait() +} diff --git a/rpc/core/blocks.go b/rpc/core/blocks.go index 40b6811d..16ebe9c5 100644 --- a/rpc/core/blocks.go +++ b/rpc/core/blocks.go @@ -339,7 +339,8 @@ func Commit(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultCommit, erro // If no height is provided, it will fetch results for the latest block. // // Results are for the height of the block containing the txs. -// Thus response.results[5] is the results of executing getBlock(h).Txs[5] +// Thus response.results.deliver_tx[5] is the results of executing +// getBlock(h).Txs[5] // // ```shell // curl 'localhost:26657/block_results?height=10' @@ -360,17 +361,27 @@ func Commit(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultCommit, erro // // ```json // { -// "height": "10", -// "results": [ -// { -// "code": "0", -// "data": "CAFE00F00D" -// }, -// { -// "code": "102", -// "data": "" +// "jsonrpc": "2.0", +// "id": "", +// "result": { +// "height": "39", +// "results": { +// "deliver_tx": [ +// { +// "tags": [ +// { +// "key": "YXBwLmNyZWF0b3I=", +// "value": "Q29zbW9zaGkgTmV0b3dva28=" +// } +// ] +// } +// ], +// "end_block": { +// "validator_updates": null +// }, +// "begin_block": {} +// } // } -// ] // } // ``` func BlockResults(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultBlockResults, error) { @@ -380,7 +391,6 @@ func BlockResults(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultBlockR return nil, err } - // load the results results, err := sm.LoadABCIResponses(stateDB, height) if err != nil { return nil, err diff --git a/rpc/core/events.go b/rpc/core/events.go index 6bc5ecc7..acb90b46 100644 --- a/rpc/core/events.go +++ b/rpc/core/events.go @@ -22,26 +22,83 @@ import ( // string (escaped with single quotes), number, date or time. 
// // Examples: -// tm.event = 'NewBlock' # new blocks -// tm.event = 'CompleteProposal' # node got a complete proposal +// tm.event = 'NewBlock' # new blocks +// tm.event = 'CompleteProposal' # node got a complete proposal // tm.event = 'Tx' AND tx.hash = 'XYZ' # single transaction -// tm.event = 'Tx' AND tx.height = 5 # all txs of the fifth block -// tx.height = 5 # all txs of the fifth block +// tm.event = 'Tx' AND tx.height = 5 # all txs of the fifth block +// tx.height = 5 # all txs of the fifth block // // Tendermint provides a few predefined keys: tm.event, tx.hash and tx.height. -// Note for transactions, you can define additional keys by providing tags with +// Note for transactions, you can define additional keys by providing events in the // DeliverTx response. // -// DeliverTx{ -// Tags: []*KVPair{ -// "agent.name": "K", -// } -// } +// import ( +// abci "github.com/tendermint/tendermint/abci/types" +// "github.com/tendermint/tendermint/libs/pubsub/query" +// ) // -// tm.event = 'Tx' AND agent.name = 'K' -// tm.event = 'Tx' AND account.created_at >= TIME 2013-05-03T14:45:00Z -// tm.event = 'Tx' AND contract.sign_date = DATE 2017-01-01 -// tm.event = 'Tx' AND account.owner CONTAINS 'Igor' +// abci.ResponseDeliverTx{ +// Events: []abci.Event{ +// { +// Type: "rewards.withdraw", +// Attributes: cmn.KVPairs{ +// cmn.KVPair{Key: []byte("address"), Value: []byte("AddrA")}, +// cmn.KVPair{Key: []byte("source"), Value: []byte("SrcX")}, +// cmn.KVPair{Key: []byte("amount"), Value: []byte("...")}, +// cmn.KVPair{Key: []byte("balance"), Value: []byte("...")}, +// }, +// }, +// { +// Type: "rewards.withdraw", +// Attributes: cmn.KVPairs{ +// cmn.KVPair{Key: []byte("address"), Value: []byte("AddrB")}, +// cmn.KVPair{Key: []byte("source"), Value: []byte("SrcY")}, +// cmn.KVPair{Key: []byte("amount"), Value: []byte("...")}, +// cmn.KVPair{Key: []byte("balance"), Value: []byte("...")}, +// }, +// }, +// { +// Type: "transfer", +// Attributes: cmn.KVPairs{ +// cmn.KVPair{Key: []byte("sender"), Value: []byte("AddrC")}, +// cmn.KVPair{Key: []byte("recipient"), Value: []byte("AddrD")}, +// cmn.KVPair{Key: []byte("amount"), Value: []byte("...")}, +// }, +// }, +// }, +// } +// +// All events are indexed by a composite key of the form {eventType}.{eventAttrKey}. +// In the above examples, the following keys would be indexed: +// - rewards.withdraw.address +// - rewards.withdraw.source +// - rewards.withdraw.amount +// - rewards.withdraw.balance +// - transfer.sender +// - transfer.recipient +// - transfer.amount +// +// Multiple event types with duplicate keys are allowed and are meant to +// categorize unique and distinct events. 
In the above example, all events +// indexed under the key `rewards.withdraw.address` will have the following +// values stored and queryable: +// +// - AddrA +// - AddrB +// +// To create a query for txs where address AddrA withdrew rewards: +// query.MustParse("tm.event = 'Tx' AND rewards.withdraw.address = 'AddrA'") +// +// To create a query for txs where address AddrA withdrew rewards from source SrcX: +// query.MustParse("tm.event = 'Tx' AND rewards.withdraw.address = 'AddrA' AND rewards.withdraw.source = 'SrcX'") +// +// To create a query for txs where AddrA transferred funds: +// query.MustParse("tm.event = 'Tx' AND transfer.sender = 'AddrA'") +// +// The following queries would return no results: +// query.MustParse("tm.event = 'Tx' AND transfer.sender = 'AddrZ'") +// query.MustParse("tm.event = 'Tx' AND rewards.withdraw.address = 'AddrZ'") +// query.MustParse("tm.event = 'Tx' AND rewards.withdraw.source = 'W'") // // See list of all possible events here // https://godoc.org/github.com/tendermint/tendermint/types#pkg-constants @@ -50,7 +107,6 @@ import ( // https://godoc.org/github.com/tendermint/tendermint/libs/pubsub/query. // // ```go -// import "github.com/tendermint/tendermint/libs/pubsub/query" // import "github.com/tendermint/tendermint/types" // // client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") // err := client.Start() // if err != nil { // // handle error // } // defer client.Stop() -// ctx, cancel := context.WithTimeout(context.Background(), timeout) +// ctx, cancel := context.WithTimeout(context.Background(), 1 * time.Second) // defer cancel() -// query := query.MustParse("tm.event = 'Tx' AND tx.height = 3") -// txs := make(chan interface{}) -// err = client.Subscribe(ctx, "test-client", query, txs) +// query := "tm.event = 'Tx' AND tx.height = 3" +// txs, err := client.Subscribe(ctx, "test-client", query) +// if err != nil { +// // handle error +// } // // go func() { // for e := range txs { -// fmt.Println("got ", e.(types.EventDataTx)) +// fmt.Println("got ", e.Data.(types.EventDataTx)) // } // }() // ``` @@ -105,8 +163,10 @@ func Subscribe(ctx *rpctypes.Context, query string) (*ctypes.ResultSubscribe, er if err != nil { return nil, errors.Wrap(err, "failed to parse query") } + subCtx, cancel := context.WithTimeout(ctx.Context(), SubscribeTimeout) defer cancel() + sub, err := eventBus.Subscribe(subCtx, addr, q) if err != nil { return nil, err @@ -116,7 +176,7 @@ func Subscribe(ctx *rpctypes.Context, query string) (*ctypes.ResultSubscribe, er for { select { case msg := <-sub.Out(): - resultEvent := &ctypes.ResultEvent{Query: query, Data: msg.Data(), Tags: msg.Tags()} + resultEvent := &ctypes.ResultEvent{Query: query, Data: msg.Data(), Events: msg.Events()} ctx.WSConn.TryWriteRPCResponse( rpctypes.NewRPCSuccessResponse( ctx.WSConn.Codec(), @@ -154,7 +214,11 @@ func Subscribe(ctx *rpctypes.Context, query string) (*ctypes.ResultSubscribe, er // // handle error // } // defer client.Stop() -// err = client.Unsubscribe("test-client", query) +// query := "tm.event = 'Tx' AND tx.height = 3" +// err = client.Unsubscribe(context.Background(), "test-client", query) +// if err != nil { +// // handle error +// } // ``` // // > The above command returns JSON structured like this: @@ -198,7 +262,10 @@ func Unsubscribe(ctx *rpctypes.Context, query string) (*ctypes.ResultUnsubscribe // // handle error // } // defer client.Stop() -// err = client.UnsubscribeAll("test-client") +// err = client.UnsubscribeAll(context.Background(), "test-client") +// if err != nil { +// // handle error +// } // 
``` // // > The above command returns JSON structured like this: diff --git a/rpc/core/net.go b/rpc/core/net.go index 23bc40e8..16523061 100644 --- a/rpc/core/net.go +++ b/rpc/core/net.go @@ -184,10 +184,8 @@ func UnsafeDialSeeds(ctx *rpctypes.Context, seeds []string) (*ctypes.ResultDialS if len(seeds) == 0 { return &ctypes.ResultDialSeeds{}, errors.New("No seeds provided") } - // starts go routines to dial each peer after random delays - logger.Info("DialSeeds", "addrBook", addrBook, "seeds", seeds) - err := p2pPeers.DialPeersAsync(addrBook, seeds, false) - if err != nil { + logger.Info("DialSeeds", "seeds", seeds) + if err := p2pPeers.DialPeersAsync(seeds); err != nil { return &ctypes.ResultDialSeeds{}, err } return &ctypes.ResultDialSeeds{Log: "Dialing seeds in progress. See /net_info for details"}, nil @@ -197,10 +195,13 @@ func UnsafeDialPeers(ctx *rpctypes.Context, peers []string, persistent bool) (*c if len(peers) == 0 { return &ctypes.ResultDialPeers{}, errors.New("No peers provided") } - // starts go routines to dial each peer after random delays - logger.Info("DialPeers", "addrBook", addrBook, "peers", peers, "persistent", persistent) - err := p2pPeers.DialPeersAsync(addrBook, peers, persistent) - if err != nil { + logger.Info("DialPeers", "peers", peers, "persistent", persistent) + if persistent { + if err := p2pPeers.AddPersistentPeers(peers); err != nil { + return &ctypes.ResultDialPeers{}, err + } + } + if err := p2pPeers.DialPeersAsync(peers); err != nil { return &ctypes.ResultDialPeers{}, err } return &ctypes.ResultDialPeers{Log: "Dialing peers in progress. See /net_info for details"}, nil diff --git a/rpc/core/net_test.go b/rpc/core/net_test.go new file mode 100644 index 00000000..651e1f69 --- /dev/null +++ b/rpc/core/net_test.go @@ -0,0 +1,73 @@ +package core + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/p2p" + rpctypes "github.com/tendermint/tendermint/rpc/lib/types" +) + +func TestUnsafeDialSeeds(t *testing.T) { + sw := p2p.MakeSwitch(cfg.DefaultP2PConfig(), 1, "testing", "123.123.123", + func(n int, sw *p2p.Switch) *p2p.Switch { return sw }) + err := sw.Start() + require.NoError(t, err) + defer sw.Stop() + + logger = log.TestingLogger() + p2pPeers = sw + + testCases := []struct { + seeds []string + isErr bool + }{ + {[]string{}, true}, + {[]string{"d51fb70907db1c6c2d5237e78379b25cf1a37ab4@127.0.0.1:41198"}, false}, + {[]string{"127.0.0.1:41198"}, true}, + } + + for _, tc := range testCases { + res, err := UnsafeDialSeeds(&rpctypes.Context{}, tc.seeds) + if tc.isErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.NotNil(t, res) + } + } +} + +func TestUnsafeDialPeers(t *testing.T) { + sw := p2p.MakeSwitch(cfg.DefaultP2PConfig(), 1, "testing", "123.123.123", + func(n int, sw *p2p.Switch) *p2p.Switch { return sw }) + err := sw.Start() + require.NoError(t, err) + defer sw.Stop() + + logger = log.TestingLogger() + p2pPeers = sw + + testCases := []struct { + peers []string + isErr bool + }{ + {[]string{}, true}, + {[]string{"d51fb70907db1c6c2d5237e78379b25cf1a37ab4@127.0.0.1:41198"}, false}, + {[]string{"127.0.0.1:41198"}, true}, + } + + for _, tc := range testCases { + res, err := UnsafeDialPeers(&rpctypes.Context{}, tc.peers, false) + if tc.isErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.NotNil(t, res) + } + } +} diff --git 
a/rpc/core/pipe.go b/rpc/core/pipe.go index ad8afdef..28a492e6 100644 --- a/rpc/core/pipe.go +++ b/rpc/core/pipe.go @@ -44,7 +44,8 @@ type transport interface { } type peers interface { - DialPeersAsync(p2p.AddrBook, []string, bool) error + AddPersistentPeers([]string) error + DialPeersAsync([]string) error NumPeers() (outbound, inbound, dialig int) Peers() p2p.IPeerSet } @@ -72,7 +73,7 @@ var ( txIndexer txindex.TxIndexer consensusReactor *consensus.ConsensusReactor eventBus *types.EventBus // thread safe - mempool *mempl.Mempool + mempool mempl.Mempool logger log.Logger @@ -87,7 +88,7 @@ func SetBlockStore(bs sm.BlockStore) { blockStore = bs } -func SetMempool(mem *mempl.Mempool) { +func SetMempool(mem mempl.Mempool) { mempool = mem } diff --git a/rpc/core/types/responses.go b/rpc/core/types/responses.go index 74457b38..f1ae16a3 100644 --- a/rpc/core/types/responses.go +++ b/rpc/core/types/responses.go @@ -205,7 +205,7 @@ type ( // Event data from a subscription type ResultEvent struct { - Query string `json:"query"` - Data types.TMEventData `json:"data"` - Tags map[string]string `json:"tags"` + Query string `json:"query"` + Data types.TMEventData `json:"data"` + Events map[string][]string `json:"events"` } diff --git a/rpc/lib/client/http_client.go b/rpc/lib/client/http_client.go index cfa26e89..3b545a5d 100644 --- a/rpc/lib/client/http_client.go +++ b/rpc/lib/client/http_client.go @@ -10,10 +10,13 @@ import ( "net/url" "reflect" "strings" + "sync" "github.com/pkg/errors" + amino "github.com/tendermint/go-amino" + cmn "github.com/tendermint/tendermint/libs/common" types "github.com/tendermint/tendermint/rpc/lib/types" ) @@ -83,25 +86,56 @@ func makeHTTPClient(remoteAddr string) (string, *http.Client) { //------------------------------------------------------------------------------------ +// jsonRPCBufferedRequest encapsulates a single buffered request, as well as its +// anticipated response structure. +type jsonRPCBufferedRequest struct { + request types.RPCRequest + result interface{} // The result will be deserialized into this object. +} + +// JSONRPCRequestBatch allows us to buffer multiple request/response structures +// into a single batch request. Note that this batch acts like a FIFO queue, and +// is thread-safe. +type JSONRPCRequestBatch struct { + client *JSONRPCClient + + mtx sync.Mutex + requests []*jsonRPCBufferedRequest +} + // JSONRPCClient takes params as a slice type JSONRPCClient struct { address string client *http.Client + id types.JSONRPCStringID cdc *amino.Codec } +// JSONRPCCaller implementers can facilitate calling the JSON RPC endpoint. +type JSONRPCCaller interface { + Call(method string, params map[string]interface{}, result interface{}) (interface{}, error) +} + +// Both JSONRPCClient and JSONRPCRequestBatch can facilitate calls to the JSON +// RPC endpoint. +var _ JSONRPCCaller = (*JSONRPCClient)(nil) +var _ JSONRPCCaller = (*JSONRPCRequestBatch)(nil) + // NewJSONRPCClient returns a JSONRPCClient pointed at the given address. func NewJSONRPCClient(remote string) *JSONRPCClient { address, client := makeHTTPClient(remote) return &JSONRPCClient{ address: address, client: client, + id: types.JSONRPCStringID("jsonrpc-client-" + cmn.RandStr(8)), cdc: amino.NewCodec(), } } +// Call will send the request for the given method through to the RPC endpoint +// immediately, without buffering of requests. 
func (c *JSONRPCClient) Call(method string, params map[string]interface{}, result interface{}) (interface{}, error) { - request, err := types.MapToRequest(c.cdc, types.JSONRPCStringID("jsonrpc-client"), method, params) + request, err := types.MapToRequest(c.cdc, c.id, method, params) if err != nil { return nil, err } @@ -109,9 +143,7 @@ func (c *JSONRPCClient) Call(method string, params map[string]interface{}, resul if err != nil { return nil, err } - // log.Info(string(requestBytes)) requestBuf := bytes.NewBuffer(requestBytes) - // log.Info(Fmt("RPC request to %v (%v): %v", c.remote, method, string(requestBytes))) httpResponse, err := c.client.Post(c.address, "text/json", requestBuf) if err != nil { return nil, err @@ -122,8 +154,40 @@ func (c *JSONRPCClient) Call(method string, params map[string]interface{}, resul if err != nil { return nil, err } - // log.Info(Fmt("RPC response: %v", string(responseBytes))) - return unmarshalResponseBytes(c.cdc, responseBytes, result) + return unmarshalResponseBytes(c.cdc, responseBytes, c.id, result) +} + +// NewRequestBatch starts a batch of requests for this client. +func (c *JSONRPCClient) NewRequestBatch() *JSONRPCRequestBatch { + return &JSONRPCRequestBatch{ + requests: make([]*jsonRPCBufferedRequest, 0), + client: c, + } +} + +func (c *JSONRPCClient) sendBatch(requests []*jsonRPCBufferedRequest) ([]interface{}, error) { + reqs := make([]types.RPCRequest, 0, len(requests)) + results := make([]interface{}, 0, len(requests)) + for _, req := range requests { + reqs = append(reqs, req.request) + results = append(results, req.result) + } + // serialize the array of requests into a single JSON object + requestBytes, err := json.Marshal(reqs) + if err != nil { + return nil, err + } + httpResponse, err := c.client.Post(c.address, "text/json", bytes.NewBuffer(requestBytes)) + if err != nil { + return nil, err + } + defer httpResponse.Body.Close() // nolint: errcheck + + responseBytes, err := ioutil.ReadAll(httpResponse.Body) + if err != nil { + return nil, err + } + return unmarshalResponseBytesArray(c.cdc, responseBytes, c.id, results) } func (c *JSONRPCClient) Codec() *amino.Codec { @@ -136,6 +200,57 @@ func (c *JSONRPCClient) SetCodec(cdc *amino.Codec) { //------------------------------------------------------------- +// Count returns the number of enqueued requests waiting to be sent. +func (b *JSONRPCRequestBatch) Count() int { + b.mtx.Lock() + defer b.mtx.Unlock() + return len(b.requests) +} + +func (b *JSONRPCRequestBatch) enqueue(req *jsonRPCBufferedRequest) { + b.mtx.Lock() + defer b.mtx.Unlock() + b.requests = append(b.requests, req) +} + +// Clear empties out the request batch. +func (b *JSONRPCRequestBatch) Clear() int { + b.mtx.Lock() + defer b.mtx.Unlock() + return b.clear() +} + +func (b *JSONRPCRequestBatch) clear() int { + count := len(b.requests) + b.requests = make([]*jsonRPCBufferedRequest, 0) + return count +} + +// Send will attempt to send the current batch of enqueued requests, and then +// will clear out the requests once done. On success, this returns the +// deserialized list of results from each of the enqueued requests. +func (b *JSONRPCRequestBatch) Send() ([]interface{}, error) { + b.mtx.Lock() + defer func() { + b.clear() + b.mtx.Unlock() + }() + return b.client.sendBatch(b.requests) +} + +// Call enqueues a request to call the given RPC method with the specified +// parameters, in the same way that the `JSONRPCClient.Call` function would. 
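The enqueue-then-flush lifecycle of `JSONRPCRequestBatch` (NewRequestBatch, Call, Count, Send, Clear) can be driven directly. A minimal sketch, assuming a reachable node; the address and the use of the standard `health` method are illustrative, not part of this change:

```go
package main

import (
	"fmt"

	ctypes "github.com/tendermint/tendermint/rpc/core/types"
	rpcclient "github.com/tendermint/tendermint/rpc/lib/client"
)

func main() {
	c := rpcclient.NewJSONRPCClient("tcp://127.0.0.1:26657")

	// Enqueue two calls; nothing touches the network yet.
	batch := c.NewRequestBatch()
	r1, r2 := new(ctypes.ResultHealth), new(ctypes.ResultHealth)
	if _, err := batch.Call("health", map[string]interface{}{}, r1); err != nil {
		panic(err)
	}
	if _, err := batch.Call("health", map[string]interface{}{}, r2); err != nil {
		panic(err)
	}
	fmt.Println(batch.Count()) // 2

	// Send posts the whole queue as a single JSON-RPC array and
	// unmarshals each response into the result passed to its Call.
	if _, err := batch.Send(); err != nil {
		panic(err)
	}
	fmt.Println(batch.Count()) // 0: Send clears the queue on completion
}
```

Because each buffered request remembers its own result object, callers keep type safety per call even though Send returns a generic `[]interface{}`.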
+func (b *JSONRPCRequestBatch) Call(method string, params map[string]interface{}, result interface{}) (interface{}, error) { + request, err := types.MapToRequest(b.client.cdc, b.client.id, method, params) + if err != nil { + return nil, err + } + b.enqueue(&jsonRPCBufferedRequest{request: request, result: result}) + return result, nil +} + +//------------------------------------------------------------- + // URI takes params as a map type URIClient struct { address string @@ -168,7 +283,7 @@ func (c *URIClient) Call(method string, params map[string]interface{}, result in if err != nil { return nil, err } - return unmarshalResponseBytes(c.cdc, responseBytes, result) + return unmarshalResponseBytes(c.cdc, responseBytes, "", result) } func (c *URIClient) Codec() *amino.Codec { @@ -181,7 +296,7 @@ func (c *URIClient) SetCodec(cdc *amino.Codec) { //------------------------------------------------ -func unmarshalResponseBytes(cdc *amino.Codec, responseBytes []byte, result interface{}) (interface{}, error) { +func unmarshalResponseBytes(cdc *amino.Codec, responseBytes []byte, expectedID types.JSONRPCStringID, result interface{}) (interface{}, error) { // Read response. If rpc/core/types is imported, the result will unmarshal // into the correct type. // log.Notice("response", "response", string(responseBytes)) @@ -189,19 +304,71 @@ func unmarshalResponseBytes(cdc *amino.Codec, responseBytes []byte, result inter response := &types.RPCResponse{} err = json.Unmarshal(responseBytes, response) if err != nil { - return nil, errors.Errorf("Error unmarshalling rpc response: %v", err) + return nil, errors.Wrap(err, "error unmarshalling rpc response") } if response.Error != nil { - return nil, errors.Errorf("Response error: %v", response.Error) + return nil, errors.Wrap(response.Error, "response error") + } + // From the JSON-RPC 2.0 spec: + // id: It MUST be the same as the value of the id member in the Request Object. + if err := validateResponseID(response, expectedID); err != nil { + return nil, err } // Unmarshal the RawMessage into the result. err = cdc.UnmarshalJSON(response.Result, result) if err != nil { - return nil, errors.Errorf("Error unmarshalling rpc response result: %v", err) + return nil, errors.Wrap(err, "error unmarshalling rpc response result") } return result, nil } +func unmarshalResponseBytesArray(cdc *amino.Codec, responseBytes []byte, expectedID types.JSONRPCStringID, results []interface{}) ([]interface{}, error) { + var ( + err error + responses []types.RPCResponse + ) + err = json.Unmarshal(responseBytes, &responses) + if err != nil { + return nil, errors.Wrap(err, "error unmarshalling rpc response") + } + // No response error checking here as there may be a mixture of successful + // and unsuccessful responses. + + if len(results) != len(responses) { + return nil, errors.Errorf("expected %d result objects into which to inject responses, but got %d", len(responses), len(results)) + } + + for i, response := range responses { + // From the JSON-RPC 2.0 spec: + // id: It MUST be the same as the value of the id member in the Request Object. 
+ if err := validateResponseID(&response, expectedID); err != nil { + return nil, errors.Wrapf(err, "failed to validate response ID in response %d", i) + } + if err := cdc.UnmarshalJSON(responses[i].Result, results[i]); err != nil { + return nil, errors.Wrap(err, "error unmarshalling rpc response result") + } + } + return results, nil +} + +func validateResponseID(res *types.RPCResponse, expectedID types.JSONRPCStringID) error { + // we only validate a response ID if the expected ID is non-empty + if len(expectedID) == 0 { + return nil + } + if res.ID == nil { + return errors.Errorf("missing ID in response") + } + id, ok := res.ID.(types.JSONRPCStringID) + if !ok { + return errors.Errorf("expected ID string in response but got: %v", res.ID) + } + if expectedID != id { + return errors.Errorf("response ID (%s) does not match request ID (%s)", id, expectedID) + } + return nil +} + func argsToURLValues(cdc *amino.Codec, args map[string]interface{}) (url.Values, error) { values := make(url.Values) if len(args) == 0 { diff --git a/rpc/lib/server/handlers.go b/rpc/lib/server/handlers.go index 6391b009..c1c1ebf1 100644 --- a/rpc/lib/server/handlers.go +++ b/rpc/lib/server/handlers.go @@ -103,7 +103,7 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, cdc *amino.Codec, logger lo return func(w http.ResponseWriter, r *http.Request) { b, err := ioutil.ReadAll(r.Body) if err != nil { - WriteRPCResponseHTTP(w, types.RPCInvalidRequestError(types.JSONRPCStringID(""), errors.Wrap(err, "Error reading request body"))) + WriteRPCResponseHTTP(w, types.RPCInvalidRequestError(types.JSONRPCStringID(""), errors.Wrap(err, "error reading request body"))) return } // if its an empty request (like from a browser), // just display a list of functions @@ -113,49 +113,59 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, cdc *amino.Codec, logger lo return } - var request types.RPCRequest - err = json.Unmarshal(b, &request) - if err != nil { - WriteRPCResponseHTTP(w, types.RPCParseError(types.JSONRPCStringID(""), errors.Wrap(err, "Error unmarshalling request"))) - return - } - // A Notification is a Request object without an "id" member. - // The Server MUST NOT reply to a Notification, including those that are within a batch request. - if request.ID == types.JSONRPCStringID("") { - logger.Debug("HTTPJSONRPC received a notification, skipping... 
(please send a non-empty ID if you want to call a method)") - return - } - if len(r.URL.Path) > 1 { - WriteRPCResponseHTTP(w, types.RPCInvalidRequestError(request.ID, errors.Errorf("Path %s is invalid", r.URL.Path))) - return - } - - rpcFunc := funcMap[request.Method] - if rpcFunc == nil || rpcFunc.ws { - WriteRPCResponseHTTP(w, types.RPCMethodNotFoundError(request.ID)) - return - } - - ctx := &types.Context{JSONReq: &request, HTTPReq: r} - args := []reflect.Value{reflect.ValueOf(ctx)} - if len(request.Params) > 0 { - fnArgs, err := jsonParamsToArgs(rpcFunc, cdc, request.Params) - if err != nil { - WriteRPCResponseHTTP(w, types.RPCInvalidParamsError(request.ID, errors.Wrap(err, "Error converting json params to arguments"))) + // first try to unmarshal the incoming request as an array of RPC requests + var ( + requests []types.RPCRequest + responses []types.RPCResponse + ) + if err := json.Unmarshal(b, &requests); err != nil { + // next, try to unmarshal as a single request + var request types.RPCRequest + if err := json.Unmarshal(b, &request); err != nil { + WriteRPCResponseHTTP(w, types.RPCParseError(types.JSONRPCStringID(""), errors.Wrap(err, "error unmarshalling request"))) return } - args = append(args, fnArgs...) + requests = []types.RPCRequest{request} } - returns := rpcFunc.f.Call(args) - - logger.Info("HTTPJSONRPC", "method", request.Method, "args", args, "returns", returns) - result, err := unreflectResult(returns) - if err != nil { - WriteRPCResponseHTTP(w, types.RPCInternalError(request.ID, err)) - return + for _, request := range requests { + // A Notification is a Request object without an "id" member. + // The Server MUST NOT reply to a Notification, including those that are within a batch request. + if request.ID == types.JSONRPCStringID("") { + logger.Debug("HTTPJSONRPC received a notification, skipping... (please send a non-empty ID if you want to call a method)") + continue + } + if len(r.URL.Path) > 1 { + responses = append(responses, types.RPCInvalidRequestError(request.ID, errors.Errorf("path %s is invalid", r.URL.Path))) + continue + } + rpcFunc, ok := funcMap[request.Method] + if !ok || rpcFunc.ws { + responses = append(responses, types.RPCMethodNotFoundError(request.ID)) + continue + } + ctx := &types.Context{JSONReq: &request, HTTPReq: r} + args := []reflect.Value{reflect.ValueOf(ctx)} + if len(request.Params) > 0 { + fnArgs, err := jsonParamsToArgs(rpcFunc, cdc, request.Params) + if err != nil { + responses = append(responses, types.RPCInvalidParamsError(request.ID, errors.Wrap(err, "error converting json params to arguments"))) + continue + } + args = append(args, fnArgs...) + } + returns := rpcFunc.f.Call(args) + logger.Info("HTTPJSONRPC", "method", request.Method, "args", args, "returns", returns) + result, err := unreflectResult(returns) + if err != nil { + responses = append(responses, types.RPCInternalError(request.ID, err)) + continue + } + responses = append(responses, types.NewRPCSuccessResponse(cdc, request.ID, result)) + } + if len(responses) > 0 { + WriteRPCResponseArrayHTTP(w, responses) } - WriteRPCResponseHTTP(w, types.NewRPCSuccessResponse(cdc, request.ID, result)) } } @@ -194,7 +204,7 @@ func mapParamsToArgs(rpcFunc *RPCFunc, cdc *amino.Codec, params map[string]json. 
func arrayParamsToArgs(rpcFunc *RPCFunc, cdc *amino.Codec, params []json.RawMessage, argsOffset int) ([]reflect.Value, error) { if len(rpcFunc.argNames) != len(params) { - return nil, errors.Errorf("Expected %v parameters (%v), got %v (%v)", + return nil, errors.Errorf("expected %v parameters (%v), got %v (%v)", len(rpcFunc.argNames), rpcFunc.argNames, len(params), params) } @@ -236,7 +246,7 @@ func jsonParamsToArgs(rpcFunc *RPCFunc, cdc *amino.Codec, raw []byte) ([]reflect } // Otherwise, bad format, we cannot parse - return nil, errors.Errorf("Unknown type for JSON params: %v. Expected map or array", err) + return nil, errors.Errorf("unknown type for JSON params: %v. Expected map or array", err) } // rpc.json @@ -261,7 +271,7 @@ func makeHTTPHandler(rpcFunc *RPCFunc, cdc *amino.Codec, logger log.Logger) func fnArgs, err := httpParamsToArgs(rpcFunc, cdc, r) if err != nil { - WriteRPCResponseHTTP(w, types.RPCInvalidParamsError(types.JSONRPCStringID(""), errors.Wrap(err, "Error converting http params to arguments"))) + WriteRPCResponseHTTP(w, types.RPCInvalidParamsError(types.JSONRPCStringID(""), errors.Wrap(err, "error converting http params to arguments"))) return } args = append(args, fnArgs...) @@ -372,7 +382,7 @@ func _nonJSONStringToArg(cdc *amino.Codec, rt reflect.Type, arg string) (reflect if isHexString { if !expectingString && !expectingByteSlice { - err := errors.Errorf("Got a hex string arg, but expected '%s'", + err := errors.Errorf("got a hex string arg, but expected '%s'", rt.Kind().String()) return reflect.ValueOf(nil), err, false } @@ -631,7 +641,7 @@ func (wsc *wsConnection) readRoutine() { var request types.RPCRequest err = json.Unmarshal(in, &request) if err != nil { - wsc.WriteRPCResponse(types.RPCParseError(types.JSONRPCStringID(""), errors.Wrap(err, "Error unmarshaling request"))) + wsc.WriteRPCResponse(types.RPCParseError(types.JSONRPCStringID(""), errors.Wrap(err, "error unmarshaling request"))) continue } @@ -654,7 +664,7 @@ func (wsc *wsConnection) readRoutine() { if len(request.Params) > 0 { fnArgs, err := jsonParamsToArgs(rpcFunc, wsc.cdc, request.Params) if err != nil { - wsc.WriteRPCResponse(types.RPCInternalError(request.ID, errors.Wrap(err, "Error converting json params to arguments"))) + wsc.WriteRPCResponse(types.RPCInternalError(request.ID, errors.Wrap(err, "error converting json params to arguments"))) continue } args = append(args, fnArgs...) 
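On the server side, the rewritten handler now accepts either a single request object or an array of them. A quick way to exercise the batch path end-to-end is to post a raw two-element batch with only the standard library; the node address is illustrative and `health` is the stock RPC method, so treat this as a sketch:

```go
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// Two well-formed calls with distinct ids: the server replies with a
	// JSON array of two response objects. A request with an empty id is a
	// notification and is silently skipped; if only one response remains,
	// it is written back as a single object rather than an array.
	body := []byte(`[
		{"jsonrpc":"2.0","id":"1","method":"health","params":{}},
		{"jsonrpc":"2.0","id":"2","method":"health","params":{}}
	]`)
	resp, err := http.Post("http://127.0.0.1:26657", "text/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	out, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(string(out))
}
```

This mirrors what `JSONRPCRequestBatch.Send` does under the hood, which is why the client-side ID validation above checks every element of the returned array.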
diff --git a/rpc/lib/server/handlers_test.go b/rpc/lib/server/handlers_test.go index f8ad0610..9cded295 100644 --- a/rpc/lib/server/handlers_test.go +++ b/rpc/lib/server/handlers_test.go @@ -154,6 +154,72 @@ func TestRPCNotification(t *testing.T) { require.Equal(t, len(blob), 0, "a notification SHOULD NOT be responded to by the server") } +func TestRPCNotificationInBatch(t *testing.T) { + mux := testMux() + tests := []struct { + payload string + expectCount int + }{ + { + `[ + {"jsonrpc": "2.0","id": ""}, + {"jsonrpc": "2.0","method":"c","id":"abc","params":["a","10"]} + ]`, + 1, + }, + { + `[ + {"jsonrpc": "2.0","id": ""}, + {"jsonrpc": "2.0","method":"c","id":"abc","params":["a","10"]}, + {"jsonrpc": "2.0","id": ""}, + {"jsonrpc": "2.0","method":"c","id":"abc","params":["a","10"]} + ]`, + 2, + }, + } + for i, tt := range tests { + req, _ := http.NewRequest("POST", "http://localhost/", strings.NewReader(tt.payload)) + rec := httptest.NewRecorder() + mux.ServeHTTP(rec, req) + res := rec.Result() + // Always expecting back a JSONRPCResponse + assert.True(t, statusOK(res.StatusCode), "#%d: should always return 2XX", i) + blob, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Errorf("#%d: err reading body: %v", i, err) + continue + } + + var responses []types.RPCResponse + // try to unmarshal an array first + err = json.Unmarshal(blob, &responses) + if err != nil { + // if we were actually expecting an array, but got an error + if tt.expectCount > 1 { + t.Errorf("#%d: expected an array, couldn't unmarshal it\nblob: %s", i, blob) + continue + } else { + // we were expecting an error here, so let's unmarshal a single response + var response types.RPCResponse + err = json.Unmarshal(blob, &response) + if err != nil { + t.Errorf("#%d: expected successful parsing of an RPCResponse\nblob: %s", i, blob) + continue + } + // have a single-element result + responses = []types.RPCResponse{response} + } + } + if tt.expectCount != len(responses) { + t.Errorf("#%d: expected %d response(s), but got %d\nblob: %s", i, tt.expectCount, len(responses), blob) + continue + } + for _, response := range responses { + assert.NotEqual(t, response, new(types.RPCResponse), "#%d: not expecting a blank RPCResponse", i) + } + } +} + func TestUnknownRPCPath(t *testing.T) { mux := testMux() req, _ := http.NewRequest("GET", "http://localhost/unknownrpcpath", nil) diff --git a/rpc/lib/server/http_params.go b/rpc/lib/server/http_params.go index 3c948c0b..8ade41c7 100644 --- a/rpc/lib/server/http_params.go +++ b/rpc/lib/server/http_params.go @@ -76,7 +76,7 @@ func GetParamUint(r *http.Request, param string) (uint, error) { func GetParamRegexp(r *http.Request, param string, re *regexp.Regexp) (string, error) { s := GetParam(r, param) if !re.MatchString(s) { - return "", errors.Errorf(param, "Did not match regular expression %v", re.String()) + return "", errors.Errorf(param, "did not match regular expression %v", re.String()) } return s, nil } diff --git a/rpc/lib/server/http_server.go b/rpc/lib/server/http_server.go index c4bb6fa1..7825605e 100644 --- a/rpc/lib/server/http_server.go +++ b/rpc/lib/server/http_server.go @@ -98,7 +98,9 @@ func WriteRPCResponseHTTPError( w.Header().Set("Content-Type", "application/json") w.WriteHeader(httpCode) - w.Write(jsonBytes) // nolint: errcheck, gas + if _, err := w.Write(jsonBytes); err != nil { + panic(err) + } } func WriteRPCResponseHTTP(w http.ResponseWriter, res types.RPCResponse) { @@ -108,12 +110,33 @@ func WriteRPCResponseHTTP(w http.ResponseWriter, res types.RPCResponse) { } 
w.Header().Set("Content-Type", "application/json") w.WriteHeader(200) - w.Write(jsonBytes) // nolint: errcheck, gas + if _, err := w.Write(jsonBytes); err != nil { + panic(err) + } +} + +// WriteRPCResponseArrayHTTP will do the same as WriteRPCResponseHTTP, except it +// can write arrays of responses for batched request/response interactions via +// the JSON RPC. +func WriteRPCResponseArrayHTTP(w http.ResponseWriter, res []types.RPCResponse) { + if len(res) == 1 { + WriteRPCResponseHTTP(w, res[0]) + } else { + jsonBytes, err := json.MarshalIndent(res, "", " ") + if err != nil { + panic(err) + } + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(200) + if _, err := w.Write(jsonBytes); err != nil { + panic(err) + } + } } //----------------------------------------------------------------------------- -// Wraps an HTTP handler, adding error logging. +// RecoverAndLogHandler wraps an HTTP handler, adding error logging. // If the inner function panics, the outer function recovers, logs, sends an // HTTP 500 error response. func RecoverAndLogHandler(handler http.Handler, logger log.Logger) http.Handler { @@ -191,14 +214,14 @@ func Listen(addr string, config *Config) (listener net.Listener, err error) { parts := strings.SplitN(addr, "://", 2) if len(parts) != 2 { return nil, errors.Errorf( - "Invalid listening address %s (use fully formed addresses, including the tcp:// or unix:// prefix)", + "invalid listening address %s (use fully formed addresses, including the tcp:// or unix:// prefix)", addr, ) } proto, addr := parts[0], parts[1] listener, err = net.Listen(proto, addr) if err != nil { - return nil, errors.Errorf("Failed to listen on %v: %v", addr, err) + return nil, errors.Errorf("failed to listen on %v: %v", addr, err) } if config.MaxOpenConnections > 0 { listener = netutil.LimitListener(listener, config.MaxOpenConnections) diff --git a/rpc/test/helpers.go b/rpc/test/helpers.go index 10d16562..033015d1 100644 --- a/rpc/test/helpers.go +++ b/rpc/test/helpers.go @@ -8,9 +8,8 @@ import ( "strings" "time" - "github.com/tendermint/tendermint/libs/log" - abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/libs/log" cfg "github.com/tendermint/tendermint/config" cmn "github.com/tendermint/tendermint/libs/common" @@ -23,7 +22,18 @@ import ( rpcclient "github.com/tendermint/tendermint/rpc/lib/client" ) +// Options helps with specifying some parameters for our RPC testing for greater +// control. 
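Before the type itself, a hypothetical test combining the two option setters defined in this file; the kvstore example app and the `rpctest` import alias are assumptions for illustration, not part of this change:

```go
package example_test

import (
	"testing"

	"github.com/tendermint/tendermint/abci/example/kvstore"
	rpctest "github.com/tendermint/tendermint/rpc/test"
)

func TestWithQuietNode(t *testing.T) {
	// SuppressStdout silences the node's logging; RecreateConfig forces a
	// fresh config instead of reusing the global singleton.
	node := rpctest.StartTendermint(kvstore.NewKVStoreApplication(),
		rpctest.SuppressStdout,
		rpctest.RecreateConfig,
	)
	defer rpctest.StopTendermint(node)

	// ... exercise the node's RPC here ...
}
```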
+type Options struct { + suppressStdout bool + recreateConfig bool +} + var globalConfig *cfg.Config +var defaultOptions = Options{ + suppressStdout: false, + recreateConfig: false, +} func waitForRPC() { laddr := GetConfig().RPC.ListenAddress @@ -77,19 +87,24 @@ func makeAddrs() (string, string, string) { fmt.Sprintf("tcp://0.0.0.0:%d", randPort()) } -// GetConfig returns a config for the test cases as a singleton -func GetConfig() *cfg.Config { - if globalConfig == nil { - pathname := makePathname() - globalConfig = cfg.ResetTestRoot(pathname) +func createConfig() *cfg.Config { + pathname := makePathname() + c := cfg.ResetTestRoot(pathname) - // and we use random ports to run in parallel - tm, rpc, grpc := makeAddrs() - globalConfig.P2P.ListenAddress = tm - globalConfig.RPC.ListenAddress = rpc - globalConfig.RPC.CORSAllowedOrigins = []string{"https://tendermint.com/"} - globalConfig.RPC.GRPCListenAddress = grpc - globalConfig.TxIndex.IndexTags = "app.creator,tx.height" // see kvstore application + // and we use random ports to run in parallel + tm, rpc, grpc := makeAddrs() + c.P2P.ListenAddress = tm + c.RPC.ListenAddress = rpc + c.RPC.CORSAllowedOrigins = []string{"https://tendermint.com/"} + c.RPC.GRPCListenAddress = grpc + c.TxIndex.IndexTags = "app.creator,tx.height" // see kvstore application + return c +} + +// GetConfig returns a config for the test cases as a singleton +func GetConfig(forceCreate ...bool) *cfg.Config { + if globalConfig == nil || (len(forceCreate) > 0 && forceCreate[0]) { + globalConfig = createConfig() } return globalConfig } @@ -100,8 +115,12 @@ func GetGRPCClient() core_grpc.BroadcastAPIClient { } // StartTendermint starts a test tendermint server in a go routine and returns when it is initialized -func StartTendermint(app abci.Application) *nm.Node { - node := NewTendermint(app) +func StartTendermint(app abci.Application, opts ...func(*Options)) *nm.Node { + nodeOpts := defaultOptions + for _, opt := range opts { + opt(&nodeOpts) + } + node := NewTendermint(app, &nodeOpts) err := node.Start() if err != nil { panic(err) @@ -111,7 +130,9 @@ func StartTendermint(app abci.Application) *nm.Node { waitForRPC() waitForGRPC() - fmt.Println("Tendermint running!") + if !nodeOpts.suppressStdout { + fmt.Println("Tendermint running!") + } return node } @@ -125,11 +146,16 @@ func StopTendermint(node *nm.Node) { } // NewTendermint creates a new tendermint server and sleeps forever -func NewTendermint(app abci.Application) *nm.Node { +func NewTendermint(app abci.Application, opts *Options) *nm.Node { // Create & start node - config := GetConfig() - logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout)) - logger = log.NewFilter(logger, log.AllowError()) + config := GetConfig(opts.recreateConfig) + var logger log.Logger + if opts.suppressStdout { + logger = log.NewNopLogger() + } else { + logger = log.NewTMLogger(log.NewSyncWriter(os.Stdout)) + logger = log.NewFilter(logger, log.AllowError()) + } pvKeyFile := config.PrivValidatorKeyFile() pvKeyStateFile := config.PrivValidatorStateFile() pv := privval.LoadOrGenFilePV(pvKeyFile, pvKeyStateFile) @@ -148,3 +174,15 @@ func NewTendermint(app abci.Application) *nm.Node { } return node } + +// SuppressStdout is an option that tries to make sure the RPC test Tendermint +// node doesn't log anything to stdout. +func SuppressStdout(o *Options) { + o.suppressStdout = true +} + +// RecreateConfig instructs the RPC test to recreate the configuration each +// time, instead of treating it as a global singleton. 
+func RecreateConfig(o *Options) { + o.recreateConfig = true +} diff --git a/scripts/dist.sh b/scripts/dist.sh index f999c537..ac62f109 100755 --- a/scripts/dist.sh +++ b/scripts/dist.sh @@ -31,9 +31,6 @@ XC_EXCLUDE=${XC_EXCLUDE:-" darwin/arm solaris/amd64 solaris/386 solaris/arm free # Make sure build tools are available. make get_tools -# Get VENDORED dependencies -make get_vendor_deps - # Build! # ldflags: -s Omit the symbol table and debug information. # -w Omit the DWARF symbol table. diff --git a/scripts/get_tools.sh b/scripts/get_tools.sh index dd956691..d8c17df1 100755 --- a/scripts/get_tools.sh +++ b/scripts/get_tools.sh @@ -48,9 +48,6 @@ installFromGithub() { echo "" } -######################## COMMON TOOLS ######################################## -installFromGithub golang/dep 22125cfaa6ddc71e145b1535d4b7ee9744fefff2 cmd/dep - ######################## DEVELOPER TOOLS ##################################### installFromGithub gogo/protobuf 61dbc136cf5d2f08d68a011382652244990a53a9 protoc-gen-gogo diff --git a/scripts/install/install_tendermint_arm.sh b/scripts/install/install_tendermint_arm.sh index b260d8d0..085ba82f 100644 --- a/scripts/install/install_tendermint_arm.sh +++ b/scripts/install/install_tendermint_arm.sh @@ -32,7 +32,6 @@ git checkout $BRANCH # XXX: uncomment if branch isn't master # git fetch origin $BRANCH make get_tools -make get_vendor_deps make install # the binary is located in $GOPATH/bin diff --git a/scripts/install/install_tendermint_bsd.sh b/scripts/install/install_tendermint_bsd.sh index b76b9485..294155d0 100644 --- a/scripts/install/install_tendermint_bsd.sh +++ b/scripts/install/install_tendermint_bsd.sh @@ -47,7 +47,6 @@ cd "$GOPATH/src/$REPO" # build & install master git checkout $BRANCH gmake get_tools -gmake get_vendor_deps gmake install # the binary is located in $GOPATH/bin diff --git a/scripts/install/install_tendermint_osx.sh b/scripts/install/install_tendermint_osx.sh index b4107ab0..ee799f66 100644 --- a/scripts/install/install_tendermint_osx.sh +++ b/scripts/install/install_tendermint_osx.sh @@ -37,5 +37,4 @@ git checkout $BRANCH # XXX: uncomment if branch isn't master # git fetch origin $BRANCH make get_tools -make get_vendor_deps make install diff --git a/scripts/install/install_tendermint_ubuntu.sh b/scripts/install/install_tendermint_ubuntu.sh index 3fe6ea8e..2e5558ff 100644 --- a/scripts/install/install_tendermint_ubuntu.sh +++ b/scripts/install/install_tendermint_ubuntu.sh @@ -41,7 +41,6 @@ git checkout $BRANCH # XXX: uncomment if branch isn't master # git fetch origin $BRANCH make get_tools -make get_vendor_deps make install # the binary is located in $GOPATH/bin diff --git a/scripts/release_management/bump-semver.py b/scripts/release_management/bump-semver.py index b13a1034..ce56d8d7 100755 --- a/scripts/release_management/bump-semver.py +++ b/scripts/release_management/bump-semver.py @@ -8,6 +8,7 @@ import re import argparse +import sys def semver(ver): @@ -17,6 +18,18 @@ def semver(ver): return ver +def get_tendermint_version(): + """Extracts the current Tendermint version from version/version.go""" + pattern = re.compile(r"TMCoreSemVer = \"(?P<version>([0-9.]+)+)\"") + with open("version/version.go", "rt") as version_file: + for line in version_file: + m = pattern.search(line) + if m: + return m.group('version') + + return None + + if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--version", help="Version number to bump, e.g.: v1.0.0", required=True, type=semver) @@ -34,4 +47,16 @@ if __name__ == "__main__": 
else: patch = int(patch) + 1 - print("{0}.{1}".format(majorminorprefix, patch)) + expected_version = "{0}.{1}".format(majorminorprefix, patch) + # if we're doing a release + if expected_version != "v0.0.0": + cur_version = get_tendermint_version() + if not cur_version: + print("Failed to obtain Tendermint version from version/version.go") + sys.exit(1) + expected_version_noprefix = expected_version.lstrip("v") + if expected_version_noprefix != "0.0.0" and expected_version_noprefix != cur_version: + print("Expected version/version.go#TMCoreSemVer to be {0}, but was {1}".format(expected_version_noprefix, cur_version)) + sys.exit(1) + + print(expected_version) diff --git a/state/execution.go b/state/execution.go index 3a11ecca..7e49a9ad 100644 --- a/state/execution.go +++ b/state/execution.go @@ -8,6 +8,7 @@ import ( dbm "github.com/tendermint/tendermint/libs/db" "github.com/tendermint/tendermint/libs/fail" "github.com/tendermint/tendermint/libs/log" + mempl "github.com/tendermint/tendermint/mempool" "github.com/tendermint/tendermint/proxy" "github.com/tendermint/tendermint/types" ) @@ -30,7 +31,7 @@ type BlockExecutor struct { // manage the mempool lock during commit // and update both with block results after commit. - mempool Mempool + mempool mempl.Mempool evpool EvidencePool logger log.Logger @@ -48,7 +49,7 @@ func BlockExecutorWithMetrics(metrics *Metrics) BlockExecutorOption { // NewBlockExecutor returns a new BlockExecutor with a NopEventBus. // Call SetEventBus to provide one. -func NewBlockExecutor(db dbm.DB, logger log.Logger, proxyApp proxy.AppConnConsensus, mempool Mempool, evpool EvidencePool, options ...BlockExecutorOption) *BlockExecutor { +func NewBlockExecutor(db dbm.DB, logger log.Logger, proxyApp proxy.AppConnConsensus, mempool mempl.Mempool, evpool EvidencePool, options ...BlockExecutorOption) *BlockExecutor { res := &BlockExecutor{ db: db, proxyApp: proxyApp, @@ -66,6 +67,10 @@ func NewBlockExecutor(db dbm.DB, logger log.Logger, proxyApp proxy.AppConnConsen return res } +func (blockExec *BlockExecutor) DB() dbm.DB { + return blockExec.db +} + // SetEventBus - sets the event bus for publishing block related events. // If not called, it defaults to types.NopEventBus. func (blockExec *BlockExecutor) SetEventBus(eventBus types.BlockEventPublisher) { @@ -116,7 +121,7 @@ func (blockExec *BlockExecutor) ApplyBlock(state State, blockID types.BlockID, b } startTime := time.Now().UnixNano() - abciResponses, err := execBlockOnProxyApp(blockExec.logger, blockExec.proxyApp, block, state.LastValidators, blockExec.db) + abciResponses, err := execBlockOnProxyApp(blockExec.logger, blockExec.proxyApp, block, blockExec.db) endTime := time.Now().UnixNano() blockExec.metrics.BlockProcessingTime.Observe(float64(endTime-startTime) / 1000000) if err != nil { @@ -151,7 +156,7 @@ func (blockExec *BlockExecutor) ApplyBlock(state State, blockID types.BlockID, b } // Lock mempool, commit app state, update mempoool. 
- appHash, err := blockExec.Commit(state, block) + appHash, err := blockExec.Commit(state, block, abciResponses.DeliverTx) if err != nil { return state, fmt.Errorf("Commit failed for application: %v", err) } @@ -183,6 +188,7 @@ func (blockExec *BlockExecutor) ApplyBlock(state State, blockID types.BlockID, b func (blockExec *BlockExecutor) Commit( state State, block *types.Block, + deliverTxResponses []*abci.ResponseDeliverTx, ) ([]byte, error) { blockExec.mempool.Lock() defer blockExec.mempool.Unlock() @@ -217,6 +223,7 @@ func (blockExec *BlockExecutor) Commit( err = blockExec.mempool.Update( block.Height, block.Txs, + deliverTxResponses, TxPreCheck(state), TxPostCheck(state), ) @@ -233,7 +240,6 @@ func execBlockOnProxyApp( logger log.Logger, proxyAppConn proxy.AppConnConsensus, block *types.Block, - lastValSet *types.ValidatorSet, stateDB dbm.DB, ) (*ABCIResponses, error) { var validTxs, invalidTxs = 0, 0 @@ -261,7 +267,7 @@ func execBlockOnProxyApp( } proxyAppConn.SetResponseCallback(proxyCb) - commitInfo, byzVals := getBeginBlockValidatorInfo(block, lastValSet, stateDB) + commitInfo, byzVals := getBeginBlockValidatorInfo(block, stateDB) // Begin block var err error @@ -296,22 +302,31 @@ func execBlockOnProxyApp( return abciResponses, nil } -func getBeginBlockValidatorInfo(block *types.Block, lastValSet *types.ValidatorSet, stateDB dbm.DB) (abci.LastCommitInfo, []abci.Evidence) { - - // Sanity check that commit length matches validator set size - - // only applies after first block +func getBeginBlockValidatorInfo(block *types.Block, stateDB dbm.DB) (abci.LastCommitInfo, []abci.Evidence) { + voteInfos := make([]abci.VoteInfo, block.LastCommit.Size()) + byzVals := make([]abci.Evidence, len(block.Evidence.Evidence)) + var lastValSet *types.ValidatorSet + var err error if block.Height > 1 { - precommitLen := len(block.LastCommit.Precommits) + lastValSet, err = LoadValidators(stateDB, block.Height-1) + if err != nil { + panic(err) // shouldn't happen + } + + // Sanity check that commit length matches validator set size - + // only applies after first block + + precommitLen := block.LastCommit.Size() valSetLen := len(lastValSet.Validators) if precommitLen != valSetLen { // sanity check panic(fmt.Sprintf("precommit length (%d) doesn't match valset length (%d) at height %d\n\n%v\n\n%v", precommitLen, valSetLen, block.Height, block.LastCommit.Precommits, lastValSet.Validators)) } + } else { + lastValSet = types.NewValidatorSet(nil) } - // Collect the vote info (list of validators and whether or not they signed). - voteInfos := make([]abci.VoteInfo, len(lastValSet.Validators)) for i, val := range lastValSet.Validators { var vote *types.CommitSig if i < len(block.LastCommit.Precommits) { @@ -324,12 +339,6 @@ func getBeginBlockValidatorInfo(block *types.Block, lastValSet *types.ValidatorS voteInfos[i] = voteInfo } - commitInfo := abci.LastCommitInfo{ - Round: int32(block.LastCommit.Round()), - Votes: voteInfos, - } - - byzVals := make([]abci.Evidence, len(block.Evidence.Evidence)) for i, ev := range block.Evidence.Evidence { // We need the validator set. We already did this in validateBlock. 
// TODO: Should we instead cache the valset in the evidence itself and add @@ -341,6 +350,10 @@ func getBeginBlockValidatorInfo(block *types.Block, lastValSet *types.ValidatorS byzVals[i] = types.TM2PB.Evidence(ev, valset, block.Time) } + commitInfo := abci.LastCommitInfo{ + Round: int32(block.LastCommit.Round()), + Votes: voteInfos, + } return commitInfo, byzVals } @@ -469,10 +482,9 @@ func ExecCommitBlock( appConnConsensus proxy.AppConnConsensus, block *types.Block, logger log.Logger, - lastValSet *types.ValidatorSet, stateDB dbm.DB, ) ([]byte, error) { - _, err := execBlockOnProxyApp(logger, appConnConsensus, block, lastValSet, stateDB) + _, err := execBlockOnProxyApp(logger, appConnConsensus, block, stateDB) if err != nil { logger.Error("Error executing block on proxy app", "height", block.Height, "err", err) return nil, err diff --git a/state/execution_test.go b/state/execution_test.go index a9fdfe27..80b442e3 100644 --- a/state/execution_test.go +++ b/state/execution_test.go @@ -13,13 +13,12 @@ import ( abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/secp256k1" - cmn "github.com/tendermint/tendermint/libs/common" dbm "github.com/tendermint/tendermint/libs/db" "github.com/tendermint/tendermint/libs/log" - tmtime "github.com/tendermint/tendermint/types/time" - + "github.com/tendermint/tendermint/mock" "github.com/tendermint/tendermint/proxy" "github.com/tendermint/tendermint/types" + tmtime "github.com/tendermint/tendermint/types/time" ) var ( @@ -38,7 +37,7 @@ func TestApplyBlock(t *testing.T) { state, stateDB := state(1, 1) blockExec := NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), - MockMempool{}, MockEvidencePool{}) + mock.Mempool{}, MockEvidencePool{}) block := makeBlock(state, 1) blockID := types.BlockID{block.Hash(), block.MakePartSet(testPartSize).Header()} @@ -85,7 +84,7 @@ func TestBeginBlockValidators(t *testing.T) { // block for height 2 block, _ := state.MakeBlock(2, makeTxs(2), lastCommit, nil, state.Validators.GetProposer().Address) - _, err = ExecCommitBlock(proxyApp.Consensus(), block, log.TestingLogger(), state.Validators, stateDB) + _, err = ExecCommitBlock(proxyApp.Consensus(), block, log.TestingLogger(), stateDB) require.Nil(t, err, tc.desc) // -> app receives a list of validators with a bool indicating if they signed @@ -146,7 +145,7 @@ func TestBeginBlockByzantineValidators(t *testing.T) { block, _ := state.MakeBlock(10, makeTxs(2), lastCommit, nil, state.Validators.GetProposer().Address) block.Time = now block.Evidence.Evidence = tc.evidence - _, err = ExecCommitBlock(proxyApp.Consensus(), block, log.TestingLogger(), state.Validators, stateDB) + _, err = ExecCommitBlock(proxyApp.Consensus(), block, log.TestingLogger(), stateDB) require.Nil(t, err, tc.desc) // -> app must receive an index of the byzantine validator @@ -310,7 +309,7 @@ func TestEndBlockValidatorUpdates(t *testing.T) { state, stateDB := state(1, 1) - blockExec := NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), MockMempool{}, MockEvidencePool{}) + blockExec := NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), mock.Mempool{}, MockEvidencePool{}) eventBus := types.NewEventBus() err = eventBus.Start() @@ -367,7 +366,7 @@ func TestEndBlockValidatorUpdatesResultingInEmptySet(t *testing.T) { defer proxyApp.Stop() state, stateDB := state(1, 1) - blockExec := NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), MockMempool{}, 
MockEvidencePool{}) + blockExec := NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), mock.Mempool{}, MockEvidencePool{}) block := makeBlock(state, 1) blockID := types.BlockID{block.Hash(), block.MakePartSet(testPartSize).Header()} @@ -455,7 +454,7 @@ func (app *testApp) EndBlock(req abci.RequestEndBlock) abci.ResponseEndBlock { } func (app *testApp) DeliverTx(tx []byte) abci.ResponseDeliverTx { - return abci.ResponseDeliverTx{Tags: []cmn.KVPair{}} + return abci.ResponseDeliverTx{Events: []abci.Event{}} } func (app *testApp) CheckTx(tx []byte) abci.ResponseCheckTx { diff --git a/state/services.go b/state/services.go index 07d12c5a..98f6afce 100644 --- a/state/services.go +++ b/state/services.go @@ -1,8 +1,6 @@ package state import ( - abci "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/mempool" "github.com/tendermint/tendermint/types" ) @@ -11,57 +9,6 @@ import ( // NOTE: Interfaces used by RPC must be thread safe! //------------------------------------------------------ -//------------------------------------------------------ -// mempool - -// Mempool defines the mempool interface as used by the ConsensusState. -// Updates to the mempool need to be synchronized with committing a block -// so apps can reset their transient state on Commit -type Mempool interface { - Lock() - Unlock() - - Size() int - CheckTx(types.Tx, func(*abci.Response)) error - CheckTxWithInfo(types.Tx, func(*abci.Response), mempool.TxInfo) error - ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs - Update(int64, types.Txs, mempool.PreCheckFunc, mempool.PostCheckFunc) error - Flush() - FlushAppConn() error - - TxsAvailable() <-chan struct{} - EnableTxsAvailable() -} - -// MockMempool is an empty implementation of a Mempool, useful for testing. -type MockMempool struct{} - -var _ Mempool = MockMempool{} - -func (MockMempool) Lock() {} -func (MockMempool) Unlock() {} -func (MockMempool) Size() int { return 0 } -func (MockMempool) CheckTx(_ types.Tx, _ func(*abci.Response)) error { - return nil -} -func (MockMempool) CheckTxWithInfo(_ types.Tx, _ func(*abci.Response), - _ mempool.TxInfo) error { - return nil -} -func (MockMempool) ReapMaxBytesMaxGas(_, _ int64) types.Txs { return types.Txs{} } -func (MockMempool) Update( - _ int64, - _ types.Txs, - _ mempool.PreCheckFunc, - _ mempool.PostCheckFunc, -) error { - return nil -} -func (MockMempool) Flush() {} -func (MockMempool) FlushAppConn() error { return nil } -func (MockMempool) TxsAvailable() <-chan struct{} { return make(chan struct{}) } -func (MockMempool) EnableTxsAvailable() {} - //------------------------------------------------------ // blockstore @@ -96,7 +43,7 @@ type EvidencePool interface { IsCommitted(types.Evidence) bool } -// MockMempool is an empty implementation of a Mempool, useful for testing. +// MockEvidencePool is an empty implementation of an EvidencePool, useful for testing. type MockEvidencePool struct{} func (m MockEvidencePool) PendingEvidence(int64) []types.Evidence { return nil } diff --git a/state/state_test.go b/state/state_test.go index eddbe255..c7600cc3 100644 --- a/state/state_test.go +++ b/state/state_test.go @@ -93,8 +93,8 @@ func TestABCIResponsesSaveLoad1(t *testing.T) { // Build mock responses.
block := makeBlock(state, 2) abciResponses := NewABCIResponses(block) - abciResponses.DeliverTx[0] = &abci.ResponseDeliverTx{Data: []byte("foo"), Tags: nil} - abciResponses.DeliverTx[1] = &abci.ResponseDeliverTx{Data: []byte("bar"), Log: "ok", Tags: nil} + abciResponses.DeliverTx[0] = &abci.ResponseDeliverTx{Data: []byte("foo"), Events: nil} + abciResponses.DeliverTx[1] = &abci.ResponseDeliverTx{Data: []byte("bar"), Log: "ok", Events: nil} abciResponses.EndBlock = &abci.ResponseEndBlock{ValidatorUpdates: []abci.ValidatorUpdate{ types.TM2PB.NewValidatorUpdate(ed25519.GenPrivKey().PubKey(), 10), }} @@ -134,11 +134,13 @@ func TestABCIResponsesSaveLoad2(t *testing.T) { 2: { []*abci.ResponseDeliverTx{ {Code: 383}, - {Data: []byte("Gotcha!"), - Tags: []cmn.KVPair{ - {Key: []byte("a"), Value: []byte("1")}, - {Key: []byte("build"), Value: []byte("stuff")}, - }}, + { + Data: []byte("Gotcha!"), + Events: []abci.Event{ + {Type: "type1", Attributes: []cmn.KVPair{{Key: []byte("a"), Value: []byte("1")}}}, + {Type: "type2", Attributes: []cmn.KVPair{{Key: []byte("build"), Value: []byte("stuff")}}}, + }, + }, }, types.ABCIResults{ {383, nil}, diff --git a/state/store.go b/state/store.go index 73116b43..f0bb9e14 100644 --- a/state/store.go +++ b/state/store.go @@ -115,9 +115,9 @@ func saveState(db dbm.DB, state State, key []byte) { // of the various ABCI calls during block processing. // It is persisted to disk for each height before calling Commit. type ABCIResponses struct { - DeliverTx []*abci.ResponseDeliverTx - EndBlock *abci.ResponseEndBlock - BeginBlock *abci.ResponseBeginBlock + DeliverTx []*abci.ResponseDeliverTx `json:"deliver_tx"` + EndBlock *abci.ResponseEndBlock `json:"end_block"` + BeginBlock *abci.ResponseBeginBlock `json:"begin_block"` } // NewABCIResponses returns a new ABCIResponses @@ -193,7 +193,7 @@ func LoadValidators(db dbm.DB, height int64) (*types.ValidatorSet, error) { if valInfo.ValidatorSet == nil { lastStoredHeight := lastStoredHeightFor(height, valInfo.LastHeightChanged) valInfo2 := loadValidatorsInfo(db, lastStoredHeight) - if valInfo2 == nil { + if valInfo2 == nil || valInfo2.ValidatorSet == nil { // TODO (melekes): remove the below if condition in the 0.33 major // release and just panic. 
Old chains might panic otherwise if they haven't saved validators at intermediate (%valSetCheckpointInterval) @@ -201,7 +201,7 @@ func LoadValidators(db dbm.DB, height int64) (*types.ValidatorSet, error) { // https://github.com/tendermint/tendermint/issues/3543 valInfo2 = loadValidatorsInfo(db, valInfo.LastHeightChanged) lastStoredHeight = valInfo.LastHeightChanged - if valInfo2 == nil { + if valInfo2 == nil || valInfo2.ValidatorSet == nil { panic( fmt.Sprintf("Couldn't find validators at height %d (height %d was originally requested)", lastStoredHeight, diff --git a/state/store_test.go b/state/store_test.go index dd48cae7..06adeefa 100644 --- a/state/store_test.go +++ b/state/store_test.go @@ -6,34 +6,50 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" cfg "github.com/tendermint/tendermint/config" dbm "github.com/tendermint/tendermint/libs/db" "github.com/tendermint/tendermint/types" ) -func TestSaveValidatorsInfo(t *testing.T) { - // test we persist validators every valSetCheckpointInterval blocks +func TestStoreLoadValidators(t *testing.T) { stateDB := dbm.NewMemDB() val, _ := types.RandValidator(true, 10) vals := types.NewValidatorSet([]*types.Validator{val}) - // TODO(melekes): remove in 0.33 release - // https://github.com/tendermint/tendermint/issues/3543 + // 1) LoadValidators loads validators using a height where they were last changed saveValidatorsInfo(stateDB, 1, 1, vals) saveValidatorsInfo(stateDB, 2, 1, vals) + loadedVals, err := LoadValidators(stateDB, 2) + require.NoError(t, err) + assert.NotZero(t, loadedVals.Size()) + + // 2) LoadValidators loads validators using a checkpoint height + + // TODO(melekes): REMOVE in 0.33 release + // https://github.com/tendermint/tendermint/issues/3543 + // for releases prior to v0.31.4, it uses last height changed + valInfo := &ValidatorsInfo{ + LastHeightChanged: valSetCheckpointInterval, + } + stateDB.Set(calcValidatorsKey(valSetCheckpointInterval), valInfo.Bytes()) assert.NotPanics(t, func() { - _, err := LoadValidators(stateDB, 2) + saveValidatorsInfo(stateDB, valSetCheckpointInterval+1, 1, vals) + loadedVals, err := LoadValidators(stateDB, valSetCheckpointInterval+1) if err != nil { - panic(err) + t.Fatal(err) + } + if loadedVals.Size() == 0 { + t.Fatal("Expected validators to be non-empty") } }) - //ENDREMOVE + // ENDREMOVE saveValidatorsInfo(stateDB, valSetCheckpointInterval, 1, vals) - loadedVals, err := LoadValidators(stateDB, valSetCheckpointInterval) - assert.NoError(t, err) + loadedVals, err = LoadValidators(stateDB, valSetCheckpointInterval) + require.NoError(t, err) assert.NotZero(t, loadedVals.Size()) } diff --git a/state/txindex/kv/kv.go b/state/txindex/kv/kv.go index 84208b8c..053d26a7 100644 --- a/state/txindex/kv/kv.go +++ b/state/txindex/kv/kv.go @@ -10,9 +10,9 @@ import ( "time" "github.com/pkg/errors" + cmn "github.com/tendermint/tendermint/libs/common" dbm "github.com/tendermint/tendermint/libs/db" - "github.com/tendermint/tendermint/libs/pubsub/query" "github.com/tendermint/tendermint/state/txindex" "github.com/tendermint/tendermint/types" @@ -75,7 +75,10 @@ func (txi *TxIndex) Get(hash []byte) (*types.TxResult, error) { return txResult, nil } -// AddBatch indexes a batch of transactions using the given list of tags. +// AddBatch indexes a batch of transactions using the given list of events. Each +// key indexed from the tx's events is a composite of the event type and +// the respective attribute's key delimited by a "." (e.g. "account.number").
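+// For example, an event {Type: "account", Attributes: [{Key: "number", Value: "1"}]} for a tx at height 3 and index 0 is stored under the key "account.number/1/3/0" (see keyForEvent below).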
+// Any event with an empty type is not indexed. func (txi *TxIndex) AddBatch(b *txindex.Batch) error { storeBatch := txi.store.NewBatch() defer storeBatch.Close() @@ -83,12 +86,8 @@ func (txi *TxIndex) AddBatch(b *txindex.Batch) error { for _, result := range b.Ops { hash := result.Tx.Hash() - // index tx by tags - for _, tag := range result.Result.Tags { - if txi.indexAllTags || cmn.StringInSlice(string(tag.Key), txi.tagsToIndex) { - storeBatch.Set(keyForTag(tag, result), hash) - } - } + // index tx by events + txi.indexEvents(result, hash, storeBatch) // index tx by height if txi.indexAllTags || cmn.StringInSlice(types.TxHeightKey, txi.tagsToIndex) { @@ -107,19 +106,18 @@ func (txi *TxIndex) AddBatch(b *txindex.Batch) error { return nil } -// Index indexes a single transaction using the given list of tags. +// Index indexes a single transaction using the given list of events. Each key +// indexed from the tx's events is a composite of the event type and the +// respective attribute's key delimited by a "." (e.g. "account.number"). +// Any event with an empty type is not indexed. func (txi *TxIndex) Index(result *types.TxResult) error { b := txi.store.NewBatch() defer b.Close() hash := result.Tx.Hash() - // index tx by tags - for _, tag := range result.Result.Tags { - if txi.indexAllTags || cmn.StringInSlice(string(tag.Key), txi.tagsToIndex) { - b.Set(keyForTag(tag, result), hash) - } - } + // index tx by events + txi.indexEvents(result, hash, b) // index tx by height if txi.indexAllTags || cmn.StringInSlice(types.TxHeightKey, txi.tagsToIndex) { @@ -131,12 +129,33 @@ func (txi *TxIndex) Index(result *types.TxResult) error { if err != nil { return err } - b.Set(hash, rawBytes) + b.Set(hash, rawBytes) b.Write() + return nil } +func (txi *TxIndex) indexEvents(result *types.TxResult, hash []byte, store dbm.SetDeleter) { + for _, event := range result.Result.Events { + // only index events with a non-empty type + if len(event.Type) == 0 { + continue + } + + for _, attr := range event.Attributes { + if len(attr.Key) == 0 { + continue + } + + compositeTag := fmt.Sprintf("%s.%s", event.Type, string(attr.Key)) + if txi.indexAllTags || cmn.StringInSlice(compositeTag, txi.tagsToIndex) { + store.Set(keyForEvent(compositeTag, attr.Value, result), hash) + } + } + } +} + // Search performs a search using the given query. It breaks the query into // conditions (like "tx.height > 5"). For each condition, it queries the DB // index. One special use cases here: (1) if "tx.hash" is found, it returns tx @@ -343,7 +362,7 @@ func (txi *TxIndex) match(c query.Condition, startKeyBz []byte) (hashes [][]byte } } else if c.Op == query.OpContains { // XXX: startKey does not apply here.
- // For example, if startKey = "account.owner/an/" and search query = "accoutn.owner CONTAINS an" + // For example, if startKey = "account.owner/an/" and search query = "account.owner CONTAINS an" // we can't iterate with prefix "account.owner/an/" because we might miss keys like "account.owner/Ulan/" it := dbm.IteratePrefix(txi.store, startKey(c.Tag)) defer it.Close() @@ -420,10 +439,10 @@ func extractValueFromKey(key []byte) string { return parts[1] } -func keyForTag(tag cmn.KVPair, result *types.TxResult) []byte { +func keyForEvent(key string, value []byte, result *types.TxResult) []byte { return []byte(fmt.Sprintf("%s/%s/%d/%d", - tag.Key, - tag.Value, + key, + value, result.Height, result.Index, )) diff --git a/state/txindex/kv/kv_test.go b/state/txindex/kv/kv_test.go index b726a423..cacfaad0 100644 --- a/state/txindex/kv/kv_test.go +++ b/state/txindex/kv/kv_test.go @@ -21,7 +21,15 @@ func TestTxIndex(t *testing.T) { indexer := NewTxIndex(db.NewMemDB()) tx := types.Tx("HELLO WORLD") - txResult := &types.TxResult{1, 0, tx, abci.ResponseDeliverTx{Data: []byte{0}, Code: abci.CodeTypeOK, Log: "", Tags: nil}} + txResult := &types.TxResult{ + Height: 1, + Index: 0, + Tx: tx, + Result: abci.ResponseDeliverTx{ + Data: []byte{0}, + Code: abci.CodeTypeOK, Log: "", Events: nil, + }, + } hash := tx.Hash() batch := txindex.NewBatch(1) @@ -36,7 +44,15 @@ func TestTxIndex(t *testing.T) { assert.Equal(t, txResult, loadedTxResult) tx2 := types.Tx("BYE BYE WORLD") - txResult2 := &types.TxResult{1, 0, tx2, abci.ResponseDeliverTx{Data: []byte{0}, Code: abci.CodeTypeOK, Log: "", Tags: nil}} + txResult2 := &types.TxResult{ + Height: 1, + Index: 0, + Tx: tx2, + Result: abci.ResponseDeliverTx{ + Data: []byte{0}, + Code: abci.CodeTypeOK, Log: "", Events: nil, + }, + } hash2 := tx2.Hash() err = indexer.Index(txResult2) @@ -51,10 +67,10 @@ func TestTxSearch(t *testing.T) { allowedTags := []string{"account.number", "account.owner", "account.date"} indexer := NewTxIndex(db.NewMemDB(), IndexTags(allowedTags)) - txResult := txResultWithTags([]cmn.KVPair{ - {Key: []byte("account.number"), Value: []byte("1")}, - {Key: []byte("account.owner"), Value: []byte("Ivan")}, - {Key: []byte("not_allowed"), Value: []byte("Vlad")}, + txResult := txResultWithEvents([]abci.Event{ + {Type: "account", Attributes: []cmn.KVPair{{Key: []byte("number"), Value: []byte("1")}}}, + {Type: "account", Attributes: []cmn.KVPair{{Key: []byte("owner"), Value: []byte("Ivan")}}}, + {Type: "", Attributes: []cmn.KVPair{{Key: []byte("not_allowed"), Value: []byte("Vlad")}}}, }) hash := txResult.Tx.Hash() @@ -108,13 +124,82 @@ func TestTxSearch(t *testing.T) { } } +func TestTxSearchDeprecatedIndexing(t *testing.T) { + allowedTags := []string{"account.number", "sender"} + indexer := NewTxIndex(db.NewMemDB(), IndexTags(allowedTags)) + + // index tx using events indexing (composite key) + txResult1 := txResultWithEvents([]abci.Event{ + {Type: "account", Attributes: []cmn.KVPair{{Key: []byte("number"), Value: []byte("1")}}}, + }) + hash1 := txResult1.Tx.Hash() + + err := indexer.Index(txResult1) + require.NoError(t, err) + + // index tx also using deprecated indexing (tag as key) + txResult2 := txResultWithEvents(nil) + txResult2.Tx = types.Tx("HELLO WORLD 2") + + hash2 := txResult2.Tx.Hash() + b := indexer.store.NewBatch() + + rawBytes, err := cdc.MarshalBinaryBare(txResult2) + require.NoError(t, err) + + depKey := []byte(fmt.Sprintf("%s/%s/%d/%d", + "sender", + "addr1", + txResult2.Height, + txResult2.Index, + )) + + b.Set(depKey, hash2) + 
b.Set(keyForHeight(txResult2), hash2) + b.Set(hash2, rawBytes) + b.Write() + + testCases := []struct { + q string + results []*types.TxResult + }{ + // search by hash (tx indexed via events) + {fmt.Sprintf("tx.hash = '%X'", hash1), []*types.TxResult{txResult1}}, + // search by hash (tx indexed via the deprecated tag) + {fmt.Sprintf("tx.hash = '%X'", hash2), []*types.TxResult{txResult2}}, + // search by exact match (one tag) + {"account.number = 1", []*types.TxResult{txResult1}}, + // search by range (both bounds) + {"account.number >= 1 AND account.number <= 5", []*types.TxResult{txResult1}}, + // search by range (lower bound) + {"account.number >= 1", []*types.TxResult{txResult1}}, + // search by range (upper bound) + {"account.number <= 5", []*types.TxResult{txResult1}}, + // search using not allowed tag + {"not_allowed = 'boom'", []*types.TxResult{}}, + // search for a non-existent tx result + {"account.number >= 2 AND account.number <= 5", []*types.TxResult{}}, + // search using a non-existent tag + {"account.date >= TIME 2013-05-03T14:45:00Z", []*types.TxResult{}}, + // search by deprecated tag + {"sender = 'addr1'", []*types.TxResult{txResult2}}, + } + + for _, tc := range testCases { + t.Run(tc.q, func(t *testing.T) { + results, err := indexer.Search(query.MustParse(tc.q)) + require.NoError(t, err) + require.Equal(t, tc.results, results) + }) + } +} + func TestTxSearchOneTxWithMultipleSameTagsButDifferentValues(t *testing.T) { allowedTags := []string{"account.number"} indexer := NewTxIndex(db.NewMemDB(), IndexTags(allowedTags)) - txResult := txResultWithTags([]cmn.KVPair{ - {Key: []byte("account.number"), Value: []byte("1")}, - {Key: []byte("account.number"), Value: []byte("2")}, + txResult := txResultWithEvents([]abci.Event{ + {Type: "account", Attributes: []cmn.KVPair{{Key: []byte("number"), Value: []byte("1")}}}, + {Type: "account", Attributes: []cmn.KVPair{{Key: []byte("number"), Value: []byte("2")}}}, }) err := indexer.Index(txResult) @@ -132,9 +217,10 @@ func TestTxSearchMultipleTxs(t *testing.T) { indexer := NewTxIndex(db.NewMemDB(), IndexTags(allowedTags)) // indexed first, but bigger height (to test the order of transactions) - txResult := txResultWithTags([]cmn.KVPair{ - {Key: []byte("account.number"), Value: []byte("1")}, + txResult := txResultWithEvents([]abci.Event{ + {Type: "account", Attributes: []cmn.KVPair{{Key: []byte("number"), Value: []byte("1")}}}, }) + txResult.Tx = types.Tx("Bob's account") txResult.Height = 2 txResult.Index = 1 @@ -142,8 +228,8 @@ require.NoError(t, err) // indexed second, but smaller height (to test the order of transactions) - txResult2 := txResultWithTags([]cmn.KVPair{ - {Key: []byte("account.number"), Value: []byte("2")}, + txResult2 := txResultWithEvents([]abci.Event{ + {Type: "account", Attributes: []cmn.KVPair{{Key: []byte("number"), Value: []byte("2")}}}, }) txResult2.Tx = types.Tx("Alice's account") txResult2.Height = 1 @@ -153,8 +239,8 @@ require.NoError(t, err) // indexed third (to test the order of transactions) - txResult3 := txResultWithTags([]cmn.KVPair{ - {Key: []byte("account.number"), Value: []byte("3")}, + txResult3 := txResultWithEvents([]abci.Event{ + {Type: "account", Attributes: []cmn.KVPair{{Key: []byte("number"), Value: []byte("3")}}}, }) txResult3.Tx = types.Tx("Jack's account") txResult3.Height = 1 @@ -164,8 +250,8 @@ // indexed fourth (to test we don't include txs with similar tags) // https://github.com/tendermint/tendermint/issues/2908 - txResult4 =
txResultWithTags([]cmn.KVPair{ - {Key: []byte("account.number.id"), Value: []byte("1")}, + txResult4 := txResultWithEvents([]abci.Event{ + {Type: "account", Attributes: []cmn.KVPair{{Key: []byte("number.id"), Value: []byte("1")}}}, }) txResult4.Tx = types.Tx("Mike's account") txResult4.Height = 2 @@ -183,9 +269,9 @@ func TestTxSearchMultipleTxs(t *testing.T) { func TestIndexAllTags(t *testing.T) { indexer := NewTxIndex(db.NewMemDB(), IndexAllTags()) - txResult := txResultWithTags([]cmn.KVPair{ - {Key: []byte("account.owner"), Value: []byte("Ivan")}, - {Key: []byte("account.number"), Value: []byte("1")}, + txResult := txResultWithEvents([]abci.Event{ + {Type: "account", Attributes: []cmn.KVPair{{Key: []byte("owner"), Value: []byte("Ivan")}}}, + {Type: "account", Attributes: []cmn.KVPair{{Key: []byte("number"), Value: []byte("1")}}}, }) err := indexer.Index(txResult) @@ -202,17 +288,17 @@ func TestIndexAllTags(t *testing.T) { assert.Equal(t, []*types.TxResult{txResult}, results) } -func txResultWithTags(tags []cmn.KVPair) *types.TxResult { +func txResultWithEvents(events []abci.Event) *types.TxResult { tx := types.Tx("HELLO WORLD") return &types.TxResult{ Height: 1, Index: 0, Tx: tx, Result: abci.ResponseDeliverTx{ - Data: []byte{0}, - Code: abci.CodeTypeOK, - Log: "", - Tags: tags, + Data: []byte{0}, + Code: abci.CodeTypeOK, + Log: "", + Events: events, }, } } @@ -236,10 +322,10 @@ func benchmarkTxIndex(txsCount int64, b *testing.B) { Index: txIndex, Tx: tx, Result: abci.ResponseDeliverTx{ - Data: []byte{0}, - Code: abci.CodeTypeOK, - Log: "", - Tags: []cmn.KVPair{}, + Data: []byte{0}, + Code: abci.CodeTypeOK, + Log: "", + Events: []abci.Event{}, }, } if err := batch.Add(txResult); err != nil { diff --git a/test/app/counter_test.sh b/test/app/counter_test.sh index 868f8d03..a4f7c83b 100755 --- a/test/app/counter_test.sh +++ b/test/app/counter_test.sh @@ -1,5 +1,7 @@ #! /bin/bash +export GO111MODULE=on + if [[ "$GRPC_BROADCAST_TX" == "" ]]; then GRPC_BROADCAST_TX="" fi @@ -38,7 +40,7 @@ if [[ "$GRPC_BROADCAST_TX" != "" ]]; then rm grpc_client fi echo "... building grpc_client" - go build -o grpc_client grpc_client.go + go build -mod=readonly -o grpc_client grpc_client.go fi function sendTx() { diff --git a/test/docker/Dockerfile b/test/docker/Dockerfile index 1a64d417..77cc515e 100644 --- a/test/docker/Dockerfile +++ b/test/docker/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.10 +FROM golang:1.12 # Add testing deps for curl RUN echo 'deb http://httpredir.debian.org/debian testing main non-free contrib' >> /etc/apt/sources.list @@ -20,7 +20,6 @@ COPY . $REPO # Install the vendored dependencies # docker caching prevents reinstall on code change! 
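# NOTE: with the switch to Go modules there is no separate vendoring step; dependencies are fetched as part of the build itself.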
RUN make get_tools -RUN make get_vendor_deps # install ABCI CLI RUN make install_abci diff --git a/tools/build/Makefile b/tools/build/Makefile index f9384ac6..8c33ffd5 100644 --- a/tools/build/Makefile +++ b/tools/build/Makefile @@ -64,7 +64,7 @@ build-tendermint: git-branch gopath-setup @echo "*** Building tendermint" go get -d -u github.com/tendermint/tendermint/cmd/tendermint cd $(GOPATH)/src/github.com/tendermint/tendermint && git checkout "$(GIT_BRANCH)" && git pull - export PATH=$(GOPATH)/bin:$(PATH) && $(MAKE) -C $(GOPATH)/src/github.com/tendermint/tendermint get_tools get_vendor_deps build + export PATH=$(GOPATH)/bin:$(PATH) && $(MAKE) -C $(GOPATH)/src/github.com/tendermint/tendermint get_tools build cp $(GOPATH)/src/github.com/tendermint/tendermint/build/tendermint $(GOPATH)/bin @echo "*** Built tendermint" @@ -72,7 +72,7 @@ build-ethermint: git-branch gopath-setup @echo "*** Building ethermint" go get -d -u github.com/tendermint/ethermint/cmd/ethermint cd $(GOPATH)/src/github.com/tendermint/ethermint && git checkout "$(GIT_BRANCH)" && git pull - export PATH=$(GOPATH)/bin:$(PATH) && $(MAKE) -C $(GOPATH)/src/github.com/tendermint/ethermint get_vendor_deps build + export PATH=$(GOPATH)/bin:$(PATH) && $(MAKE) -C $(GOPATH)/src/github.com/tendermint/ethermint build cp $(GOPATH)/src/github.com/tendermint/ethermint/build/ethermint $(GOPATH)/bin @echo "*** Built ethermint" @@ -80,14 +80,14 @@ build-gaia: git-branch gopath-setup @echo "*** Building gaia" go get -d -u go github.com/cosmos/gaia || echo "Workaround for go downloads." cd $(GOPATH)/src/github.com/cosmos/gaia && git checkout "$(GIT_BRANCH)" && git pull - export PATH=$(GOPATH)/bin:$(PATH) && $(MAKE) -C $(GOPATH)/src/github.com/cosmos/gaia get_vendor_deps install + export PATH=$(GOPATH)/bin:$(PATH) && $(MAKE) -C $(GOPATH)/src/github.com/cosmos/gaia install @echo "*** Built gaia" build-basecoind: git-branch gopath-setup @echo "*** Building basecoind from cosmos-sdk" go get -d -u github.com/cosmos/cosmos-sdk/examples/basecoin/cmd/basecoind cd $(GOPATH)/src/github.com/cosmos/cosmos-sdk && git checkout "$(GIT_BRANCH)" && git pull - export PATH=$(GOPATH)/bin:$(PATH) && $(MAKE) -C $(GOPATH)/src/github.com/cosmos/cosmos-sdk get_tools get_vendor_deps build + export PATH=$(GOPATH)/bin:$(PATH) && $(MAKE) -C $(GOPATH)/src/github.com/cosmos/cosmos-sdk get_tools build cp $(GOPATH)/src/github.com/cosmos/cosmos-sdk/build/basecoind $(GOPATH)/bin/basecoind @echo "*** Built basecoind from cosmos-sdk" diff --git a/tools/tm-bench/Dockerfile.dev b/tools/tm-bench/Dockerfile.dev index 469bb815..1151965a 100644 --- a/tools/tm-bench/Dockerfile.dev +++ b/tools/tm-bench/Dockerfile.dev @@ -9,4 +9,3 @@ RUN make get_tools COPY . /go/src/github.com/tendermint/tendermint/tools/tm-bench -RUN make get_vendor_deps diff --git a/tools/tm-bench/README.md b/tools/tm-bench/README.md index b4e8cec5..d5ed1231 100644 --- a/tools/tm-bench/README.md +++ b/tools/tm-bench/README.md @@ -100,6 +100,5 @@ Each of the connections is handled via two separate goroutines. ## Development ``` -make get_vendor_deps make test ``` diff --git a/tools/tm-monitor/Dockerfile.dev b/tools/tm-monitor/Dockerfile.dev index 5bfbbfd5..e593bf89 100644 --- a/tools/tm-monitor/Dockerfile.dev +++ b/tools/tm-monitor/Dockerfile.dev @@ -9,4 +9,3 @@ RUN make get_tools COPY . 
/go/src/github.com/tendermint/tools/tm-monitor -RUN make get_vendor_deps diff --git a/tools/tm-monitor/README.md b/tools/tm-monitor/README.md index 374a56b0..2bd367b9 100644 --- a/tools/tm-monitor/README.md +++ b/tools/tm-monitor/README.md @@ -87,6 +87,5 @@ websocket. ``` make get_tools -make get_vendor_deps make test ``` diff --git a/types/block.go b/types/block.go index 6616c0ee..313eb6b7 100644 --- a/types/block.go +++ b/types/block.go @@ -500,6 +500,8 @@ func (cs *CommitSig) toVote() *Vote { return &v } +//------------------------------------- + // Commit contains the evidence that a block was committed by a set of validators. // NOTE: Commit is empty for height 1, but never nil. type Commit struct { @@ -528,15 +530,61 @@ func NewCommit(blockID BlockID, precommits []*CommitSig) *Commit { } } +// Construct a VoteSet from the Commit and validator set. Panics +// if precommits from the commit can't be added to the voteset. +// Inverse of VoteSet.MakeCommit(). +func CommitToVoteSet(chainID string, commit *Commit, vals *ValidatorSet) *VoteSet { + height, round, typ := commit.Height(), commit.Round(), PrecommitType + voteSet := NewVoteSet(chainID, height, round, typ, vals) + for idx, precommit := range commit.Precommits { + if precommit == nil { + continue + } + added, err := voteSet.AddVote(commit.GetVote(idx)) + if !added || err != nil { + panic(fmt.Sprintf("Failed to reconstruct LastCommit: %v", err)) + } + } + return voteSet +} + +// GetVote converts the CommitSig for the given valIdx to a Vote. +// Returns nil if the precommit at valIdx is nil. +// Panics if valIdx >= commit.Size(). +func (commit *Commit) GetVote(valIdx int) *Vote { + commitSig := commit.Precommits[valIdx] + if commitSig == nil { + return nil + } + + // NOTE: this commitSig might be for a nil blockID, + // so we can't just use commit.BlockID here. + // For #1648, CommitSig will need to indicate what BlockID it's for ! + blockID := commitSig.BlockID + commit.memoizeHeightRound() + return &Vote{ + Type: PrecommitType, + Height: commit.height, + Round: commit.round, + BlockID: blockID, + Timestamp: commitSig.Timestamp, + ValidatorAddress: commitSig.ValidatorAddress, + ValidatorIndex: valIdx, + Signature: commitSig.Signature, + } +} + // VoteSignBytes constructs the SignBytes for the given CommitSig. // The only unique part of the SignBytes is the Timestamp - all other fields // signed over are otherwise the same for all validators. -func (commit *Commit) VoteSignBytes(chainID string, cs *CommitSig) []byte { - return commit.ToVote(cs).SignBytes(chainID) +// Panics if valIdx >= commit.Size(). +func (commit *Commit) VoteSignBytes(chainID string, valIdx int) []byte { + return commit.GetVote(valIdx).SignBytes(chainID) } // memoizeHeightRound memoizes the height and round of the commit using // the first non-nil vote. +// Should be called before any attempt to access `commit.height` or `commit.round`. func (commit *Commit) memoizeHeightRound() { if len(commit.Precommits) == 0 { return @@ -553,14 +601,6 @@ func (commit *Commit) memoizeHeightRound() { } } -// ToVote converts a CommitSig to a Vote. -// If the CommitSig is nil, the Vote will be nil. 
-func (commit *Commit) ToVote(cs *CommitSig) *Vote { - // TODO: use commit.validatorSet to reconstruct vote - // and deprecate .toVote - return cs.toVote() -} - // Height returns the height of the commit func (commit *Commit) Height() int64 { commit.memoizeHeightRound() @@ -602,8 +642,8 @@ func (commit *Commit) BitArray() *cmn.BitArray { // GetByIndex returns the vote corresponding to a given validator index. // Panics if `index >= commit.Size()`. // Implements VoteSetReader. -func (commit *Commit) GetByIndex(index int) *Vote { - return commit.ToVote(commit.Precommits[index]) +func (commit *Commit) GetByIndex(valIdx int) *Vote { + return commit.GetVote(valIdx) } // IsCommit returns true if there is at least one vote. diff --git a/types/block_test.go b/types/block_test.go index 75b5c19d..ff7edd27 100644 --- a/types/block_test.go +++ b/types/block_test.go @@ -342,3 +342,27 @@ func TestBlockMaxDataBytesUnknownEvidence(t *testing.T) { } } } + +func TestCommitToVoteSet(t *testing.T) { + lastID := makeBlockIDRandom() + h := int64(3) + + voteSet, valSet, vals := randVoteSet(h-1, 1, PrecommitType, 10, 1) + commit, err := MakeCommit(lastID, h-1, 1, voteSet, vals) + assert.NoError(t, err) + + chainID := voteSet.ChainID() + voteSet2 := CommitToVoteSet(chainID, commit, valSet) + + for i := 0; i < len(vals); i++ { + vote1 := voteSet.GetByIndex(i) + vote2 := voteSet2.GetByIndex(i) + vote3 := commit.GetVote(i) + + vote1bz := cdc.MustMarshalBinaryBare(vote1) + vote2bz := cdc.MustMarshalBinaryBare(vote2) + vote3bz := cdc.MustMarshalBinaryBare(vote3) + assert.Equal(t, vote1bz, vote2bz) + assert.Equal(t, vote1bz, vote3bz) + } +} diff --git a/types/event_bus.go b/types/event_bus.go index da959090..b91340c7 100644 --- a/types/event_bus.go +++ b/types/event_bus.go @@ -4,6 +4,7 @@ import ( "context" "fmt" + "github.com/tendermint/tendermint/abci/types" cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/libs/log" tmpubsub "github.com/tendermint/tendermint/libs/pubsub" @@ -90,20 +91,32 @@ func (b *EventBus) UnsubscribeAll(ctx context.Context, subscriber string) error func (b *EventBus) Publish(eventType string, eventData TMEventData) error { // no explicit deadline for publishing events ctx := context.Background() - b.pubsub.PublishWithTags(ctx, eventData, map[string]string{EventTypeKey: eventType}) - return nil + return b.pubsub.PublishWithEvents(ctx, eventData, map[string][]string{EventTypeKey: {eventType}}) } -func (b *EventBus) validateAndStringifyTags(tags []cmn.KVPair, logger log.Logger) map[string]string { - result := make(map[string]string) - for _, tag := range tags { - // basic validation - if len(tag.Key) == 0 { - logger.Debug("Got tag with an empty key (skipping)", "tag", tag) +// validateAndStringifyEvents takes a slice of event objects and creates a +// map of stringified events where each key is composed of the event +// type and each of the event's attributes keys in the form of +// "{event.Type}.{attribute.Key}" and the value is each attribute's value. 
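+// For example, an event {Type: "transfer", Attributes: [{Key: "amount", Value: "5"}]} produces the map entry "transfer.amount" -> ["5"]; values of attributes that share a composite key are appended in order.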
+func (b *EventBus) validateAndStringifyEvents(events []types.Event, logger log.Logger) map[string][]string { + result := make(map[string][]string) + for _, event := range events { + if len(event.Type) == 0 { + logger.Debug("Got an event with an empty type (skipping)", "event", event) continue } - result[string(tag.Key)] = string(tag.Value) + + for _, attr := range event.Attributes { + if len(attr.Key) == 0 { + logger.Debug("Got an event attribute with an empty key (skipping)", "event", event) + continue + } + + compositeTag := fmt.Sprintf("%s.%s", event.Type, string(attr.Key)) + result[compositeTag] = append(result[compositeTag], string(attr.Value)) + } } + return result } @@ -111,31 +124,27 @@ func (b *EventBus) PublishEventNewBlock(data EventDataNewBlock) error { // no explicit deadline for publishing events ctx := context.Background() - resultTags := append(data.ResultBeginBlock.Tags, data.ResultEndBlock.Tags...) - tags := b.validateAndStringifyTags(resultTags, b.Logger.With("block", data.Block.StringShort())) + resultEvents := append(data.ResultBeginBlock.Events, data.ResultEndBlock.Events...) + events := b.validateAndStringifyEvents(resultEvents, b.Logger.With("block", data.Block.StringShort())) - // add predefined tags - logIfTagExists(EventTypeKey, tags, b.Logger) - tags[EventTypeKey] = EventNewBlock + // add predefined new block event + events[EventTypeKey] = append(events[EventTypeKey], EventNewBlock) - b.pubsub.PublishWithTags(ctx, data, tags) - return nil + return b.pubsub.PublishWithEvents(ctx, data, events) } func (b *EventBus) PublishEventNewBlockHeader(data EventDataNewBlockHeader) error { // no explicit deadline for publishing events ctx := context.Background() - resultTags := append(data.ResultBeginBlock.Tags, data.ResultEndBlock.Tags...) + resultEvents := append(data.ResultBeginBlock.Events, data.ResultEndBlock.Events...) // TODO: Create StringShort method for Header and use it in logger.
- tags := b.validateAndStringifyTags(resultTags, b.Logger.With("header", data.Header)) + events := b.validateAndStringifyEvents(resultEvents, b.Logger.With("header", data.Header)) - // add predefined tags - logIfTagExists(EventTypeKey, tags, b.Logger) - tags[EventTypeKey] = EventNewBlockHeader + // add predefined new block header event + events[EventTypeKey] = append(events[EventTypeKey], EventNewBlockHeader) - b.pubsub.PublishWithTags(ctx, data, tags) - return nil + return b.pubsub.PublishWithEvents(ctx, data, events) } func (b *EventBus) PublishEventVote(data EventDataVote) error { @@ -153,20 +162,14 @@ func (b *EventBus) PublishEventTx(data EventDataTx) error { // no explicit deadline for publishing events ctx := context.Background() - tags := b.validateAndStringifyTags(data.Result.Tags, b.Logger.With("tx", data.Tx)) + events := b.validateAndStringifyEvents(data.Result.Events, b.Logger.With("tx", data.Tx)) // add predefined tags - logIfTagExists(EventTypeKey, tags, b.Logger) - tags[EventTypeKey] = EventTx + events[EventTypeKey] = append(events[EventTypeKey], EventTx) + events[TxHashKey] = append(events[TxHashKey], fmt.Sprintf("%X", data.Tx.Hash())) + events[TxHeightKey] = append(events[TxHeightKey], fmt.Sprintf("%d", data.Height)) - logIfTagExists(TxHashKey, tags, b.Logger) - tags[TxHashKey] = fmt.Sprintf("%X", data.Tx.Hash()) - - logIfTagExists(TxHeightKey, tags, b.Logger) - tags[TxHeightKey] = fmt.Sprintf("%d", data.Height) - - b.pubsub.PublishWithTags(ctx, data, tags) - return nil + return b.pubsub.PublishWithEvents(ctx, data, events) } func (b *EventBus) PublishEventNewRoundStep(data EventDataRoundState) error { @@ -209,12 +212,6 @@ func (b *EventBus) PublishEventValidatorSetUpdates(data EventDataValidatorSetUpd return b.Publish(EventValidatorSetUpdates, data) } -func logIfTagExists(tag string, tags map[string]string, logger log.Logger) { - if value, ok := tags[tag]; ok { - logger.Error("Found predefined tag (value will be overwritten)", "tag", tag, "value", value) - } -} - //----------------------------------------------------------------------------- type NopEventBus struct{} diff --git a/types/event_bus_test.go b/types/event_bus_test.go index 508b423a..45590217 100644 --- a/types/event_bus_test.go +++ b/types/event_bus_test.go @@ -22,10 +22,15 @@ func TestEventBusPublishEventTx(t *testing.T) { defer eventBus.Stop() tx := Tx("foo") - result := abci.ResponseDeliverTx{Data: []byte("bar"), Tags: []cmn.KVPair{{Key: []byte("baz"), Value: []byte("1")}}} + result := abci.ResponseDeliverTx{ + Data: []byte("bar"), + Events: []abci.Event{ + {Type: "testType", Attributes: []cmn.KVPair{{Key: []byte("baz"), Value: []byte("1")}}}, + }, + } // PublishEventTx adds all these 3 tags, so the query below should work - query := fmt.Sprintf("tm.event='Tx' AND tx.height=1 AND tx.hash='%X' AND baz=1", tx.Hash()) + query := fmt.Sprintf("tm.event='Tx' AND tx.height=1 AND tx.hash='%X' AND testType.baz=1", tx.Hash()) txsSub, err := eventBus.Subscribe(context.Background(), "test", tmquery.MustParse(query)) require.NoError(t, err) @@ -62,11 +67,19 @@ func TestEventBusPublishEventNewBlock(t *testing.T) { defer eventBus.Stop() block := MakeBlock(0, []Tx{}, nil, []Evidence{}) - resultBeginBlock := abci.ResponseBeginBlock{Tags: []cmn.KVPair{{Key: []byte("baz"), Value: []byte("1")}}} - resultEndBlock := abci.ResponseEndBlock{Tags: []cmn.KVPair{{Key: []byte("foz"), Value: []byte("2")}}} + resultBeginBlock := abci.ResponseBeginBlock{ + Events: []abci.Event{ + {Type: "testType", Attributes: []cmn.KVPair{{Key:
[]byte("baz"), Value: []byte("1")}}}, + }, + } + resultEndBlock := abci.ResponseEndBlock{ + Events: []abci.Event{ + {Type: "testType", Attributes: []cmn.KVPair{{Key: []byte("foz"), Value: []byte("2")}}}, + }, + } // PublishEventNewBlock adds the tm.event tag, so the query below should work - query := "tm.event='NewBlock' AND baz=1 AND foz=2" + query := "tm.event='NewBlock' AND testType.baz=1 AND testType.foz=2" blocksSub, err := eventBus.Subscribe(context.Background(), "test", tmquery.MustParse(query)) require.NoError(t, err) @@ -94,6 +107,106 @@ func TestEventBusPublishEventNewBlock(t *testing.T) { } } +func TestEventBusPublishEventTxDuplicateKeys(t *testing.T) { + eventBus := NewEventBus() + err := eventBus.Start() + require.NoError(t, err) + defer eventBus.Stop() + + tx := Tx("foo") + result := abci.ResponseDeliverTx{ + Data: []byte("bar"), + Events: []abci.Event{ + { + Type: "transfer", + Attributes: []cmn.KVPair{ + {Key: []byte("sender"), Value: []byte("foo")}, + {Key: []byte("recipient"), Value: []byte("bar")}, + {Key: []byte("amount"), Value: []byte("5")}, + }, + }, + { + Type: "transfer", + Attributes: []cmn.KVPair{ + {Key: []byte("sender"), Value: []byte("baz")}, + {Key: []byte("recipient"), Value: []byte("cat")}, + {Key: []byte("amount"), Value: []byte("13")}, + }, + }, + { + Type: "withdraw.rewards", + Attributes: []cmn.KVPair{ + {Key: []byte("address"), Value: []byte("bar")}, + {Key: []byte("source"), Value: []byte("iceman")}, + {Key: []byte("amount"), Value: []byte("33")}, + }, + }, + }, + } + + testCases := []struct { + query string + expectResults bool + }{ + { + "tm.event='Tx' AND tx.height=1 AND transfer.sender='DoesNotExist'", + false, + }, + { + "tm.event='Tx' AND tx.height=1 AND transfer.sender='foo'", + true, + }, + { + "tm.event='Tx' AND tx.height=1 AND transfer.sender='baz'", + true, + }, + { + "tm.event='Tx' AND tx.height=1 AND transfer.sender='foo' AND transfer.sender='baz'", + true, + }, + { + "tm.event='Tx' AND tx.height=1 AND transfer.sender='foo' AND transfer.sender='DoesNotExist'", + false, + }, + } + + for i, tc := range testCases { + sub, err := eventBus.Subscribe(context.Background(), fmt.Sprintf("client-%d", i), tmquery.MustParse(tc.query)) + require.NoError(t, err) + + done := make(chan struct{}) + + go func() { + msg := <-sub.Out() + data := msg.Data().(EventDataTx) + assert.Equal(t, int64(1), data.Height) + assert.Equal(t, uint32(0), data.Index) + assert.Equal(t, tx, data.Tx) + assert.Equal(t, result, data.Result) + close(done) + }() + + err = eventBus.PublishEventTx(EventDataTx{TxResult{ + Height: 1, + Index: 0, + Tx: tx, + Result: result, + }}) + assert.NoError(t, err) + + select { + case <-done: + if !tc.expectResults { + require.Fail(t, "unexpected transaction result(s) from subscription") + } + case <-time.After(1 * time.Second): + if tc.expectResults { + require.Fail(t, "failed to receive a transaction after 1 second") + } + } + } +} + func TestEventBusPublishEventNewBlockHeader(t *testing.T) { eventBus := NewEventBus() err := eventBus.Start() @@ -101,11 +214,19 @@ func TestEventBusPublishEventNewBlockHeader(t *testing.T) { defer eventBus.Stop() block := MakeBlock(0, []Tx{}, nil, []Evidence{}) - resultBeginBlock := abci.ResponseBeginBlock{Tags: []cmn.KVPair{{Key: []byte("baz"), Value: []byte("1")}}} - resultEndBlock := abci.ResponseEndBlock{Tags: []cmn.KVPair{{Key: []byte("foz"), Value: []byte("2")}}} + resultBeginBlock := abci.ResponseBeginBlock{ + Events: []abci.Event{ + {Type: "testType", Attributes: []cmn.KVPair{{Key: []byte("baz"), Value: 
[]byte("1")}}}, + }, + } + resultEndBlock := abci.ResponseEndBlock{ + Events: []abci.Event{ + {Type: "testType", Attributes: []cmn.KVPair{{Key: []byte("foz"), Value: []byte("2")}}}, + }, + } // PublishEventNewBlockHeader adds the tm.event tag, so the query below should work - query := "tm.event='NewBlockHeader' AND baz=1 AND foz=2" + query := "tm.event='NewBlockHeader' AND testType.baz=1 AND testType.foz=2" headersSub, err := eventBus.Subscribe(context.Background(), "test", tmquery.MustParse(query)) require.NoError(t, err) diff --git a/types/part_set.go b/types/part_set.go index 4533fb75..389db7a0 100644 --- a/types/part_set.go +++ b/types/part_set.go @@ -226,7 +226,7 @@ func (ps *PartSet) IsComplete() bool { func (ps *PartSet) GetReader() io.Reader { if !ps.IsComplete() { - cmn.PanicSanity("Cannot GetReader() on incomplete PartSet") + panic("Cannot GetReader() on incomplete PartSet") } return NewPartSetReader(ps.parts) } diff --git a/types/validator.go b/types/validator.go index 325d20f5..a662eb6c 100644 --- a/types/validator.go +++ b/types/validator.go @@ -52,8 +52,7 @@ func (v *Validator) CompareProposerPriority(other *Validator) *Validator { } else if result > 0 { return other } else { - cmn.PanicSanity("Cannot compare identical validators") - return nil + panic("Cannot compare identical validators") } } } diff --git a/types/validator_set.go b/types/validator_set.go index 36ce67f0..9e78fbc7 100644 --- a/types/validator_set.go +++ b/types/validator_set.go @@ -612,7 +612,7 @@ func (vals *ValidatorSet) VerifyCommit(chainID string, blockID BlockID, height i } _, val := vals.GetByIndex(idx) // Validate signature. - precommitSignBytes := commit.VoteSignBytes(chainID, precommit) + precommitSignBytes := commit.VoteSignBytes(chainID, idx) if !val.PubKey.VerifyBytes(precommitSignBytes, precommit.Signature) { return fmt.Errorf("Invalid commit -- invalid signature: %v", precommit) } @@ -689,14 +689,14 @@ func (vals *ValidatorSet) VerifyFutureCommit(newSet *ValidatorSet, chainID strin return cmn.NewError("Invalid commit -- not precommit @ index %v", idx) } // See if this validator is in oldVals. - idx, val := oldVals.GetByAddress(precommit.ValidatorAddress) - if val == nil || seen[idx] { + oldIdx, val := oldVals.GetByAddress(precommit.ValidatorAddress) + if val == nil || seen[oldIdx] { continue // missing or double vote... } - seen[idx] = true + seen[oldIdx] = true // Validate signature. - precommitSignBytes := commit.VoteSignBytes(chainID, precommit) + precommitSignBytes := commit.VoteSignBytes(chainID, idx) if !val.PubKey.VerifyBytes(precommitSignBytes, precommit.Signature) { return cmn.NewError("Invalid commit -- invalid signature: %v", precommit) } diff --git a/types/vote.go b/types/vote.go index ad05d688..6fcbd3ff 100644 --- a/types/vote.go +++ b/types/vote.go @@ -93,7 +93,7 @@ func (vote *Vote) String() string { case PrecommitType: typeString = "Precommit" default: - cmn.PanicSanity("Unknown vote type") + panic("Unknown vote type") } return fmt.Sprintf("Vote{%v:%X %v/%02d/%v(%v) %X %X @ %s}", diff --git a/types/vote_set.go b/types/vote_set.go index 1cd0f228..a4a42bb4 100644 --- a/types/vote_set.go +++ b/types/vote_set.go @@ -70,7 +70,7 @@ type VoteSet struct { // Constructs a new VoteSet struct used to accumulate votes for given height/round. 
func NewVoteSet(chainID string, height int64, round int, type_ SignedMsgType, valSet *ValidatorSet) *VoteSet { if height == 0 { - cmn.PanicSanity("Cannot make VoteSet for height == 0, doesn't make sense.") + panic("Cannot make VoteSet for height == 0, doesn't make sense.") } return &VoteSet{ chainID: chainID, @@ -130,7 +130,7 @@ func (voteSet *VoteSet) Size() int { // NOTE: Vote must not be nil func (voteSet *VoteSet) AddVote(vote *Vote) (added bool, err error) { if voteSet == nil { - cmn.PanicSanity("AddVote() on nil VoteSet") + panic("AddVote() on nil VoteSet") } voteSet.mtx.Lock() defer voteSet.mtx.Unlock() @@ -196,7 +196,7 @@ func (voteSet *VoteSet) addVote(vote *Vote) (added bool, err error) { return added, NewConflictingVoteError(val, conflicting, vote) } if !added { - cmn.PanicSanity("Expected to add non-conflicting vote") + panic("Expected to add non-conflicting vote") } return added, nil } @@ -220,7 +220,7 @@ func (voteSet *VoteSet) addVerifiedVote(vote *Vote, blockKey string, votingPower // Already exists in voteSet.votes? if existing := voteSet.votes[valIndex]; existing != nil { if existing.BlockID.Equals(vote.BlockID) { - cmn.PanicSanity("addVerifiedVote does not expect duplicate votes") + panic("addVerifiedVote does not expect duplicate votes") } else { conflicting = existing } @@ -290,7 +290,7 @@ func (voteSet *VoteSet) addVerifiedVote(vote *Vote, blockKey string, votingPower // NOTE: VoteSet must not be nil func (voteSet *VoteSet) SetPeerMaj23(peerID P2PID, blockID BlockID) error { if voteSet == nil { - cmn.PanicSanity("SetPeerMaj23() on nil VoteSet") + panic("SetPeerMaj23() on nil VoteSet") } voteSet.mtx.Lock() defer voteSet.mtx.Unlock() @@ -363,7 +363,7 @@ func (voteSet *VoteSet) GetByAddress(address []byte) *Vote { defer voteSet.mtx.Unlock() valIndex, val := voteSet.valSet.GetByAddress(address) if val == nil { - cmn.PanicSanity("GetByAddress(address) returned nil") + panic("GetByAddress(address) returned nil") } return voteSet.votes[valIndex] } @@ -528,16 +528,19 @@ func (voteSet *VoteSet) sumTotalFrac() (int64, int64, float64) { //-------------------------------------------------------------------------------- // Commit +// MakeCommit constructs a Commit from the VoteSet. +// Panics if the vote type is not PrecommitType or if +// there's no +2/3 votes for a single block. func (voteSet *VoteSet) MakeCommit() *Commit { if voteSet.type_ != PrecommitType { - cmn.PanicSanity("Cannot MakeCommit() unless VoteSet.Type is PrecommitType") + panic("Cannot MakeCommit() unless VoteSet.Type is PrecommitType") } voteSet.mtx.Lock() defer voteSet.mtx.Unlock() // Make sure we have a 2/3 majority if voteSet.maj23 == nil { - cmn.PanicSanity("Cannot MakeCommit() unless a blockhash has +2/3") + panic("Cannot MakeCommit() unless a blockhash has +2/3") } // For every validator, get the precommit diff --git a/version/version.go b/version/version.go index a42a8f00..1a15717f 100644 --- a/version/version.go +++ b/version/version.go @@ -20,7 +20,7 @@ const ( // Must be a string because scripts like dist.sh read this file. // XXX: Don't change the name of this variable or you will break // automation :) - TMCoreSemVer = "0.31.3" + TMCoreSemVer = "0.31.7" // ABCISemVer is the semantic version of the ABCI library ABCISemVer = "0.16.0"
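Taken together, the kv indexer changes above can be exercised as follows. This is a minimal sketch assembled from the APIs used in kv_test.go; the in-memory database and the query string are illustrative, not part of this diff:

```go
package main

import (
	"fmt"

	abci "github.com/tendermint/tendermint/abci/types"
	cmn "github.com/tendermint/tendermint/libs/common"
	dbm "github.com/tendermint/tendermint/libs/db"
	"github.com/tendermint/tendermint/libs/pubsub/query"
	"github.com/tendermint/tendermint/state/txindex/kv"
	"github.com/tendermint/tendermint/types"
)

func main() {
	// Index only the composite key "account.number"; IndexAllTags() would
	// index every event attribute instead.
	indexer := kv.NewTxIndex(dbm.NewMemDB(), kv.IndexTags([]string{"account.number"}))

	txResult := &types.TxResult{
		Height: 1,
		Index:  0,
		Tx:     types.Tx("HELLO WORLD"),
		Result: abci.ResponseDeliverTx{
			Code: abci.CodeTypeOK,
			// Events replace the old flat Tags; each attribute is indexed
			// under "<type>.<key>", here "account.number".
			Events: []abci.Event{
				{Type: "account", Attributes: []cmn.KVPair{{Key: []byte("number"), Value: []byte("1")}}},
			},
		},
	}
	if err := indexer.Index(txResult); err != nil {
		panic(err)
	}

	// Queries use the same composite form.
	results, err := indexer.Search(query.MustParse("account.number = 1"))
	if err != nil {
		panic(err)
	}
	fmt.Printf("found %d tx(s)\n", len(results))
}
```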