mirror of
https://github.com/fluencelabs/tendermint
synced 2025-07-16 21:01:59 +00:00
Compare commits
127 Commits
v0.0.1
...
josef/chec
Author | SHA1 | Date | |
---|---|---|---|
|
2506acb958 | ||
|
4214694d2b | ||
|
782c655823 | ||
|
048ac8d94b | ||
|
21bfd7fba9 | ||
|
0dd6b92a64 | ||
|
9dcee69ac2 | ||
|
b522ad0052 | ||
|
b9508ffecb | ||
|
a6ac611e77 | ||
|
e7bf25844f | ||
|
bcf10d5bae | ||
|
5997e75c84 | ||
|
97ceeed054 | ||
|
3ef9e453b7 | ||
|
c7b324d3f2 | ||
|
cfd42be0fe | ||
|
f1f243d749 | ||
|
fcce9ed4db | ||
|
c21b4fcc93 | ||
|
86cf8ee3f9 | ||
|
6f1ccb6c49 | ||
|
a076b48202 | ||
|
a7358bc69f | ||
|
27909e5d2a | ||
|
1e073817de | ||
|
2bb1a87d41 | ||
|
d0d9ef16f7 | ||
|
60b833403c | ||
|
debf8f70c9 | ||
|
4519ef3109 | ||
|
86f2765b32 | ||
|
e2f5471545 | ||
|
4905640e9b | ||
|
4c60ab83c8 | ||
|
5051a1f7bc | ||
|
8711af608f | ||
|
9926ae768e | ||
|
5df6cf563a | ||
|
2c26d95ab9 | ||
|
a2a68df521 | ||
|
43348022d6 | ||
|
40dbad9915 | ||
|
2585187880 | ||
|
d76952c674 | ||
|
7b162f5c54 | ||
|
70592cc4d8 | ||
|
90997ab1b5 | ||
|
b738add80c | ||
|
968e955c46 | ||
|
ebf815ee57 | ||
|
8db7e74b87 | ||
|
4253e67c07 | ||
|
671c5c9b84 | ||
|
f2aa1bf50e | ||
|
90465f727f | ||
|
621c0e629d | ||
|
c0e8fb5085 | ||
|
d2eab536ac | ||
|
18bd5b627a | ||
|
4474a5ec70 | ||
|
3cb7013c38 | ||
|
5b8888b01b | ||
|
439312b9c0 | ||
|
f1cf10150a | ||
|
50b87c3445 | ||
|
f2119c35de | ||
|
d35c08724c | ||
|
1c6d9d20e4 | ||
|
4695414393 | ||
|
def5c8cf12 | ||
|
b6da8880c2 | ||
|
a453628c4e | ||
|
4e4224213f | ||
|
b5b3b85697 | ||
|
18d2c45c33 | ||
|
c3df21fe82 | ||
|
bcec8be035 | ||
|
9a415b0572 | ||
|
40da355234 | ||
|
f965a4db15 | ||
|
75ffa2bf1c | ||
|
086d6cbe8c | ||
|
6cc3f4d87c | ||
|
3cfd9757a7 | ||
|
882622ec10 | ||
|
1ecf814838 | ||
|
e4a03f249d | ||
|
56d8aa42b3 | ||
|
79e9f20578 | ||
|
ab24925c94 | ||
|
0ae41cc663 | ||
|
422d04c8ba | ||
|
2233dd45bd | ||
|
9199f3f613 | ||
|
6c1a4b5137 | ||
|
c7bb998497 | ||
|
7b72436c75 | ||
|
a0234affb6 | ||
|
9390a810eb | ||
|
a49d80b89c | ||
|
ccfe75ec4a | ||
|
d586945d69 | ||
|
ae88965ff6 | ||
|
2338134836 | ||
|
1b33a50e6d | ||
|
3c7bb6b571 | ||
|
5fa540bdc9 | ||
|
52727863e1 | ||
|
e3f840e6a6 | ||
|
ed63e1f378 | ||
|
55b7118c98 | ||
|
5a25b75b1d | ||
|
a4d9539544 | ||
|
1bb8e02a96 | ||
|
6de7effb05 | ||
|
25a3c8b172 | ||
|
85be2a554e | ||
|
1d4afb179b | ||
|
660bd4a53e | ||
|
81b9bdf400 | ||
|
926127c774 | ||
|
03085c2da2 | ||
|
7af4b5086a | ||
|
60b2ae5f5a | ||
|
a6349f5063 | ||
|
22bcfca87a |
@@ -3,7 +3,7 @@ version: 2
|
||||
defaults: &defaults
|
||||
working_directory: /go/src/github.com/tendermint/tendermint
|
||||
docker:
|
||||
- image: circleci/golang:1.12.0
|
||||
- image: circleci/golang
|
||||
environment:
|
||||
GOBIN: /tmp/workspace/bin
|
||||
|
||||
@@ -14,6 +14,9 @@ docs_update_config: &docs_update_config
|
||||
environment:
|
||||
AWS_REGION: us-east-1
|
||||
|
||||
release_management_docker: &release_management_docker
|
||||
machine: true
|
||||
|
||||
jobs:
|
||||
setup_dependencies:
|
||||
<<: *defaults
|
||||
@@ -192,7 +195,7 @@ jobs:
|
||||
name: run localnet and exit on failure
|
||||
command: |
|
||||
set -x
|
||||
docker run --rm -v "$PWD":/go/src/github.com/tendermint/tendermint -w /go/src/github.com/tendermint/tendermint golang:1.11.4 make build-linux
|
||||
docker run --rm -v "$PWD":/go/src/github.com/tendermint/tendermint -w /go/src/github.com/tendermint/tendermint golang make build-linux
|
||||
make localnet-start &
|
||||
./scripts/localnet-blocks-test.sh 40 5 10 localhost
|
||||
|
||||
@@ -256,6 +259,105 @@ jobs:
|
||||
echo "Website build started"
|
||||
fi
|
||||
|
||||
prepare_build:
|
||||
<<: *defaults
|
||||
steps:
|
||||
- checkout
|
||||
- run:
|
||||
name: Get next release number
|
||||
command: |
|
||||
export LAST_TAG="`git describe --tags --abbrev=0 --match "${CIRCLE_BRANCH}.*"`"
|
||||
echo "Last tag: ${LAST_TAG}"
|
||||
if [ -z "${LAST_TAG}" ]; then
|
||||
export LAST_TAG="${CIRCLE_BRANCH}"
|
||||
echo "Last tag not found. Possibly fresh branch or feature branch. Setting ${LAST_TAG} as tag."
|
||||
fi
|
||||
export NEXT_TAG="`python -u scripts/release_management/bump-semver.py --version "${LAST_TAG}"`"
|
||||
echo "Next tag: ${NEXT_TAG}"
|
||||
echo "export CIRCLE_TAG=\"${NEXT_TAG}\"" > release-version.source
|
||||
- run:
|
||||
name: Build dependencies
|
||||
command: |
|
||||
make get_tools get_vendor_deps
|
||||
- persist_to_workspace:
|
||||
root: .
|
||||
paths:
|
||||
- "release-version.source"
|
||||
- save_cache:
|
||||
key: v1-release-deps-{{ .Branch }}-{{ .Revision }}
|
||||
paths:
|
||||
- "vendor"
|
||||
|
||||
build_artifacts:
|
||||
<<: *defaults
|
||||
parallelism: 4
|
||||
steps:
|
||||
- checkout
|
||||
- restore_cache:
|
||||
keys:
|
||||
- v1-release-deps-{{ .Branch }}-{{ .Revision }}
|
||||
- attach_workspace:
|
||||
at: /tmp/workspace
|
||||
- run:
|
||||
name: Build artifact
|
||||
command: |
|
||||
# Setting CIRCLE_TAG because we do not tag the release ourselves.
|
||||
source /tmp/workspace/release-version.source
|
||||
if test ${CIRCLE_NODE_INDEX:-0} == 0 ;then export GOOS=linux GOARCH=amd64 && export OUTPUT=build/tendermint_${GOOS}_${GOARCH} && make build && python -u scripts/release_management/zip-file.py ;fi
|
||||
if test ${CIRCLE_NODE_INDEX:-0} == 1 ;then export GOOS=darwin GOARCH=amd64 && export OUTPUT=build/tendermint_${GOOS}_${GOARCH} && make build && python -u scripts/release_management/zip-file.py ;fi
|
||||
if test ${CIRCLE_NODE_INDEX:-0} == 2 ;then export GOOS=windows GOARCH=amd64 && export OUTPUT=build/tendermint_${GOOS}_${GOARCH} && make build && python -u scripts/release_management/zip-file.py ;fi
|
||||
if test ${CIRCLE_NODE_INDEX:-0} == 3 ;then export GOOS=linux GOARCH=arm && export OUTPUT=build/tendermint_${GOOS}_${GOARCH} && make build && python -u scripts/release_management/zip-file.py ;fi
|
||||
- persist_to_workspace:
|
||||
root: build
|
||||
paths:
|
||||
- "*.zip"
|
||||
- "tendermint_linux_amd64"
|
||||
|
||||
release_artifacts:
|
||||
<<: *defaults
|
||||
steps:
|
||||
- checkout
|
||||
- attach_workspace:
|
||||
at: /tmp/workspace
|
||||
- run:
|
||||
name: Deploy to GitHub
|
||||
command: |
|
||||
# Setting CIRCLE_TAG because we do not tag the release ourselves.
|
||||
source /tmp/workspace/release-version.source
|
||||
echo "---"
|
||||
ls -la /tmp/workspace/*.zip
|
||||
echo "---"
|
||||
python -u scripts/release_management/sha-files.py
|
||||
echo "---"
|
||||
cat /tmp/workspace/SHA256SUMS
|
||||
echo "---"
|
||||
export RELEASE_ID="`python -u scripts/release_management/github-draft.py`"
|
||||
echo "Release ID: ${RELEASE_ID}"
|
||||
#Todo: Parallelize uploads
|
||||
export GOOS=linux GOARCH=amd64 && python -u scripts/release_management/github-upload.py --id "${RELEASE_ID}"
|
||||
export GOOS=darwin GOARCH=amd64 && python -u scripts/release_management/github-upload.py --id "${RELEASE_ID}"
|
||||
export GOOS=windows GOARCH=amd64 && python -u scripts/release_management/github-upload.py --id "${RELEASE_ID}"
|
||||
export GOOS=linux GOARCH=arm && python -u scripts/release_management/github-upload.py --id "${RELEASE_ID}"
|
||||
python -u scripts/release_management/github-upload.py --file "/tmp/workspace/SHA256SUMS" --id "${RELEASE_ID}"
|
||||
python -u scripts/release_management/github-publish.py --id "${RELEASE_ID}"
|
||||
|
||||
release_docker:
|
||||
<<: *release_management_docker
|
||||
steps:
|
||||
- checkout
|
||||
- attach_workspace:
|
||||
at: /tmp/workspace
|
||||
- run:
|
||||
name: Deploy to Docker Hub
|
||||
command: |
|
||||
# Setting CIRCLE_TAG because we do not tag the release ourselves.
|
||||
source /tmp/workspace/release-version.source
|
||||
cp /tmp/workspace/tendermint_linux_amd64 DOCKER/tendermint
|
||||
docker build --label="tendermint" --tag="tendermint/tendermint:${CIRCLE_TAG}" --tag="tendermint/tendermint:latest" "DOCKER"
|
||||
docker login -u "${DOCKERHUB_USER}" --password-stdin <<< "${DOCKERHUB_PASS}"
|
||||
docker push "tendermint/tendermint"
|
||||
docker logout
|
||||
|
||||
workflows:
|
||||
version: 2
|
||||
test-suite:
|
||||
@@ -292,3 +394,25 @@ workflows:
|
||||
- upload_coverage:
|
||||
requires:
|
||||
- test_cover
|
||||
release:
|
||||
jobs:
|
||||
- prepare_build
|
||||
- build_artifacts:
|
||||
requires:
|
||||
- prepare_build
|
||||
- release_artifacts:
|
||||
requires:
|
||||
- prepare_build
|
||||
- build_artifacts
|
||||
filters:
|
||||
branches:
|
||||
only:
|
||||
- /v[0-9]+\.[0-9]+/
|
||||
- release_docker:
|
||||
requires:
|
||||
- prepare_build
|
||||
- build_artifacts
|
||||
filters:
|
||||
branches:
|
||||
only:
|
||||
- /v[0-9]+\.[0-9]+/
|
||||
|
9
.github/PULL_REQUEST_TEMPLATE.md
vendored
9
.github/PULL_REQUEST_TEMPLATE.md
vendored
@@ -1,5 +1,12 @@
|
||||
<!-- Thanks for filing a PR! Before hitting the button, please check the following items.-->
|
||||
<!--
|
||||
|
||||
Thanks for filing a PR! Before hitting the button, please check the following items.
|
||||
Please note that every non-trivial PR must reference an issue that explains the
|
||||
changes in the PR.
|
||||
|
||||
-->
|
||||
|
||||
* [ ] Referenced an issue explaining the need for the change
|
||||
* [ ] Updated all relevant documentation in docs
|
||||
* [ ] Updated all code comments where relevant
|
||||
* [ ] Wrote tests
|
||||
|
1
.gitignore
vendored
1
.gitignore
vendored
@@ -35,6 +35,7 @@ shunit2
|
||||
addrbook.json
|
||||
|
||||
*/vendor
|
||||
.vendor-new/
|
||||
*/.glide
|
||||
.terraform
|
||||
terraform.tfstate
|
||||
|
238
CHANGELOG.md
238
CHANGELOG.md
@@ -1,5 +1,243 @@
|
||||
# Changelog
|
||||
|
||||
## v0.31.6
|
||||
|
||||
*May 31st, 2019*
|
||||
|
||||
This release contains many fixes and improvements, primarily for p2p functionality.
|
||||
It also fixes a security issue in the mempool package.
|
||||
|
||||
With this release, Tendermint now supports [boltdb](https://github.com/etcd-io/bbolt), although
|
||||
in experimental mode. Feel free to try and report to us any findings/issues.
|
||||
Note also that the build tags for compiling CLevelDB have changed.
|
||||
|
||||
Special thanks to external contributors on this release:
|
||||
@guagualvcha, @james-ray, @gregdhill, @climber73, @yutianwu,
|
||||
@carlosflrs, @defunctzombie, @leoluk, @needkane, @CrocdileChan
|
||||
|
||||
### BREAKING CHANGES:
|
||||
|
||||
* Go API
|
||||
- [libs/common] Removed deprecated `PanicSanity`, `PanicCrisis`,
|
||||
`PanicConsensus` and `PanicQ`
|
||||
- [mempool, state] [\#2659](https://github.com/tendermint/tendermint/issues/2659) `Mempool` now an interface that lives in the mempool package.
|
||||
See issue and PR for more details.
|
||||
- [p2p] [\#3346](https://github.com/tendermint/tendermint/issues/3346) `Reactor#InitPeer` method is added to `Reactor` interface
|
||||
- [types] [\#1648](https://github.com/tendermint/tendermint/issues/1648) `Commit#VoteSignBytes` signature was changed
|
||||
|
||||
### FEATURES:
|
||||
- [node] [\#2659](https://github.com/tendermint/tendermint/issues/2659) Add `node.Mempool()` method, which allows you to access mempool
|
||||
- [libs/db] [\#3604](https://github.com/tendermint/tendermint/pull/3604) Add experimental support for bolt db (etcd's fork of bolt) (@CrocdileChan)
|
||||
|
||||
### IMPROVEMENTS:
|
||||
- [cli] [\#3585](https://github.com/tendermint/tendermint/issues/3585) Add `--keep-addr-book` option to `unsafe_reset_all` cmd to not
|
||||
clear the address book (@climber73)
|
||||
- [cli] [\#3160](https://github.com/tendermint/tendermint/issues/3160) Add
|
||||
`--config=<path-to-config>` option to `testnet` cmd (@gregdhill)
|
||||
- [cli] [\#3661](https://github.com/tendermint/tendermint/pull/3661) Add
|
||||
`--hostname-suffix`, `--hostname` and `--random-monikers` options to `testnet`
|
||||
cmd for greater peer address/identity generation flexibility.
|
||||
- [crypto] [\#3672](https://github.com/tendermint/tendermint/issues/3672) Return more info in the `AddSignatureFromPubKey` error
|
||||
- [cs/replay] [\#3460](https://github.com/tendermint/tendermint/issues/3460) Check appHash for each block
|
||||
- [libs/db] [\#3611](https://github.com/tendermint/tendermint/issues/3611) Conditional compilation
|
||||
* Use `cleveldb` tag instead of `gcc` to compile Tendermint with CLevelDB or
|
||||
use `make build_c` / `make install_c` (full instructions can be found at
|
||||
https://tendermint.com/docs/introduction/install.html#compile-with-cleveldb-support)
|
||||
* Use `boltdb` tag to compile Tendermint with bolt db
|
||||
- [node] [\#3362](https://github.com/tendermint/tendermint/issues/3362) Return an error if `persistent_peers` list is invalid (except
|
||||
when IP lookup fails)
|
||||
- [p2p] [\#3463](https://github.com/tendermint/tendermint/pull/3463) Do not log "Can't add peer's address to addrbook" error for a private peer (@guagualvcha)
|
||||
- [p2p] [\#3531](https://github.com/tendermint/tendermint/issues/3531) Terminate session on nonce wrapping (@climber73)
|
||||
- [pex] [\#3647](https://github.com/tendermint/tendermint/pull/3647) Dial seeds, if any, instead of crawling peers first (@defunctzombie)
|
||||
- [rpc] [\#3534](https://github.com/tendermint/tendermint/pull/3534) Add support for batched requests/responses in JSON RPC
|
||||
- [rpc] [\#3362](https://github.com/tendermint/tendermint/issues/3362) `/dial_seeds` & `/dial_peers` return errors if addresses are
|
||||
incorrect (except when IP lookup fails)
|
||||
|
||||
### BUG FIXES:
|
||||
- [consensus] [\#3067](https://github.com/tendermint/tendermint/issues/3067) Fix replay from appHeight==0 with validator set changes (@james-ray)
|
||||
- [consensus] [\#3304](https://github.com/tendermint/tendermint/issues/3304) Create a peer state in consensus reactor before the peer
|
||||
is started (@guagualvcha)
|
||||
- [lite] [\#3669](https://github.com/tendermint/tendermint/issues/3669) Add context parameter to RPC Handlers in proxy routes (@yutianwu)
|
||||
- [mempool] [\#3322](https://github.com/tendermint/tendermint/issues/3322) When a block is committed, only remove committed txs from the mempool
|
||||
that were valid (ie. `ResponseDeliverTx.Code == 0`)
|
||||
- [p2p] [\#3338](https://github.com/tendermint/tendermint/issues/3338) Ensure `RemovePeer` is always called before `InitPeer` (upon a peer
|
||||
reconnecting to our node)
|
||||
- [p2p] [\#3532](https://github.com/tendermint/tendermint/issues/3532) Limit the number of attempts to connect to a peer in seed mode
|
||||
to 16 (as a result, the node will stop retrying after a 35 hours time window)
|
||||
- [p2p] [\#3362](https://github.com/tendermint/tendermint/issues/3362) Allow inbound peers to be persistent, including for seed nodes.
|
||||
- [pex] [\#3603](https://github.com/tendermint/tendermint/pull/3603) Dial seeds when addrbook needs more addresses (@defunctzombie)
|
||||
|
||||
### OTHERS:
|
||||
- [networks] fixes ansible integration script (@carlosflrs)
|
||||
|
||||
## v0.31.5
|
||||
|
||||
*April 16th, 2019*
|
||||
|
||||
This release fixes a regression from v0.31.4 where, in existing chains that
|
||||
were upgraded, `/validators` could return an empty validator set. This is true
|
||||
for almost all heights, given the validator set remains the same.
|
||||
|
||||
Special thanks to external contributors on this release:
|
||||
@brapse, @guagualvcha, @dongsam, @phucc
|
||||
|
||||
### IMPROVEMENTS:
|
||||
|
||||
- [libs/common] `CMap`: slight optimization in `Keys()` and `Values()` (@phucc)
|
||||
- [gitignore] gitignore: add .vendor-new (@dongsam)
|
||||
|
||||
### BUG FIXES:
|
||||
|
||||
- [state] [\#3537](https://github.com/tendermint/tendermint/pull/3537#issuecomment-482711833)
|
||||
`LoadValidators`: do not return an empty validator set
|
||||
- [blockchain] [\#3457](https://github.com/tendermint/tendermint/issues/3457)
|
||||
Fix "peer did not send us anything" in `fast_sync` mode when under high pressure
|
||||
|
||||
## v0.31.4
|
||||
|
||||
*April 12th, 2019*
|
||||
|
||||
This release fixes a regression from v0.31.3 which used the peer's `SocketAddr` to add the peer to
|
||||
the address book. This swallowed the peer's self-reported port which is important in case of reconnect.
|
||||
It brings back `NetAddress()` to `NodeInfo` and uses it instead of `SocketAddr` for adding peers.
|
||||
Additionally, it improves response time on the `/validators` or `/status` RPC endpoints.
|
||||
As a side-effect it makes these RPC endpoint more difficult to DoS and fixes a performance degradation in `ExecCommitBlock`.
|
||||
Also, it contains an [ADR](https://github.com/tendermint/tendermint/pull/3539) that proposes decoupling the
|
||||
responsibility for peer behaviour from the `p2p.Switch` (by @brapse).
|
||||
|
||||
Special thanks to external contributors on this release:
|
||||
@brapse, @guagualvcha, @mydring
|
||||
|
||||
### IMPROVEMENTS:
|
||||
|
||||
- [p2p] [\#3463](https://github.com/tendermint/tendermint/pull/3463) Do not log "Can't add peer's address to addrbook" error for a private peer
|
||||
- [p2p] [\#3547](https://github.com/tendermint/tendermint/pull/3547) Fix a couple of annoying typos (@mdyring)
|
||||
|
||||
### BUG FIXES:
|
||||
|
||||
- [docs] [\#3514](https://github.com/tendermint/tendermint/issues/3514) Fix block.Header.Time description (@melekes)
|
||||
- [p2p] [\#2716](https://github.com/tendermint/tendermint/issues/2716) Check if we're already connected to peer right before dialing it (@melekes)
|
||||
- [p2p] [\#3545](https://github.com/tendermint/tendermint/issues/3545) Add back `NetAddress()` to `NodeInfo` and use it instead of peer's `SocketAddr()` when adding a peer to the `PEXReactor` (potential fix for [\#3532](https://github.com/tendermint/tendermint/issues/3532))
|
||||
- [state] [\#3438](https://github.com/tendermint/tendermint/pull/3438)
|
||||
Persist validators every 100000 blocks even if no changes to the set
|
||||
occurred (@guagualvcha). This
|
||||
1) Prevents possible DoS attack using `/validators` or `/status` RPC
|
||||
endpoints. Before response time was growing linearly with height if no
|
||||
changes were made to the validator set.
|
||||
2) Fixes performance degradation in `ExecCommitBlock` where we call
|
||||
`LoadValidators` for each `Evidence` in the block.
|
||||
|
||||
## v0.31.3
|
||||
|
||||
*April 1st, 2019*
|
||||
|
||||
This release includes two security sensitive fixes: it ensures generated private
|
||||
keys are valid, and it prevents certain DNS lookups that would cause the node to
|
||||
panic if the lookup failed.
|
||||
|
||||
### BREAKING CHANGES:
|
||||
* Go API
|
||||
- [crypto/secp256k1] [\#3439](https://github.com/tendermint/tendermint/issues/3439)
|
||||
The `secp256k1.GenPrivKeySecp256k1` function has changed to guarantee that it returns a valid key, which means it
|
||||
will return a different private key than in previous versions for the same secret.
|
||||
|
||||
### BUG FIXES:
|
||||
|
||||
- [crypto/secp256k1] [\#3439](https://github.com/tendermint/tendermint/issues/3439)
|
||||
Ensure generated private keys are valid by randomly sampling until a valid key is found.
|
||||
Previously, it was possible (though rare!) to generate keys that exceeded the curve order.
|
||||
Such keys would lead to invalid signatures.
|
||||
- [p2p] [\#3522](https://github.com/tendermint/tendermint/issues/3522) Memoize
|
||||
socket address in peer connections to avoid DNS lookups. Previously, failed
|
||||
DNS lookups could cause the node to panic.
|
||||
|
||||
## v0.31.2
|
||||
|
||||
*March 30th, 2019*
|
||||
|
||||
This release fixes a regression from v0.31.1 where Tendermint panics under
|
||||
mempool load for external ABCI apps.
|
||||
|
||||
Special thanks to external contributors on this release:
|
||||
@guagualvcha
|
||||
|
||||
### BREAKING CHANGES:
|
||||
|
||||
* CLI/RPC/Config
|
||||
|
||||
* Apps
|
||||
|
||||
* Go API
|
||||
- [libs/autofile] [\#3504](https://github.com/tendermint/tendermint/issues/3504) Remove unused code in autofile package. Deleted functions: `Group.Search`, `Group.FindLast`, `GroupReader.ReadLine`, `GroupReader.PushLine`, `MakeSimpleSearchFunc` (@guagualvcha)
|
||||
|
||||
* Blockchain Protocol
|
||||
|
||||
* P2P Protocol
|
||||
|
||||
### FEATURES:
|
||||
|
||||
### IMPROVEMENTS:
|
||||
|
||||
- [circle] [\#3497](https://github.com/tendermint/tendermint/issues/3497) Move release management to CircleCI
|
||||
|
||||
### BUG FIXES:
|
||||
|
||||
- [mempool] [\#3512](https://github.com/tendermint/tendermint/issues/3512) Fix panic from concurrent access to txsMap, a regression for external ABCI apps introduced in v0.31.1
|
||||
|
||||
## v0.31.1
|
||||
|
||||
*March 27th, 2019*
|
||||
|
||||
This release contains a major improvement for the mempool that reduce the amount of sent data by about 30%
|
||||
(see some numbers below).
|
||||
It also fixes a memory leak in the mempool and adds TLS support to the RPC server by providing a certificate and key in the config.
|
||||
|
||||
Special thanks to external contributors on this release:
|
||||
@brapse, @guagualvcha, @HaoyangLiu, @needkane, @TraceBundy
|
||||
|
||||
### BREAKING CHANGES:
|
||||
|
||||
* CLI/RPC/Config
|
||||
|
||||
* Apps
|
||||
|
||||
* Go API
|
||||
- [crypto] [\#3426](https://github.com/tendermint/tendermint/pull/3426) Remove `Ripemd160` helper method (@needkane)
|
||||
- [libs/common] [\#3429](https://github.com/tendermint/tendermint/pull/3429) Remove `RepeatTimer` (also `TimerMaker` and `Ticker` interface)
|
||||
- [rpc/client] [\#3458](https://github.com/tendermint/tendermint/issues/3458) Include `NetworkClient` interface into `Client` interface
|
||||
- [types] [\#3448](https://github.com/tendermint/tendermint/issues/3448) Remove method `PB2TM.ConsensusParams`
|
||||
|
||||
* Blockchain Protocol
|
||||
|
||||
* P2P Protocol
|
||||
|
||||
### FEATURES:
|
||||
|
||||
- [rpc] [\#3419](https://github.com/tendermint/tendermint/issues/3419) Start HTTPS server if `rpc.tls_cert_file` and `rpc.tls_key_file` are provided in the config (@guagualvcha)
|
||||
|
||||
### IMPROVEMENTS:
|
||||
|
||||
- [docs] [\#3140](https://github.com/tendermint/tendermint/issues/3140) Formalize proposer election algorithm properties
|
||||
- [docs] [\#3482](https://github.com/tendermint/tendermint/issues/3482) Fix broken links (@brapse)
|
||||
- [mempool] [\#2778](https://github.com/tendermint/tendermint/issues/2778) No longer send txs back to peers who sent it to you.
|
||||
Also, limit to 65536 active peers.
|
||||
This vastly improves the bandwidth consumption of nodes.
|
||||
For instance, for a 4 node localnet, in a test sending 250byte txs for 120 sec. at 500 txs/sec (total of 15MB):
|
||||
- total bytes received from 1st node:
|
||||
- before: 42793967 (43MB)
|
||||
- after: 30003256 (30MB)
|
||||
- total bytes sent to 1st node:
|
||||
- before: 30569339 (30MB)
|
||||
- after: 19304964 (19MB)
|
||||
- [p2p] [\#3475](https://github.com/tendermint/tendermint/issues/3475) Simplify `GetSelectionWithBias` for addressbook (@guagualvcha)
|
||||
- [rpc/lib/client] [\#3430](https://github.com/tendermint/tendermint/issues/3430) Disable compression for HTTP client to prevent GZIP-bomb DoS attacks (@guagualvcha)
|
||||
|
||||
### BUG FIXES:
|
||||
|
||||
- [blockchain] [\#2699](https://github.com/tendermint/tendermint/issues/2699) Update the maxHeight when a peer is removed
|
||||
- [mempool] [\#3478](https://github.com/tendermint/tendermint/issues/3478) Fix memory-leak related to `broadcastTxRoutine` (@HaoyangLiu)
|
||||
|
||||
|
||||
## v0.31.0
|
||||
|
||||
*March 16th, 2019*
|
||||
|
@@ -1,4 +1,4 @@
|
||||
## v0.32.0
|
||||
## v0.31.7
|
||||
|
||||
**
|
||||
|
||||
@@ -19,3 +19,5 @@
|
||||
### IMPROVEMENTS:
|
||||
|
||||
### BUG FIXES:
|
||||
- [mempool] \#3699 Revert the change where we only remove valid transactions
|
||||
from the mempool once a block is committed.
|
||||
|
@@ -4,6 +4,14 @@ Thank you for considering making contributions to Tendermint and related reposit
|
||||
|
||||
Please follow standard github best practices: fork the repo, branch from the tip of develop, make some commits, and submit a pull request to develop. See the [open issues](https://github.com/tendermint/tendermint/issues) for things we need help with!
|
||||
|
||||
Before making a pull request, please open an issue describing the
|
||||
change you would like to make. If an issue for your change already exists,
|
||||
please comment on it that you will submit a pull request. Be sure to reference the issue in the opening
|
||||
comment of your pull request. If your change is substantial, you will be asked
|
||||
to write a more detailed design document in the form of an
|
||||
Architectural Decision Record (ie. see [here](./docs/architecture/)) before submitting code
|
||||
changes.
|
||||
|
||||
Please make sure to use `gofmt` before every commit - the easiest way to do this is have your editor run it for you upon saving a file.
|
||||
|
||||
## Forking
|
||||
@@ -105,10 +113,14 @@ removed from the header in rpc responses as well.
|
||||
|
||||
## Branching Model and Release
|
||||
|
||||
All repos should adhere to the branching model: http://nvie.com/posts/a-successful-git-branching-model/.
|
||||
We follow a variant of [git flow](http://nvie.com/posts/a-successful-git-branching-model/).
|
||||
This means that all pull-requests should be made against develop. Any merge to
|
||||
master constitutes a tagged release.
|
||||
|
||||
Note all pull requests should be squash merged except for merging to master and
|
||||
merging master back to develop. This keeps the commit history clean and makes it
|
||||
easy to reference the pull request where a change was introduced.
|
||||
|
||||
### Development Procedure:
|
||||
- the latest state of development is on `develop`
|
||||
- `develop` must never fail `make test`
|
||||
@@ -120,13 +132,13 @@ master constitutes a tagged release.
|
||||
### Pull Merge Procedure:
|
||||
- ensure pull branch is based on a recent develop
|
||||
- run `make test` to ensure that all tests pass
|
||||
- merge pull request
|
||||
- squash merge pull request
|
||||
- the `unstable` branch may be used to aggregate pull merges before fixing tests
|
||||
|
||||
### Release Procedure:
|
||||
- start on `develop`
|
||||
- run integration tests (see `test_integrations` in Makefile)
|
||||
- prepare changelog:
|
||||
- prepare release in a pull request against develop (to be squash merged):
|
||||
- copy `CHANGELOG_PENDING.md` to top of `CHANGELOG.md`
|
||||
- run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for
|
||||
all issues
|
||||
@@ -135,23 +147,15 @@ master constitutes a tagged release.
|
||||
the changelog. To lookup an alias from an email, try `bash
|
||||
./scripts/authors.sh <email>`
|
||||
- reset the `CHANGELOG_PENDING.md`
|
||||
- bump versions
|
||||
- push to release/vX.X.X to run the extended integration tests on the CI
|
||||
- merge to master
|
||||
- merge master back to develop
|
||||
- bump versions
|
||||
- push latest develop with prepared release details to release/vX.X.X to run the extended integration tests on the CI
|
||||
- if necessary, make pull requests against release/vX.X.X and squash merge them
|
||||
- merge to master (don't squash merge!)
|
||||
- merge master back to develop (don't squash merge!)
|
||||
|
||||
### Hotfix Procedure:
|
||||
- start on `master`
|
||||
- checkout a new branch named hotfix-vX.X.X
|
||||
- make the required changes
|
||||
- these changes should be small and an absolute necessity
|
||||
- add a note to CHANGELOG.md
|
||||
- bump versions
|
||||
- push to hotfix-vX.X.X to run the extended integration tests on the CI
|
||||
- merge hotfix-vX.X.X to master
|
||||
- merge hotfix-vX.X.X to develop
|
||||
- delete the hotfix-vX.X.X branch
|
||||
|
||||
- follow the normal development and release procedure without any differences
|
||||
|
||||
## Testing
|
||||
|
||||
|
@@ -1,5 +1,5 @@
|
||||
FROM alpine:3.7
|
||||
MAINTAINER Greg Szabo <greg@tendermint.com>
|
||||
FROM alpine:3.9
|
||||
LABEL maintainer="hello@tendermint.com"
|
||||
|
||||
# Tendermint will be looking for the genesis file in /tendermint/config/genesis.json
|
||||
# (unless you change `genesis_file` in config.toml). You can put your config.toml and
|
||||
|
17
Gopkg.lock
generated
17
Gopkg.lock
generated
@@ -34,6 +34,14 @@
|
||||
revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73"
|
||||
version = "v1.1.1"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:5f7414cf41466d4b4dd7ec52b2cd3e481e08cfd11e7e24fef730c0e483e88bb1"
|
||||
name = "github.com/etcd-io/bbolt"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "63597a96ec0ad9e6d43c3fc81e809909e0237461"
|
||||
version = "v1.3.2"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:544229a3ca0fb2dd5ebc2896d3d2ff7ce096d9751635301e44e37e761349ee70"
|
||||
name = "github.com/fortytw2/leaktest"
|
||||
@@ -170,9 +178,12 @@
|
||||
revision = "b84e30acd515aadc4b783ad4ff83aff3299bdfe0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:c568d7727aa262c32bdf8a3f7db83614f7af0ed661474b24588de635c20024c7"
|
||||
digest = "1:53e8c5c79716437e601696140e8b1801aae4204f4ec54a504333702a49572c4f"
|
||||
name = "github.com/magiconair/properties"
|
||||
packages = ["."]
|
||||
packages = [
|
||||
".",
|
||||
"assert",
|
||||
]
|
||||
pruneopts = "UT"
|
||||
revision = "c2353362d570a7bfa228149c62842019201cfb71"
|
||||
version = "v1.8.0"
|
||||
@@ -500,6 +511,7 @@
|
||||
"github.com/btcsuite/btcd/btcec",
|
||||
"github.com/btcsuite/btcutil/base58",
|
||||
"github.com/btcsuite/btcutil/bech32",
|
||||
"github.com/etcd-io/bbolt",
|
||||
"github.com/fortytw2/leaktest",
|
||||
"github.com/go-kit/kit/log",
|
||||
"github.com/go-kit/kit/log/level",
|
||||
@@ -516,6 +528,7 @@
|
||||
"github.com/golang/protobuf/ptypes/timestamp",
|
||||
"github.com/gorilla/websocket",
|
||||
"github.com/jmhodges/levigo",
|
||||
"github.com/magiconair/properties/assert",
|
||||
"github.com/pkg/errors",
|
||||
"github.com/prometheus/client_golang/prometheus",
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp",
|
||||
|
@@ -22,6 +22,10 @@
|
||||
###########################################################
|
||||
|
||||
# Allow only patch releases for serialization libraries
|
||||
[[constraint]]
|
||||
name = "github.com/etcd-io/bbolt"
|
||||
version = "v1.3.2"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/tendermint/go-amino"
|
||||
version = "~0.14.1"
|
||||
|
15
Makefile
15
Makefile
@@ -6,6 +6,7 @@ GOTOOLS = \
|
||||
github.com/square/certstrap
|
||||
GOBIN?=${GOPATH}/bin
|
||||
PACKAGES=$(shell go list ./...)
|
||||
OUTPUT?=build/tendermint
|
||||
|
||||
INCLUDE = -I=. -I=${GOPATH}/src -I=${GOPATH}/src/github.com/gogo/protobuf/protobuf
|
||||
BUILD_TAGS?='tendermint'
|
||||
@@ -19,19 +20,19 @@ check: check_tools get_vendor_deps
|
||||
### Build Tendermint
|
||||
|
||||
build:
|
||||
CGO_ENABLED=0 go build $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o build/tendermint ./cmd/tendermint/
|
||||
CGO_ENABLED=0 go build $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o $(OUTPUT) ./cmd/tendermint/
|
||||
|
||||
build_c:
|
||||
CGO_ENABLED=1 go build $(BUILD_FLAGS) -tags "$(BUILD_TAGS) gcc" -o build/tendermint ./cmd/tendermint/
|
||||
CGO_ENABLED=1 go build $(BUILD_FLAGS) -tags "$(BUILD_TAGS) cleveldb" -o $(OUTPUT) ./cmd/tendermint/
|
||||
|
||||
build_race:
|
||||
CGO_ENABLED=0 go build -race $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o build/tendermint ./cmd/tendermint
|
||||
CGO_ENABLED=0 go build -race $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o $(OUTPUT) ./cmd/tendermint
|
||||
|
||||
install:
|
||||
CGO_ENABLED=0 go install $(BUILD_FLAGS) -tags $(BUILD_TAGS) ./cmd/tendermint
|
||||
|
||||
install_c:
|
||||
CGO_ENABLED=1 go install $(BUILD_FLAGS) -tags "$(BUILD_TAGS) gcc" ./cmd/tendermint
|
||||
CGO_ENABLED=1 go install $(BUILD_FLAGS) -tags "$(BUILD_TAGS) cleveldb" ./cmd/tendermint
|
||||
|
||||
########################################
|
||||
### Protobuf
|
||||
@@ -109,7 +110,7 @@ draw_deps:
|
||||
|
||||
get_deps_bin_size:
|
||||
@# Copy of build recipe with additional flags to perform binary size analysis
|
||||
$(eval $(shell go build -work -a $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o build/tendermint ./cmd/tendermint/ 2>&1))
|
||||
$(eval $(shell go build -work -a $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o $(OUTPUT) ./cmd/tendermint/ 2>&1))
|
||||
@find $(WORK) -type f -name "*.a" | xargs -I{} du -hxs "{}" | sort -rh | sed -e s:${WORK}/::g > deps_bin_size.log
|
||||
@echo "Results can be found here: $(CURDIR)/deps_bin_size.log"
|
||||
|
||||
@@ -131,7 +132,7 @@ clean_certs:
|
||||
rm -f db/remotedb/::.crt db/remotedb/::.key
|
||||
|
||||
test_libs: gen_certs
|
||||
go test -tags gcc $(PACKAGES)
|
||||
go test -tags clevedb boltdb $(PACKAGES)
|
||||
make clean_certs
|
||||
|
||||
grpc_dbserver:
|
||||
@@ -261,7 +262,7 @@ check_dep:
|
||||
### Docker image
|
||||
|
||||
build-docker:
|
||||
cp build/tendermint DOCKER/tendermint
|
||||
cp $(OUTPUT) DOCKER/tendermint
|
||||
docker build --label=tendermint --tag="tendermint/tendermint" DOCKER
|
||||
rm -rf DOCKER/tendermint
|
||||
|
||||
|
10
UPGRADING.md
10
UPGRADING.md
@@ -3,6 +3,16 @@
|
||||
This guide provides steps to be followed when you upgrade your applications to
|
||||
a newer version of Tendermint Core.
|
||||
|
||||
## v0.31.6
|
||||
|
||||
There are no breaking changes in this release except Go API of p2p and
|
||||
mempool packages. Hovewer, if you're using cleveldb, you'll need to change
|
||||
the compilation tag:
|
||||
|
||||
Use `cleveldb` tag instead of `gcc` to compile Tendermint with CLevelDB or
|
||||
use `make build_c` / `make install_c` (full instructions can be found at
|
||||
https://tendermint.com/docs/introduction/install.html#compile-with-cleveldb-support)
|
||||
|
||||
## v0.31.0
|
||||
|
||||
This release contains a breaking change to the behaviour of the pubsub system.
|
||||
|
@@ -26,16 +26,17 @@ var _ Client = (*socketClient)(nil)
|
||||
type socketClient struct {
|
||||
cmn.BaseService
|
||||
|
||||
reqQueue chan *ReqRes
|
||||
flushTimer *cmn.ThrottleTimer
|
||||
addr string
|
||||
mustConnect bool
|
||||
conn net.Conn
|
||||
|
||||
reqQueue chan *ReqRes
|
||||
flushTimer *cmn.ThrottleTimer
|
||||
|
||||
mtx sync.Mutex
|
||||
addr string
|
||||
conn net.Conn
|
||||
err error
|
||||
reqSent *list.List
|
||||
resCb func(*types.Request, *types.Response) // listens to all callbacks
|
||||
reqSent *list.List // list of requests sent, waiting for response
|
||||
resCb func(*types.Request, *types.Response) // called on all requests, if set.
|
||||
|
||||
}
|
||||
|
||||
@@ -86,6 +87,7 @@ func (cli *socketClient) OnStop() {
|
||||
cli.mtx.Lock()
|
||||
defer cli.mtx.Unlock()
|
||||
if cli.conn != nil {
|
||||
// does this really need a mutex?
|
||||
cli.conn.Close()
|
||||
}
|
||||
|
||||
@@ -207,12 +209,15 @@ func (cli *socketClient) didRecvResponse(res *types.Response) error {
|
||||
reqres.Done() // Release waiters
|
||||
cli.reqSent.Remove(next) // Pop first item from linked list
|
||||
|
||||
// Notify reqRes listener if set
|
||||
// Notify reqRes listener if set (request specific callback).
|
||||
// NOTE: it is possible this callback isn't set on the reqres object.
|
||||
// at this point, in which case it will be called after, when it is set.
|
||||
// TODO: should we move this after the resCb call so the order is always consistent?
|
||||
if cb := reqres.GetCallback(); cb != nil {
|
||||
cb(res)
|
||||
}
|
||||
|
||||
// Notify client listener if set
|
||||
// Notify client listener if set (global callback).
|
||||
if cli.resCb != nil {
|
||||
cli.resCb(reqres.Request, res)
|
||||
}
|
||||
|
@@ -61,7 +61,7 @@ func (m *Request) Reset() { *m = Request{} }
|
||||
func (m *Request) String() string { return proto.CompactTextString(m) }
|
||||
func (*Request) ProtoMessage() {}
|
||||
func (*Request) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_types_a177e47fab90f91d, []int{0}
|
||||
return fileDescriptor_types_7e896a7c04915591, []int{0}
|
||||
}
|
||||
func (m *Request) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -483,7 +483,7 @@ func (m *RequestEcho) Reset() { *m = RequestEcho{} }
|
||||
func (m *RequestEcho) String() string { return proto.CompactTextString(m) }
|
||||
func (*RequestEcho) ProtoMessage() {}
|
||||
func (*RequestEcho) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_types_a177e47fab90f91d, []int{1}
|
||||
return fileDescriptor_types_7e896a7c04915591, []int{1}
|
||||
}
|
||||
func (m *RequestEcho) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -529,7 +529,7 @@ func (m *RequestFlush) Reset() { *m = RequestFlush{} }
|
||||
func (m *RequestFlush) String() string { return proto.CompactTextString(m) }
|
||||
func (*RequestFlush) ProtoMessage() {}
|
||||
func (*RequestFlush) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_types_a177e47fab90f91d, []int{2}
|
||||
return fileDescriptor_types_7e896a7c04915591, []int{2}
|
||||
}
|
||||
func (m *RequestFlush) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -571,7 +571,7 @@ func (m *RequestInfo) Reset() { *m = RequestInfo{} }
|
||||
func (m *RequestInfo) String() string { return proto.CompactTextString(m) }
|
||||
func (*RequestInfo) ProtoMessage() {}
|
||||
func (*RequestInfo) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_types_a177e47fab90f91d, []int{3}
|
||||
return fileDescriptor_types_7e896a7c04915591, []int{3}
|
||||
}
|
||||
func (m *RequestInfo) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -634,7 +634,7 @@ func (m *RequestSetOption) Reset() { *m = RequestSetOption{} }
|
||||
func (m *RequestSetOption) String() string { return proto.CompactTextString(m) }
|
||||
func (*RequestSetOption) ProtoMessage() {}
|
||||
func (*RequestSetOption) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_types_a177e47fab90f91d, []int{4}
|
||||
return fileDescriptor_types_7e896a7c04915591, []int{4}
|
||||
}
|
||||
func (m *RequestSetOption) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -692,7 +692,7 @@ func (m *RequestInitChain) Reset() { *m = RequestInitChain{} }
|
||||
func (m *RequestInitChain) String() string { return proto.CompactTextString(m) }
|
||||
func (*RequestInitChain) ProtoMessage() {}
|
||||
func (*RequestInitChain) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_types_a177e47fab90f91d, []int{5}
|
||||
return fileDescriptor_types_7e896a7c04915591, []int{5}
|
||||
}
|
||||
func (m *RequestInitChain) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -770,7 +770,7 @@ func (m *RequestQuery) Reset() { *m = RequestQuery{} }
|
||||
func (m *RequestQuery) String() string { return proto.CompactTextString(m) }
|
||||
func (*RequestQuery) ProtoMessage() {}
|
||||
func (*RequestQuery) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_types_a177e47fab90f91d, []int{6}
|
||||
return fileDescriptor_types_7e896a7c04915591, []int{6}
|
||||
}
|
||||
func (m *RequestQuery) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -841,7 +841,7 @@ func (m *RequestBeginBlock) Reset() { *m = RequestBeginBlock{} }
|
||||
func (m *RequestBeginBlock) String() string { return proto.CompactTextString(m) }
|
||||
func (*RequestBeginBlock) ProtoMessage() {}
|
||||
func (*RequestBeginBlock) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_types_a177e47fab90f91d, []int{7}
|
||||
return fileDescriptor_types_7e896a7c04915591, []int{7}
|
||||
}
|
||||
func (m *RequestBeginBlock) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -909,7 +909,7 @@ func (m *RequestCheckTx) Reset() { *m = RequestCheckTx{} }
|
||||
func (m *RequestCheckTx) String() string { return proto.CompactTextString(m) }
|
||||
func (*RequestCheckTx) ProtoMessage() {}
|
||||
func (*RequestCheckTx) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_types_a177e47fab90f91d, []int{8}
|
||||
return fileDescriptor_types_7e896a7c04915591, []int{8}
|
||||
}
|
||||
func (m *RequestCheckTx) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -956,7 +956,7 @@ func (m *RequestDeliverTx) Reset() { *m = RequestDeliverTx{} }
|
||||
func (m *RequestDeliverTx) String() string { return proto.CompactTextString(m) }
|
||||
func (*RequestDeliverTx) ProtoMessage() {}
|
||||
func (*RequestDeliverTx) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_types_a177e47fab90f91d, []int{9}
|
||||
return fileDescriptor_types_7e896a7c04915591, []int{9}
|
||||
}
|
||||
func (m *RequestDeliverTx) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -1003,7 +1003,7 @@ func (m *RequestEndBlock) Reset() { *m = RequestEndBlock{} }
|
||||
func (m *RequestEndBlock) String() string { return proto.CompactTextString(m) }
|
||||
func (*RequestEndBlock) ProtoMessage() {}
|
||||
func (*RequestEndBlock) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_types_a177e47fab90f91d, []int{10}
|
||||
return fileDescriptor_types_7e896a7c04915591, []int{10}
|
||||
}
|
||||
func (m *RequestEndBlock) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -1049,7 +1049,7 @@ func (m *RequestCommit) Reset() { *m = RequestCommit{} }
|
||||
func (m *RequestCommit) String() string { return proto.CompactTextString(m) }
|
||||
func (*RequestCommit) ProtoMessage() {}
|
||||
func (*RequestCommit) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_types_a177e47fab90f91d, []int{11}
|
||||
return fileDescriptor_types_7e896a7c04915591, []int{11}
|
||||
}
|
||||
func (m *RequestCommit) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -1102,7 +1102,7 @@ func (m *Response) Reset() { *m = Response{} }
|
||||
func (m *Response) String() string { return proto.CompactTextString(m) }
|
||||
func (*Response) ProtoMessage() {}
|
||||
func (*Response) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_types_a177e47fab90f91d, []int{12}
|
||||
return fileDescriptor_types_7e896a7c04915591, []int{12}
|
||||
}
|
||||
func (m *Response) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -1555,7 +1555,7 @@ func (m *ResponseException) Reset() { *m = ResponseException{} }
|
||||
func (m *ResponseException) String() string { return proto.CompactTextString(m) }
|
||||
func (*ResponseException) ProtoMessage() {}
|
||||
func (*ResponseException) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_types_a177e47fab90f91d, []int{13}
|
||||
return fileDescriptor_types_7e896a7c04915591, []int{13}
|
||||
}
|
||||
func (m *ResponseException) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -1602,7 +1602,7 @@ func (m *ResponseEcho) Reset() { *m = ResponseEcho{} }
|
||||
func (m *ResponseEcho) String() string { return proto.CompactTextString(m) }
|
||||
func (*ResponseEcho) ProtoMessage() {}
|
||||
func (*ResponseEcho) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_types_a177e47fab90f91d, []int{14}
|
||||
return fileDescriptor_types_7e896a7c04915591, []int{14}
|
||||
}
|
||||
func (m *ResponseEcho) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -1648,7 +1648,7 @@ func (m *ResponseFlush) Reset() { *m = ResponseFlush{} }
|
||||
func (m *ResponseFlush) String() string { return proto.CompactTextString(m) }
|
||||
func (*ResponseFlush) ProtoMessage() {}
|
||||
func (*ResponseFlush) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_types_a177e47fab90f91d, []int{15}
|
||||
return fileDescriptor_types_7e896a7c04915591, []int{15}
|
||||
}
|
||||
func (m *ResponseFlush) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -1692,7 +1692,7 @@ func (m *ResponseInfo) Reset() { *m = ResponseInfo{} }
|
||||
func (m *ResponseInfo) String() string { return proto.CompactTextString(m) }
|
||||
func (*ResponseInfo) ProtoMessage() {}
|
||||
func (*ResponseInfo) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_types_a177e47fab90f91d, []int{16}
|
||||
return fileDescriptor_types_7e896a7c04915591, []int{16}
|
||||
}
|
||||
func (m *ResponseInfo) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -1771,7 +1771,7 @@ func (m *ResponseSetOption) Reset() { *m = ResponseSetOption{} }
|
||||
func (m *ResponseSetOption) String() string { return proto.CompactTextString(m) }
|
||||
func (*ResponseSetOption) ProtoMessage() {}
|
||||
func (*ResponseSetOption) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_types_a177e47fab90f91d, []int{17}
|
||||
return fileDescriptor_types_7e896a7c04915591, []int{17}
|
||||
}
|
||||
func (m *ResponseSetOption) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -1833,7 +1833,7 @@ func (m *ResponseInitChain) Reset() { *m = ResponseInitChain{} }
|
||||
func (m *ResponseInitChain) String() string { return proto.CompactTextString(m) }
|
||||
func (*ResponseInitChain) ProtoMessage() {}
|
||||
func (*ResponseInitChain) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_types_a177e47fab90f91d, []int{18}
|
||||
return fileDescriptor_types_7e896a7c04915591, []int{18}
|
||||
}
|
||||
func (m *ResponseInitChain) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -1896,7 +1896,7 @@ func (m *ResponseQuery) Reset() { *m = ResponseQuery{} }
|
||||
func (m *ResponseQuery) String() string { return proto.CompactTextString(m) }
|
||||
func (*ResponseQuery) ProtoMessage() {}
|
||||
func (*ResponseQuery) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_types_a177e47fab90f91d, []int{19}
|
||||
return fileDescriptor_types_7e896a7c04915591, []int{19}
|
||||
}
|
||||
func (m *ResponseQuery) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -1999,7 +1999,7 @@ func (m *ResponseBeginBlock) Reset() { *m = ResponseBeginBlock{} }
|
||||
func (m *ResponseBeginBlock) String() string { return proto.CompactTextString(m) }
|
||||
func (*ResponseBeginBlock) ProtoMessage() {}
|
||||
func (*ResponseBeginBlock) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_types_a177e47fab90f91d, []int{20}
|
||||
return fileDescriptor_types_7e896a7c04915591, []int{20}
|
||||
}
|
||||
func (m *ResponseBeginBlock) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -2053,7 +2053,7 @@ func (m *ResponseCheckTx) Reset() { *m = ResponseCheckTx{} }
|
||||
func (m *ResponseCheckTx) String() string { return proto.CompactTextString(m) }
|
||||
func (*ResponseCheckTx) ProtoMessage() {}
|
||||
func (*ResponseCheckTx) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_types_a177e47fab90f91d, []int{21}
|
||||
return fileDescriptor_types_7e896a7c04915591, []int{21}
|
||||
}
|
||||
func (m *ResponseCheckTx) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -2156,7 +2156,7 @@ func (m *ResponseDeliverTx) Reset() { *m = ResponseDeliverTx{} }
|
||||
func (m *ResponseDeliverTx) String() string { return proto.CompactTextString(m) }
|
||||
func (*ResponseDeliverTx) ProtoMessage() {}
|
||||
func (*ResponseDeliverTx) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_types_a177e47fab90f91d, []int{22}
|
||||
return fileDescriptor_types_7e896a7c04915591, []int{22}
|
||||
}
|
||||
func (m *ResponseDeliverTx) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -2254,7 +2254,7 @@ func (m *ResponseEndBlock) Reset() { *m = ResponseEndBlock{} }
|
||||
func (m *ResponseEndBlock) String() string { return proto.CompactTextString(m) }
|
||||
func (*ResponseEndBlock) ProtoMessage() {}
|
||||
func (*ResponseEndBlock) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_types_a177e47fab90f91d, []int{23}
|
||||
return fileDescriptor_types_7e896a7c04915591, []int{23}
|
||||
}
|
||||
func (m *ResponseEndBlock) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -2316,7 +2316,7 @@ func (m *ResponseCommit) Reset() { *m = ResponseCommit{} }
|
||||
func (m *ResponseCommit) String() string { return proto.CompactTextString(m) }
|
||||
func (*ResponseCommit) ProtoMessage() {}
|
||||
func (*ResponseCommit) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_types_a177e47fab90f91d, []int{24}
|
||||
return fileDescriptor_types_7e896a7c04915591, []int{24}
|
||||
}
|
||||
func (m *ResponseCommit) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -2367,7 +2367,7 @@ func (m *ConsensusParams) Reset() { *m = ConsensusParams{} }
|
||||
func (m *ConsensusParams) String() string { return proto.CompactTextString(m) }
|
||||
func (*ConsensusParams) ProtoMessage() {}
|
||||
func (*ConsensusParams) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_types_a177e47fab90f91d, []int{25}
|
||||
return fileDescriptor_types_7e896a7c04915591, []int{25}
|
||||
}
|
||||
func (m *ConsensusParams) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -2417,7 +2417,7 @@ func (m *ConsensusParams) GetValidator() *ValidatorParams {
|
||||
return nil
|
||||
}
|
||||
|
||||
// BlockParams contains limits on the block size and timestamp.
|
||||
// BlockParams contains limits on the block size.
|
||||
type BlockParams struct {
|
||||
// Note: must be greater than 0
|
||||
MaxBytes int64 `protobuf:"varint,1,opt,name=max_bytes,json=maxBytes,proto3" json:"max_bytes,omitempty"`
|
||||
@@ -2432,7 +2432,7 @@ func (m *BlockParams) Reset() { *m = BlockParams{} }
|
||||
func (m *BlockParams) String() string { return proto.CompactTextString(m) }
|
||||
func (*BlockParams) ProtoMessage() {}
|
||||
func (*BlockParams) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_types_a177e47fab90f91d, []int{26}
|
||||
return fileDescriptor_types_7e896a7c04915591, []int{26}
|
||||
}
|
||||
func (m *BlockParams) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -2488,7 +2488,7 @@ func (m *EvidenceParams) Reset() { *m = EvidenceParams{} }
|
||||
func (m *EvidenceParams) String() string { return proto.CompactTextString(m) }
|
||||
func (*EvidenceParams) ProtoMessage() {}
|
||||
func (*EvidenceParams) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_types_a177e47fab90f91d, []int{27}
|
||||
return fileDescriptor_types_7e896a7c04915591, []int{27}
|
||||
}
|
||||
func (m *EvidenceParams) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -2536,7 +2536,7 @@ func (m *ValidatorParams) Reset() { *m = ValidatorParams{} }
|
||||
func (m *ValidatorParams) String() string { return proto.CompactTextString(m) }
|
||||
func (*ValidatorParams) ProtoMessage() {}
|
||||
func (*ValidatorParams) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_types_a177e47fab90f91d, []int{28}
|
||||
return fileDescriptor_types_7e896a7c04915591, []int{28}
|
||||
}
|
||||
func (m *ValidatorParams) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -2584,7 +2584,7 @@ func (m *LastCommitInfo) Reset() { *m = LastCommitInfo{} }
|
||||
func (m *LastCommitInfo) String() string { return proto.CompactTextString(m) }
|
||||
func (*LastCommitInfo) ProtoMessage() {}
|
||||
func (*LastCommitInfo) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_types_a177e47fab90f91d, []int{29}
|
||||
return fileDescriptor_types_7e896a7c04915591, []int{29}
|
||||
}
|
||||
func (m *LastCommitInfo) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -2658,7 +2658,7 @@ func (m *Header) Reset() { *m = Header{} }
|
||||
func (m *Header) String() string { return proto.CompactTextString(m) }
|
||||
func (*Header) ProtoMessage() {}
|
||||
func (*Header) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_types_a177e47fab90f91d, []int{30}
|
||||
return fileDescriptor_types_7e896a7c04915591, []int{30}
|
||||
}
|
||||
func (m *Header) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -2811,7 +2811,7 @@ func (m *Version) Reset() { *m = Version{} }
|
||||
func (m *Version) String() string { return proto.CompactTextString(m) }
|
||||
func (*Version) ProtoMessage() {}
|
||||
func (*Version) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_types_a177e47fab90f91d, []int{31}
|
||||
return fileDescriptor_types_7e896a7c04915591, []int{31}
|
||||
}
|
||||
func (m *Version) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -2866,7 +2866,7 @@ func (m *BlockID) Reset() { *m = BlockID{} }
|
||||
func (m *BlockID) String() string { return proto.CompactTextString(m) }
|
||||
func (*BlockID) ProtoMessage() {}
|
||||
func (*BlockID) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_types_a177e47fab90f91d, []int{32}
|
||||
return fileDescriptor_types_7e896a7c04915591, []int{32}
|
||||
}
|
||||
func (m *BlockID) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -2921,7 +2921,7 @@ func (m *PartSetHeader) Reset() { *m = PartSetHeader{} }
|
||||
func (m *PartSetHeader) String() string { return proto.CompactTextString(m) }
|
||||
func (*PartSetHeader) ProtoMessage() {}
|
||||
func (*PartSetHeader) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_types_a177e47fab90f91d, []int{33}
|
||||
return fileDescriptor_types_7e896a7c04915591, []int{33}
|
||||
}
|
||||
func (m *PartSetHeader) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -2978,7 +2978,7 @@ func (m *Validator) Reset() { *m = Validator{} }
|
||||
func (m *Validator) String() string { return proto.CompactTextString(m) }
|
||||
func (*Validator) ProtoMessage() {}
|
||||
func (*Validator) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_types_a177e47fab90f91d, []int{34}
|
||||
return fileDescriptor_types_7e896a7c04915591, []int{34}
|
||||
}
|
||||
func (m *Validator) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -3034,7 +3034,7 @@ func (m *ValidatorUpdate) Reset() { *m = ValidatorUpdate{} }
|
||||
func (m *ValidatorUpdate) String() string { return proto.CompactTextString(m) }
|
||||
func (*ValidatorUpdate) ProtoMessage() {}
|
||||
func (*ValidatorUpdate) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_types_a177e47fab90f91d, []int{35}
|
||||
return fileDescriptor_types_7e896a7c04915591, []int{35}
|
||||
}
|
||||
func (m *ValidatorUpdate) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -3090,7 +3090,7 @@ func (m *VoteInfo) Reset() { *m = VoteInfo{} }
|
||||
func (m *VoteInfo) String() string { return proto.CompactTextString(m) }
|
||||
func (*VoteInfo) ProtoMessage() {}
|
||||
func (*VoteInfo) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_types_a177e47fab90f91d, []int{36}
|
||||
return fileDescriptor_types_7e896a7c04915591, []int{36}
|
||||
}
|
||||
func (m *VoteInfo) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -3145,7 +3145,7 @@ func (m *PubKey) Reset() { *m = PubKey{} }
|
||||
func (m *PubKey) String() string { return proto.CompactTextString(m) }
|
||||
func (*PubKey) ProtoMessage() {}
|
||||
func (*PubKey) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_types_a177e47fab90f91d, []int{37}
|
||||
return fileDescriptor_types_7e896a7c04915591, []int{37}
|
||||
}
|
||||
func (m *PubKey) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -3203,7 +3203,7 @@ func (m *Evidence) Reset() { *m = Evidence{} }
|
||||
func (m *Evidence) String() string { return proto.CompactTextString(m) }
|
||||
func (*Evidence) ProtoMessage() {}
|
||||
func (*Evidence) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_types_a177e47fab90f91d, []int{38}
|
||||
return fileDescriptor_types_7e896a7c04915591, []int{38}
|
||||
}
|
||||
func (m *Evidence) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -15357,12 +15357,12 @@ var (
|
||||
ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow")
|
||||
)
|
||||
|
||||
func init() { proto.RegisterFile("abci/types/types.proto", fileDescriptor_types_a177e47fab90f91d) }
|
||||
func init() { proto.RegisterFile("abci/types/types.proto", fileDescriptor_types_7e896a7c04915591) }
|
||||
func init() {
|
||||
golang_proto.RegisterFile("abci/types/types.proto", fileDescriptor_types_a177e47fab90f91d)
|
||||
golang_proto.RegisterFile("abci/types/types.proto", fileDescriptor_types_7e896a7c04915591)
|
||||
}
|
||||
|
||||
var fileDescriptor_types_a177e47fab90f91d = []byte{
|
||||
var fileDescriptor_types_7e896a7c04915591 = []byte{
|
||||
// 2203 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0xcf, 0x73, 0x1c, 0x47,
|
||||
0xf5, 0xd7, 0xec, 0xef, 0x79, 0xab, 0xfd, 0xe1, 0xb6, 0x6c, 0xaf, 0xf7, 0x9b, 0xaf, 0xe4, 0x1a,
|
||||
|
@@ -212,7 +212,7 @@ message ConsensusParams {
|
||||
ValidatorParams validator = 3;
|
||||
}
|
||||
|
||||
// BlockParams contains limits on the block size and timestamp.
|
||||
// BlockParams contains limits on the block size.
|
||||
message BlockParams {
|
||||
// Note: must be greater than 0
|
||||
int64 max_bytes = 1;
|
||||
|
@@ -69,7 +69,7 @@ type BlockPool struct {
height int64 // the lowest key in requesters.
// peers
peers map[p2p.ID]*bpPeer
maxPeerHeight int64
maxPeerHeight int64 // the biggest reported height

// atomic
numPending int32 // number of requests pending assignment or block response

@@ -78,6 +78,8 @@ type BlockPool struct {
errorsCh chan<- peerError
}

// NewBlockPool returns a new BlockPool with the height equal to start. Block
// requests and errors will be sent to requestsCh and errorsCh accordingly.
func NewBlockPool(start int64, requestsCh chan<- BlockRequest, errorsCh chan<- peerError) *BlockPool {
bp := &BlockPool{
peers: make(map[p2p.ID]*bpPeer),

@@ -93,15 +95,15 @@ func NewBlockPool(start int64, requestsCh chan<- BlockRequest, errorsCh chan<- p
return bp
}

// OnStart implements cmn.Service by spawning requesters routine and recording
// pool's start time.
func (pool *BlockPool) OnStart() error {
go pool.makeRequestersRoutine()
pool.startTime = time.Now()
return nil
}

func (pool *BlockPool) OnStop() {}

// Run spawns requesters as needed.
// spawns requesters as needed
func (pool *BlockPool) makeRequestersRoutine() {
for {
if !pool.IsRunning() {

@@ -150,6 +152,8 @@ func (pool *BlockPool) removeTimedoutPeers() {
}
}

// GetStatus returns pool's height, numPending requests and the number of
// requesters.
func (pool *BlockPool) GetStatus() (height int64, numPending int32, lenRequesters int) {
pool.mtx.Lock()
defer pool.mtx.Unlock()

@@ -157,6 +161,7 @@ func (pool *BlockPool) GetStatus() (height int64, numPending int32, lenRequester
return pool.height, atomic.LoadInt32(&pool.numPending), len(pool.requesters)
}

// IsCaughtUp returns true if this node is caught up, false - otherwise.
// TODO: relax conditions, prevent abuse.
func (pool *BlockPool) IsCaughtUp() bool {
pool.mtx.Lock()

@@ -170,8 +175,9 @@ func (pool *BlockPool) IsCaughtUp() bool {

// Some conditions to determine if we're caught up.
// Ensures we've either received a block or waited some amount of time,
// and that we're synced to the highest known height. Note we use maxPeerHeight - 1
// because to sync block H requires block H+1 to verify the LastCommit.
// and that we're synced to the highest known height.
// Note we use maxPeerHeight - 1 because to sync block H requires block H+1
// to verify the LastCommit.
receivedBlockOrTimedOut := pool.height > 0 || time.Since(pool.startTime) > 5*time.Second
ourChainIsLongestAmongPeers := pool.maxPeerHeight == 0 || pool.height >= (pool.maxPeerHeight-1)
isCaughtUp := receivedBlockOrTimedOut && ourChainIsLongestAmongPeers

@@ -260,14 +266,14 @@ func (pool *BlockPool) AddBlock(peerID p2p.ID, block *types.Block, blockSize int
}
}

// MaxPeerHeight returns the highest height reported by a peer.
// MaxPeerHeight returns the highest reported height.
func (pool *BlockPool) MaxPeerHeight() int64 {
pool.mtx.Lock()
defer pool.mtx.Unlock()
return pool.maxPeerHeight
}

// Sets the peer's alleged blockchain height.
// SetPeerHeight sets the peer's alleged blockchain height.
func (pool *BlockPool) SetPeerHeight(peerID p2p.ID, height int64) {
pool.mtx.Lock()
defer pool.mtx.Unlock()

@@ -286,6 +292,8 @@ func (pool *BlockPool) SetPeerHeight(peerID p2p.ID, height int64) {
}
}

// RemovePeer removes the peer with peerID from the pool. If there's no peer
// with peerID, function is a no-op.
func (pool *BlockPool) RemovePeer(peerID p2p.ID) {
pool.mtx.Lock()
defer pool.mtx.Unlock()

@@ -299,10 +307,32 @@ func (pool *BlockPool) removePeer(peerID p2p.ID) {
requester.redo(peerID)
}
}
if p, exist := pool.peers[peerID]; exist && p.timeout != nil {
p.timeout.Stop()

peer, ok := pool.peers[peerID]
if ok {
if peer.timeout != nil {
peer.timeout.Stop()
}

delete(pool.peers, peerID)

// Find a new peer with the biggest height and update maxPeerHeight if the
// peer's height was the biggest.
if peer.height == pool.maxPeerHeight {
pool.updateMaxPeerHeight()
}
}
delete(pool.peers, peerID)
}

// If no peers are left, maxPeerHeight is set to 0.
func (pool *BlockPool) updateMaxPeerHeight() {
var max int64
for _, peer := range pool.peers {
if peer.height > max {
max = peer.height
}
}
pool.maxPeerHeight = max
}

// Pick an available peer with at least the given minHeight.
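A small self-contained sketch (editorial illustration, not part of the diff) of the idea behind updateMaxPeerHeight: the maximum only needs to be recomputed when the removed peer was the tallest one.

package main

import "fmt"

// peerSet is a stripped-down stand-in for BlockPool's peers map; names are illustrative.
type peerSet struct {
	heights   map[string]int64
	maxHeight int64
}

func (ps *peerSet) remove(id string) {
	h, ok := ps.heights[id]
	if !ok {
		return
	}
	delete(ps.heights, id)
	// Only removing the tallest peer can lower the maximum.
	if h == ps.maxHeight {
		ps.maxHeight = 0
		for _, other := range ps.heights {
			if other > ps.maxHeight {
				ps.maxHeight = other
			}
		}
	}
}

func main() {
	ps := &peerSet{heights: map[string]int64{"a": 5, "b": 9}, maxHeight: 9}
	ps.remove("b")
	fmt.Println(ps.maxHeight) // 5; drops to 0 once all peers are gone
}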
@@ -1,12 +1,15 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
@@ -39,7 +42,9 @@ func (p testPeer) runInputRoutine() {
|
||||
func (p testPeer) simulateInput(input inputData) {
|
||||
block := &types.Block{Header: types.Header{Height: input.request.Height}}
|
||||
input.pool.AddBlock(input.request.PeerID, block, 123)
|
||||
input.t.Logf("Added block from peer %v (height: %v)", input.request.PeerID, input.request.Height)
|
||||
// TODO: uncommenting this creates a race which is detected by: https://github.com/golang/go/blob/2bd767b1022dd3254bcec469f0ee164024726486/src/testing/testing.go#L854-L856
|
||||
// see: https://github.com/tendermint/tendermint/issues/3390#issue-418379890
|
||||
// input.t.Logf("Added block from peer %v (height: %v)", input.request.PeerID, input.request.Height)
|
||||
}
|
||||
|
||||
type testPeers map[p2p.ID]testPeer
|
||||
@@ -66,7 +71,7 @@ func makePeers(numPeers int, minHeight, maxHeight int64) testPeers {
|
||||
return peers
|
||||
}
|
||||
|
||||
func TestBasic(t *testing.T) {
|
||||
func TestBlockPoolBasic(t *testing.T) {
|
||||
start := int64(42)
|
||||
peers := makePeers(10, start+1, 1000)
|
||||
errorsCh := make(chan peerError, 1000)
|
||||
@@ -122,7 +127,7 @@ func TestBasic(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestTimeout(t *testing.T) {
|
||||
func TestBlockPoolTimeout(t *testing.T) {
|
||||
start := int64(42)
|
||||
peers := makePeers(10, start+1, 1000)
|
||||
errorsCh := make(chan peerError, 1000)
|
||||
@@ -180,3 +185,40 @@ func TestTimeout(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlockPoolRemovePeer(t *testing.T) {
|
||||
peers := make(testPeers, 10)
|
||||
for i := 0; i < 10; i++ {
|
||||
peerID := p2p.ID(fmt.Sprintf("%d", i+1))
|
||||
height := int64(i + 1)
|
||||
peers[peerID] = testPeer{peerID, height, make(chan inputData)}
|
||||
}
|
||||
requestsCh := make(chan BlockRequest)
|
||||
errorsCh := make(chan peerError)
|
||||
|
||||
pool := NewBlockPool(1, requestsCh, errorsCh)
|
||||
pool.SetLogger(log.TestingLogger())
|
||||
err := pool.Start()
|
||||
require.NoError(t, err)
|
||||
defer pool.Stop()
|
||||
|
||||
// add peers
|
||||
for peerID, peer := range peers {
|
||||
pool.SetPeerHeight(peerID, peer.height)
|
||||
}
|
||||
assert.EqualValues(t, 10, pool.MaxPeerHeight())
|
||||
|
||||
// remove not-existing peer
|
||||
assert.NotPanics(t, func() { pool.RemovePeer(p2p.ID("Superman")) })
|
||||
|
||||
// remove peer with biggest height
|
||||
pool.RemovePeer(p2p.ID("10"))
|
||||
assert.EqualValues(t, 9, pool.MaxPeerHeight())
|
||||
|
||||
// remove all peers
|
||||
for peerID := range peers {
|
||||
pool.RemovePeer(peerID)
|
||||
}
|
||||
|
||||
assert.EqualValues(t, 0, pool.MaxPeerHeight())
|
||||
}
|
||||
|
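The t.Logf call removed in the test above was racing because the logging goroutine can outlive the test body. A common workaround, shown as a hedged sketch (package and test name are illustrative, not the project's code), is to make the test wait for the goroutine before returning.

package blockpool_test

import (
	"testing"
	"time"
)

func TestLogFromGoroutineSafely(t *testing.T) {
	done := make(chan struct{})
	go func() {
		defer close(done)
		time.Sleep(10 * time.Millisecond) // simulated work
		t.Log("finished simulated input") // safe: the test waits below
	}()
	<-done // without this, t.Log could run after the test has completed
}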
@@ -228,32 +228,40 @@ func (bcR *BlockchainReactor) poolRoutine() {

didProcessCh := make(chan struct{}, 1)

go func() {
for {
select {
case <-bcR.Quit():
return
case <-bcR.pool.Quit():
return
case request := <-bcR.requestsCh:
peer := bcR.Switch.Peers().Get(request.PeerID)
if peer == nil {
continue
}
msgBytes := cdc.MustMarshalBinaryBare(&bcBlockRequestMessage{request.Height})
queued := peer.TrySend(BlockchainChannel, msgBytes)
if !queued {
bcR.Logger.Debug("Send queue is full, drop block request", "peer", peer.ID(), "height", request.Height)
}
case err := <-bcR.errorsCh:
peer := bcR.Switch.Peers().Get(err.peerID)
if peer != nil {
bcR.Switch.StopPeerForError(peer, err)
}

case <-statusUpdateTicker.C:
// ask for status updates
go bcR.BroadcastStatusRequest() // nolint: errcheck

}
}
}()

FOR_LOOP:
for {
select {
case request := <-bcR.requestsCh:
peer := bcR.Switch.Peers().Get(request.PeerID)
if peer == nil {
continue FOR_LOOP // Peer has since been disconnected.
}
msgBytes := cdc.MustMarshalBinaryBare(&bcBlockRequestMessage{request.Height})
queued := peer.TrySend(BlockchainChannel, msgBytes)
if !queued {
// We couldn't make the request, send-queue full.
// The pool handles timeouts, just let it go.
continue FOR_LOOP
}

case err := <-bcR.errorsCh:
peer := bcR.Switch.Peers().Get(err.peerID)
if peer != nil {
bcR.Switch.StopPeerForError(peer, err)
}

case <-statusUpdateTicker.C:
// ask for status updates
go bcR.BroadcastStatusRequest() // nolint: errcheck

case <-switchToConsensusTicker.C:
height, numPending, lenRequesters := bcR.pool.GetStatus()
outbound, inbound, _ := bcR.Switch.NumPeers()

@@ -262,7 +270,6 @@ FOR_LOOP:
if bcR.pool.IsCaughtUp() {
bcR.Logger.Info("Time to switch to consensus reactor!", "height", height)
bcR.pool.Stop()

conR, ok := bcR.Switch.Reactor("CONSENSUS").(consensusReactor)
if ok {
conR.SwitchToConsensus(state, blocksSynced)
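The hunk above moves request and error handling into a dedicated goroutine so the main FOR_LOOP only deals with tickers and block processing. A generic sketch of that pattern (editorial illustration; channel names are made up):

package main

import (
	"fmt"
	"time"
)

func main() {
	requests := make(chan int)
	quit := make(chan struct{})

	go func() { // side goroutine drains bursty channels without blocking the main loop
		for {
			select {
			case <-quit:
				return
			case r := <-requests:
				fmt.Println("dispatching request for height", r)
			}
		}
	}()

	ticker := time.NewTicker(20 * time.Millisecond)
	defer ticker.Stop()
	requests <- 42
	for i := 0; i < 2; i++ {
		<-ticker.C
		fmt.Println("main loop tick: process blocks / check caught-up")
	}
	close(quit)
}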
@@ -13,6 +13,7 @@ import (
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
dbm "github.com/tendermint/tendermint/libs/db"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/mock"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
"github.com/tendermint/tendermint/proxy"
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
@@ -91,8 +92,10 @@ func newBlockchainReactor(logger log.Logger, genDoc *types.GenesisDoc, privVals
|
||||
// NOTE we have to create and commit the blocks first because
|
||||
// pool.height is determined from the store.
|
||||
fastSync := true
|
||||
blockExec := sm.NewBlockExecutor(dbm.NewMemDB(), log.TestingLogger(), proxyApp.Consensus(),
|
||||
sm.MockMempool{}, sm.MockEvidencePool{})
|
||||
db := dbm.NewMemDB()
|
||||
blockExec := sm.NewBlockExecutor(db, log.TestingLogger(), proxyApp.Consensus(),
|
||||
mock.Mempool{}, sm.MockEvidencePool{})
|
||||
sm.SaveState(db, state)
|
||||
|
||||
// let's add some blocks in
|
||||
for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ {
|
||||
|
@@ -144,14 +144,14 @@ func (bs *BlockStore) LoadSeenCommit(height int64) *types.Commit {
// most recent height. Otherwise they'd stall at H-1.
func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) {
if block == nil {
cmn.PanicSanity("BlockStore can only save a non-nil block")
panic("BlockStore can only save a non-nil block")
}
height := block.Height
if g, w := height, bs.Height()+1; g != w {
cmn.PanicSanity(fmt.Sprintf("BlockStore can only save contiguous blocks. Wanted %v, got %v", w, g))
panic(fmt.Sprintf("BlockStore can only save contiguous blocks. Wanted %v, got %v", w, g))
}
if !blockParts.IsComplete() {
cmn.PanicSanity(fmt.Sprintf("BlockStore can only save complete block part sets"))
panic(fmt.Sprintf("BlockStore can only save complete block part sets"))
}

// Save block meta

@@ -188,7 +188,7 @@ func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, s

func (bs *BlockStore) saveBlockPart(height int64, index int, part *types.Part) {
if height != bs.Height()+1 {
cmn.PanicSanity(fmt.Sprintf("BlockStore can only save contiguous blocks. Wanted %v, got %v", bs.Height()+1, height))
panic(fmt.Sprintf("BlockStore can only save contiguous blocks. Wanted %v, got %v", bs.Height()+1, height))
}
partBytes := cdc.MustMarshalBinaryBare(part)
bs.db.Set(calcBlockPartKey(height, index), partBytes)

@@ -224,7 +224,7 @@ type BlockStoreStateJSON struct {
func (bsj BlockStoreStateJSON) Save(db dbm.DB) {
bytes, err := cdc.MarshalJSON(bsj)
if err != nil {
cmn.PanicSanity(fmt.Sprintf("Could not marshal state bytes: %v", err))
panic(fmt.Sprintf("Could not marshal state bytes: %v", err))
}
db.SetSync(blockStoreKey, bytes)
}
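These hunks swap cmn.PanicSanity(msg) for the builtin panic(msg); PanicSanity was essentially a thin wrapper that prefixed the message, so a plain panic with a formatted string carries the same information. A minimal runnable sketch of the resulting style (illustrative function name, not the BlockStore code):

package main

import "fmt"

func saveContiguous(storeHeight, blockHeight int64) {
	if blockHeight != storeHeight+1 {
		panic(fmt.Sprintf("BlockStore can only save contiguous blocks. Wanted %v, got %v",
			storeHeight+1, blockHeight))
	}
}

func main() {
	defer func() {
		if r := recover(); r != nil {
			fmt.Println("recovered:", r)
		}
	}()
	saveContiguous(10, 12) // triggers the panic path
}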
@@ -18,6 +18,12 @@ var ResetAllCmd = &cobra.Command{
Run: resetAll,
}

var keepAddrBook bool

func init() {
ResetAllCmd.Flags().BoolVar(&keepAddrBook, "keep-addr-book", false, "Keep the address book intact")
}

// ResetPrivValidatorCmd resets the private validator files.
var ResetPrivValidatorCmd = &cobra.Command{
Use: "unsafe_reset_priv_validator",

@@ -41,7 +47,11 @@ func resetPrivValidator(cmd *cobra.Command, args []string) {
// ResetAll removes address book files plus all data, and resets the privValdiator data.
// Exported so other CLI tools can use it.
func ResetAll(dbDir, addrBookFile, privValKeyFile, privValStateFile string, logger log.Logger) {
removeAddrBook(addrBookFile, logger)
if keepAddrBook {
logger.Info("The address book remains intact")
} else {
removeAddrBook(addrBookFile, logger)
}
if err := os.RemoveAll(dbDir); err == nil {
logger.Info("Removed all blockchain history", "dir", dbDir)
} else {
@@ -8,6 +8,7 @@ import (
"strings"

"github.com/spf13/cobra"
"github.com/spf13/viper"

cfg "github.com/tendermint/tendermint/config"
cmn "github.com/tendermint/tendermint/libs/common"

@@ -20,13 +21,17 @@ import (
var (
nValidators int
nNonValidators int
configFile string
outputDir string
nodeDirPrefix string

populatePersistentPeers bool
hostnamePrefix string
hostnameSuffix string
startingIPAddress string
hostnames []string
p2pPort int
randomMonikers bool
)

const (

@@ -36,6 +41,8 @@ const (
func init() {
TestnetFilesCmd.Flags().IntVar(&nValidators, "v", 4,
"Number of validators to initialize the testnet with")
TestnetFilesCmd.Flags().StringVar(&configFile, "config", "",
"Config file to use (note some options may be overwritten)")
TestnetFilesCmd.Flags().IntVar(&nNonValidators, "n", 0,
"Number of non-validators to initialize the testnet with")
TestnetFilesCmd.Flags().StringVar(&outputDir, "o", "./mytestnet",

@@ -46,11 +53,17 @@ func init() {
TestnetFilesCmd.Flags().BoolVar(&populatePersistentPeers, "populate-persistent-peers", true,
"Update config of each node with the list of persistent peers build using either hostname-prefix or starting-ip-address")
TestnetFilesCmd.Flags().StringVar(&hostnamePrefix, "hostname-prefix", "node",
"Hostname prefix (node results in persistent peers list ID0@node0:26656, ID1@node1:26656, ...)")
"Hostname prefix (\"node\" results in persistent peers list ID0@node0:26656, ID1@node1:26656, ...)")
TestnetFilesCmd.Flags().StringVar(&hostnameSuffix, "hostname-suffix", "",
"Hostname suffix (\".xyz.com\" results in persistent peers list ID0@node0.xyz.com:26656, ID1@node1.xyz.com:26656, ...)")
TestnetFilesCmd.Flags().StringVar(&startingIPAddress, "starting-ip-address", "",
"Starting IP address (192.168.0.1 results in persistent peers list ID0@192.168.0.1:26656, ID1@192.168.0.2:26656, ...)")
"Starting IP address (\"192.168.0.1\" results in persistent peers list ID0@192.168.0.1:26656, ID1@192.168.0.2:26656, ...)")
TestnetFilesCmd.Flags().StringArrayVar(&hostnames, "hostname", []string{},
"Manually override all hostnames of validators and non-validators (use --hostname multiple times for multiple hosts)")
TestnetFilesCmd.Flags().IntVar(&p2pPort, "p2p-port", 26656,
"P2P Port")
TestnetFilesCmd.Flags().BoolVar(&randomMonikers, "random-monikers", false,
"Randomize the moniker for each generated node")
}

// TestnetFilesCmd allows initialisation of files for a Tendermint testnet.

@@ -72,7 +85,29 @@ Example:
}

func testnetFiles(cmd *cobra.Command, args []string) error {
if len(hostnames) > 0 && len(hostnames) != (nValidators+nNonValidators) {
return fmt.Errorf(
"testnet needs precisely %d hostnames (number of validators plus non-validators) if --hostname parameter is used",
nValidators+nNonValidators,
)
}

config := cfg.DefaultConfig()

// overwrite default config if set and valid
if configFile != "" {
viper.SetConfigFile(configFile)
if err := viper.ReadInConfig(); err != nil {
return err
}
if err := viper.Unmarshal(config); err != nil {
return err
}
if err := config.ValidateBasic(); err != nil {
return err
}
}

genVals := make([]types.GenesisValidator, nValidators)

for i := 0; i < nValidators; i++ {

@@ -162,6 +197,7 @@ func testnetFiles(cmd *cobra.Command, args []string) error {
if populatePersistentPeers {
config.P2P.PersistentPeers = persistentPeers
}
config.Moniker = moniker(i)

cfg.WriteConfigFile(filepath.Join(nodeDir, "config", "config.toml"), config)
}

@@ -171,21 +207,23 @@ func testnetFiles(cmd *cobra.Command, args []string) error {
}

func hostnameOrIP(i int) string {
if startingIPAddress != "" {
ip := net.ParseIP(startingIPAddress)
ip = ip.To4()
if ip == nil {
fmt.Printf("%v: non ipv4 address\n", startingIPAddress)
os.Exit(1)
}

for j := 0; j < i; j++ {
ip[3]++
}
return ip.String()
if len(hostnames) > 0 && i < len(hostnames) {
return hostnames[i]
}
if startingIPAddress == "" {
return fmt.Sprintf("%s%d%s", hostnamePrefix, i, hostnameSuffix)
}
ip := net.ParseIP(startingIPAddress)
ip = ip.To4()
if ip == nil {
fmt.Printf("%v: non ipv4 address\n", startingIPAddress)
os.Exit(1)
}

return fmt.Sprintf("%s%d", hostnamePrefix, i)
for j := 0; j < i; j++ {
ip[3]++
}
return ip.String()
}

func persistentPeersString(config *cfg.Config) (string, error) {

@@ -201,3 +239,20 @@ func persistentPeersString(config *cfg.Config) (string, error) {
}
return strings.Join(persistentPeers, ","), nil
}

func moniker(i int) string {
if randomMonikers {
return randomMoniker()
}
if len(hostnames) > 0 && i < len(hostnames) {
return hostnames[i]
}
if startingIPAddress == "" {
return fmt.Sprintf("%s%d%s", hostnamePrefix, i, hostnameSuffix)
}
return randomMoniker()
}

func randomMoniker() string {
return cmn.HexBytes(cmn.RandBytes(8)).String()
}
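A standalone sketch of the hostname precedence implemented above: explicit --hostname entries win, then hostname-prefix/suffix, and the starting IP is only used when it is set. This is an editorial illustration that loosely mirrors hostnameOrIP, not the command itself.

package main

import (
	"fmt"
	"net"
)

func hostnameOrIP(i int, hostnames []string, prefix, suffix, startingIP string) string {
	if len(hostnames) > 0 && i < len(hostnames) {
		return hostnames[i] // manual override wins
	}
	if startingIP == "" {
		return fmt.Sprintf("%s%d%s", prefix, i, suffix) // e.g. node1.xyz.com
	}
	ip := net.ParseIP(startingIP).To4()
	if ip == nil {
		return "" // the real command exits on a non-IPv4 address
	}
	for j := 0; j < i; j++ {
		ip[3]++ // node i gets startingIP + i
	}
	return ip.String()
}

func main() {
	fmt.Println(hostnameOrIP(1, nil, "node", ".xyz.com", ""))            // node1.xyz.com
	fmt.Println(hostnameOrIP(2, nil, "node", "", "192.168.0.1"))         // 192.168.0.3
	fmt.Println(hostnameOrIP(0, []string{"val-a", "val-b"}, "", "", "")) // val-a
}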
@@ -153,7 +153,18 @@ type BaseConfig struct {
// and verifying their commits
FastSync bool `mapstructure:"fast_sync"`

// Database backend: leveldb | memdb | cleveldb
// Database backend: goleveldb | cleveldb | boltdb
// * goleveldb (github.com/syndtr/goleveldb - most popular implementation)
//   - pure go
//   - stable
// * cleveldb (uses levigo wrapper)
//   - fast
//   - requires gcc
//   - use cleveldb build tag (go build -tags cleveldb)
// * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt)
//   - EXPERIMENTAL
//   - may be faster is some use-cases (random reads - indexer)
//   - use boltdb build tag (go build -tags boltdb)
DBBackend string `mapstructure:"db_backend"`

// Database directory

@@ -207,7 +218,7 @@ func DefaultBaseConfig() BaseConfig {
ProfListenAddress: "",
FastSync: true,
FilterPeers: false,
DBBackend: "leveldb",
DBBackend: "goleveldb",
DBPath: "data",
}
}

@@ -339,6 +350,20 @@ type RPCConfig struct {
// global HTTP write timeout, which applies to all connections and endpoints.
// See https://github.com/tendermint/tendermint/issues/3435
TimeoutBroadcastTxCommit time.Duration `mapstructure:"timeout_broadcast_tx_commit"`

// The name of a file containing certificate that is used to create the HTTPS server.
//
// If the certificate is signed by a certificate authority,
// the certFile should be the concatenation of the server's certificate, any intermediates,
// and the CA's certificate.
//
// NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. Otherwise, HTTP server is run.
TLSCertFile string `mapstructure:"tls_cert_file"`

// The name of a file containing matching private key that is used to create the HTTPS server.
//
// NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. Otherwise, HTTP server is run.
TLSKeyFile string `mapstructure:"tls_key_file"`
}

// DefaultRPCConfig returns a default configuration for the RPC server

@@ -357,6 +382,9 @@ func DefaultRPCConfig() *RPCConfig {
MaxSubscriptionClients: 100,
MaxSubscriptionsPerClient: 5,
TimeoutBroadcastTxCommit: 10 * time.Second,

TLSCertFile: "",
TLSKeyFile: "",
}
}

@@ -395,6 +423,18 @@ func (cfg *RPCConfig) IsCorsEnabled() bool {
return len(cfg.CORSAllowedOrigins) != 0
}

func (cfg RPCConfig) KeyFile() string {
return rootify(filepath.Join(defaultConfigDir, cfg.TLSKeyFile), cfg.RootDir)
}

func (cfg RPCConfig) CertFile() string {
return rootify(filepath.Join(defaultConfigDir, cfg.TLSCertFile), cfg.RootDir)
}

func (cfg RPCConfig) IsTLSEnabled() bool {
return cfg.TLSCertFile != "" && cfg.TLSKeyFile != ""
}

//-----------------------------------------------------------------------------
// P2PConfig
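The new RPC TLS fields only switch the server to HTTPS when both files are configured; otherwise plain HTTP is used. A minimal stand-in (editorial sketch, not the tendermint config package) showing that gating:

package main

import "fmt"

type rpcConfig struct {
	TLSCertFile string
	TLSKeyFile  string
}

// IsTLSEnabled mirrors the check added above: both files must be set.
func (c rpcConfig) IsTLSEnabled() bool {
	return c.TLSCertFile != "" && c.TLSKeyFile != ""
}

func main() {
	fmt.Println(rpcConfig{}.IsTLSEnabled())                                                    // false -> HTTP
	fmt.Println(rpcConfig{TLSCertFile: "server.crt"}.IsTLSEnabled())                           // false -> HTTP
	fmt.Println(rpcConfig{TLSCertFile: "server.crt", TLSKeyFile: "server.key"}.IsTLSEnabled()) // true  -> HTTPS
}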
@@ -28,13 +28,13 @@ func init() {
|
||||
// and panics if it fails.
|
||||
func EnsureRoot(rootDir string) {
|
||||
if err := cmn.EnsureDir(rootDir, DefaultDirPerm); err != nil {
|
||||
cmn.PanicSanity(err.Error())
|
||||
panic(err.Error())
|
||||
}
|
||||
if err := cmn.EnsureDir(filepath.Join(rootDir, defaultConfigDir), DefaultDirPerm); err != nil {
|
||||
cmn.PanicSanity(err.Error())
|
||||
panic(err.Error())
|
||||
}
|
||||
if err := cmn.EnsureDir(filepath.Join(rootDir, defaultDataDir), DefaultDirPerm); err != nil {
|
||||
cmn.PanicSanity(err.Error())
|
||||
panic(err.Error())
|
||||
}
|
||||
|
||||
configFilePath := filepath.Join(rootDir, defaultConfigFilePath)
|
||||
@@ -81,7 +81,18 @@ moniker = "{{ .BaseConfig.Moniker }}"
|
||||
# and verifying their commits
|
||||
fast_sync = {{ .BaseConfig.FastSync }}
|
||||
|
||||
# Database backend: leveldb | memdb | cleveldb
|
||||
# Database backend: goleveldb | cleveldb | boltdb
|
||||
# * goleveldb (github.com/syndtr/goleveldb - most popular implementation)
|
||||
# - pure go
|
||||
# - stable
|
||||
# * cleveldb (uses levigo wrapper)
|
||||
# - fast
|
||||
# - requires gcc
|
||||
# - use cleveldb build tag (go build -tags cleveldb)
|
||||
# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt)
|
||||
# - EXPERIMENTAL
|
||||
# - may be faster is some use-cases (random reads - indexer)
|
||||
# - use boltdb build tag (go build -tags boltdb)
|
||||
db_backend = "{{ .BaseConfig.DBBackend }}"
|
||||
|
||||
# Database directory
|
||||
@@ -181,6 +192,17 @@ max_subscriptions_per_client = {{ .RPC.MaxSubscriptionsPerClient }}
|
||||
# See https://github.com/tendermint/tendermint/issues/3435
|
||||
timeout_broadcast_tx_commit = "{{ .RPC.TimeoutBroadcastTxCommit }}"
|
||||
|
||||
# The name of a file containing certificate that is used to create the HTTPS server.
|
||||
# If the certificate is signed by a certificate authority,
|
||||
# the certFile should be the concatenation of the server's certificate, any intermediates,
|
||||
# and the CA's certificate.
|
||||
# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. Otherwise, HTTP server is run.
|
||||
tls_cert_file = "{{ .RPC.TLSCertFile }}"
|
||||
|
||||
# The name of a file containing matching private key that is used to create the HTTPS server.
|
||||
# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. Otherwise, HTTP server is run.
|
||||
tls_key_file = "{{ .RPC.TLSKeyFile }}"
|
||||
|
||||
##### peer to peer configuration options #####
|
||||
[p2p]
|
||||
|
||||
|
@@ -10,6 +10,7 @@ import (
|
||||
"github.com/stretchr/testify/require"
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
@@ -69,7 +70,7 @@ func TestByzantine(t *testing.T) {
|
||||
blocksSubs[i], err = eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
conR := NewConsensusReactor(css[i], true) // so we dont start the consensus states
|
||||
conR := NewConsensusReactor(css[i], true) // so we don't start the consensus states
|
||||
conR.SetLogger(logger.With("validator", i))
|
||||
conR.SetEventBus(eventBus)
|
||||
|
||||
@@ -81,6 +82,7 @@ func TestByzantine(t *testing.T) {
|
||||
}
|
||||
|
||||
reactors[i] = conRI
|
||||
sm.SaveState(css[i].blockExec.DB(), css[i].state) //for save height 1's validators info
|
||||
}
|
||||
|
||||
defer func() {
|
||||
@@ -268,3 +270,4 @@ func (br *ByzantineReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
|
||||
func (br *ByzantineReactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) {
|
||||
br.reactor.Receive(chID, peer, msgBytes)
|
||||
}
|
||||
func (br *ByzantineReactor) InitPeer(peer p2p.Peer) p2p.Peer { return peer }
|
||||
|
@@ -14,6 +14,8 @@ import (
|
||||
|
||||
"github.com/go-kit/kit/log/term"
|
||||
|
||||
"path"
|
||||
|
||||
abcicli "github.com/tendermint/tendermint/abci/client"
|
||||
"github.com/tendermint/tendermint/abci/example/counter"
|
||||
"github.com/tendermint/tendermint/abci/example/kvstore"
|
||||
@@ -119,6 +121,24 @@ func incrementRound(vss ...*validatorStub) {
|
||||
}
|
||||
}
|
||||
|
||||
type ValidatorStubsByAddress []*validatorStub
|
||||
|
||||
func (vss ValidatorStubsByAddress) Len() int {
|
||||
return len(vss)
|
||||
}
|
||||
|
||||
func (vss ValidatorStubsByAddress) Less(i, j int) bool {
|
||||
return bytes.Compare(vss[i].GetPubKey().Address(), vss[j].GetPubKey().Address()) == -1
|
||||
}
|
||||
|
||||
func (vss ValidatorStubsByAddress) Swap(i, j int) {
|
||||
it := vss[i]
|
||||
vss[i] = vss[j]
|
||||
vss[i].Index = i
|
||||
vss[j] = it
|
||||
vss[j].Index = j
|
||||
}
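ValidatorStubsByAddress above implements sort.Interface so test code can order stubs the way the validator set orders validators (by address). A tiny illustration of the same call shape, with plain strings standing in for addresses (editorial sketch):

package main

import (
	"fmt"
	"sort"
)

type byAddress []string

func (a byAddress) Len() int           { return len(a) }
func (a byAddress) Less(i, j int) bool { return a[i] < a[j] }
func (a byAddress) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }

func main() {
	addrs := byAddress{"C0FFEE", "0BAD", "ABCD"}
	sort.Sort(addrs) // same call shape as sort.Sort(ValidatorStubsByAddress(newVss))
	fmt.Println(addrs)
}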
|
||||
|
||||
//-------------------------------------------------------------------------------
|
||||
// Functions for transitioning the consensus state
|
||||
|
||||
@@ -228,7 +248,7 @@ func validatePrevoteAndPrecommit(t *testing.T, cs *ConsensusState, thisRound, lo
|
||||
}
|
||||
|
||||
func subscribeToVoter(cs *ConsensusState, addr []byte) <-chan tmpubsub.Message {
|
||||
votesSub, err := cs.eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryVote)
|
||||
votesSub, err := cs.eventBus.SubscribeUnbuffered(context.Background(), testSubscriber, types.EventQueryVote)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("failed to subscribe %s to %v", testSubscriber, types.EventQueryVote))
|
||||
}
|
||||
@@ -268,7 +288,7 @@ func newConsensusStateWithConfigAndBlockStore(thisConfig *cfg.Config, state sm.S
|
||||
proxyAppConnCon := abcicli.NewLocalClient(mtx, app)
|
||||
|
||||
// Make Mempool
|
||||
mempool := mempl.NewMempool(thisConfig.Mempool, proxyAppConnMem, 0)
|
||||
mempool := mempl.NewCListMempool(thisConfig.Mempool, proxyAppConnMem, 0)
|
||||
mempool.SetLogger(log.TestingLogger().With("module", "mempool"))
|
||||
if thisConfig.Consensus.WaitForTxs() {
|
||||
mempool.EnableTxsAvailable()
|
||||
@@ -278,7 +298,8 @@ func newConsensusStateWithConfigAndBlockStore(thisConfig *cfg.Config, state sm.S
|
||||
evpool := sm.MockEvidencePool{}
|
||||
|
||||
// Make ConsensusState
|
||||
stateDB := dbm.NewMemDB()
|
||||
stateDB := blockDB
|
||||
sm.SaveState(stateDB, state) //for save height 1's validators info
|
||||
blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyAppConnCon, mempool, evpool)
|
||||
cs := NewConsensusState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool)
|
||||
cs.SetLogger(log.TestingLogger().With("module", "consensus"))
|
||||
@@ -351,7 +372,7 @@ func ensureNoNewUnlock(unlockCh <-chan tmpubsub.Message) {
|
||||
}
|
||||
|
||||
func ensureNoNewTimeout(stepCh <-chan tmpubsub.Message, timeout int64) {
|
||||
timeoutDuration := time.Duration(timeout*5) * time.Nanosecond
|
||||
timeoutDuration := time.Duration(timeout*10) * time.Nanosecond
|
||||
ensureNoNewEvent(
|
||||
stepCh,
|
||||
timeoutDuration,
|
||||
@@ -398,7 +419,7 @@ func ensureNewRound(roundCh <-chan tmpubsub.Message, height int64, round int) {
|
||||
}
|
||||
|
||||
func ensureNewTimeout(timeoutCh <-chan tmpubsub.Message, height int64, round int, timeout int64) {
|
||||
timeoutDuration := time.Duration(timeout*5) * time.Nanosecond
|
||||
timeoutDuration := time.Duration(timeout*10) * time.Nanosecond
|
||||
ensureNewEvent(timeoutCh, height, round, timeoutDuration,
|
||||
"Timeout expired while waiting for NewTimeout event")
|
||||
}
|
||||
@@ -564,7 +585,7 @@ func randConsensusNet(nValidators int, testName string, tickerFunc func() Timeou
|
||||
vals := types.TM2PB.ValidatorUpdates(state.Validators)
|
||||
app.InitChain(abci.RequestInitChain{Validators: vals})
|
||||
|
||||
css[i] = newConsensusStateWithConfig(thisConfig, state, privVals[i], app)
|
||||
css[i] = newConsensusStateWithConfigAndBlockStore(thisConfig, state, privVals[i], app, stateDB)
|
||||
css[i].SetTimeoutTicker(tickerFunc())
|
||||
css[i].SetLogger(logger.With("validator", i, "module", "consensus"))
|
||||
}
|
||||
@@ -576,12 +597,11 @@ func randConsensusNet(nValidators int, testName string, tickerFunc func() Timeou
|
||||
}
|
||||
|
||||
// nPeers = nValidators + nNotValidator
|
||||
func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerFunc func() TimeoutTicker,
|
||||
appFunc func() abci.Application) ([]*ConsensusState, cleanupFunc) {
|
||||
|
||||
func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerFunc func() TimeoutTicker, appFunc func(string) abci.Application) ([]*ConsensusState, *types.GenesisDoc, *cfg.Config, cleanupFunc) {
|
||||
genDoc, privVals := randGenesisDoc(nValidators, false, testMinPower)
|
||||
css := make([]*ConsensusState, nPeers)
|
||||
logger := consensusLogger()
|
||||
var peer0Config *cfg.Config
|
||||
configRootDirs := make([]string, 0, nPeers)
|
||||
for i := 0; i < nPeers; i++ {
|
||||
stateDB := dbm.NewMemDB() // each state needs its own db
|
||||
@@ -589,6 +609,9 @@ func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerF
|
||||
thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
|
||||
configRootDirs = append(configRootDirs, thisConfig.RootDir)
|
||||
ensureDir(filepath.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
|
||||
if i == 0 {
|
||||
peer0Config = thisConfig
|
||||
}
|
||||
var privVal types.PrivValidator
|
||||
if i < nValidators {
|
||||
privVal = privVals[i]
|
||||
@@ -605,15 +628,19 @@ func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerF
|
||||
privVal = privval.GenFilePV(tempKeyFile.Name(), tempStateFile.Name())
|
||||
}
|
||||
|
||||
app := appFunc()
|
||||
app := appFunc(path.Join(config.DBDir(), fmt.Sprintf("%s_%d", testName, i)))
|
||||
vals := types.TM2PB.ValidatorUpdates(state.Validators)
|
||||
if _, ok := app.(*kvstore.PersistentKVStoreApplication); ok {
|
||||
state.Version.Consensus.App = kvstore.ProtocolVersion //simulate handshake, receive app version. If don't do this, replay test will fail
|
||||
}
|
||||
app.InitChain(abci.RequestInitChain{Validators: vals})
|
||||
//sm.SaveState(stateDB,state) //height 1's validatorsInfo already saved in LoadStateFromDBOrGenesisDoc above
|
||||
|
||||
css[i] = newConsensusStateWithConfig(thisConfig, state, privVal, app)
|
||||
css[i].SetTimeoutTicker(tickerFunc())
|
||||
css[i].SetLogger(logger.With("validator", i, "module", "consensus"))
|
||||
}
|
||||
return css, func() {
|
||||
return css, genDoc, peer0Config, func() {
|
||||
for _, dir := range configRootDirs {
|
||||
os.RemoveAll(dir)
|
||||
}
|
||||
@@ -719,3 +746,7 @@ func newPersistentKVStore() abci.Application {
|
||||
}
|
||||
return kvstore.NewPersistentKVStoreApplication(dir)
|
||||
}
|
||||
|
||||
func newPersistentKVStoreWithPath(dbDir string) abci.Application {
|
||||
return kvstore.NewPersistentKVStoreApplication(dbDir)
|
||||
}
|
||||
|
@@ -11,13 +11,15 @@ import (
|
||||
|
||||
"github.com/tendermint/tendermint/abci/example/code"
|
||||
abci "github.com/tendermint/tendermint/abci/types"
|
||||
dbm "github.com/tendermint/tendermint/libs/db"
|
||||
mempl "github.com/tendermint/tendermint/mempool"
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
// for testing
|
||||
func assertMempool(txn txNotifier) sm.Mempool {
|
||||
return txn.(sm.Mempool)
|
||||
func assertMempool(txn txNotifier) mempl.Mempool {
|
||||
return txn.(mempl.Mempool)
|
||||
}
|
||||
|
||||
func TestMempoolNoProgressUntilTxsAvailable(t *testing.T) {
|
||||
@@ -106,7 +108,9 @@ func deliverTxsRange(cs *ConsensusState, start, end int) {
|
||||
|
||||
func TestMempoolTxConcurrentWithCommit(t *testing.T) {
|
||||
state, privVals := randGenesisState(1, false, 10)
|
||||
cs := newConsensusState(state, privVals[0], NewCounterApplication())
|
||||
blockDB := dbm.NewMemDB()
|
||||
cs := newConsensusStateWithConfigAndBlockStore(config, state, privVals[0], NewCounterApplication(), blockDB)
|
||||
sm.SaveState(blockDB, state)
|
||||
height, round := cs.Height, cs.Round
|
||||
newBlockCh := subscribe(cs.eventBus, types.EventQueryNewBlock)
|
||||
|
||||
@@ -129,7 +133,9 @@ func TestMempoolTxConcurrentWithCommit(t *testing.T) {
|
||||
func TestMempoolRmBadTx(t *testing.T) {
|
||||
state, privVals := randGenesisState(1, false, 10)
|
||||
app := NewCounterApplication()
|
||||
cs := newConsensusState(state, privVals[0], app)
|
||||
blockDB := dbm.NewMemDB()
|
||||
cs := newConsensusStateWithConfigAndBlockStore(config, state, privVals[0], app, blockDB)
|
||||
sm.SaveState(blockDB, state)
|
||||
|
||||
// increment the counter by 1
|
||||
txBytes := make([]byte, 8)
|
||||
|
@@ -155,16 +155,24 @@ func (conR *ConsensusReactor) GetChannels() []*p2p.ChannelDescriptor {
}
}

// AddPeer implements Reactor
// InitPeer implements Reactor by creating a state for the peer.
func (conR *ConsensusReactor) InitPeer(peer p2p.Peer) p2p.Peer {
peerState := NewPeerState(peer).SetLogger(conR.Logger)
peer.Set(types.PeerStateKey, peerState)
return peer
}

// AddPeer implements Reactor by spawning multiple gossiping goroutines for the
// peer.
func (conR *ConsensusReactor) AddPeer(peer p2p.Peer) {
if !conR.IsRunning() {
return
}

// Create peerState for peer
peerState := NewPeerState(peer).SetLogger(conR.Logger)
peer.Set(types.PeerStateKey, peerState)

peerState, ok := peer.Get(types.PeerStateKey).(*PeerState)
if !ok {
panic(fmt.Sprintf("peer %v has no state", peer))
}
// Begin routines for this peer.
go conR.gossipDataRoutine(peer, peerState)
go conR.gossipVotesRoutine(peer, peerState)

@@ -177,7 +185,7 @@ func (conR *ConsensusReactor) AddPeer(peer p2p.Peer) {
}
}

// RemovePeer implements Reactor
// RemovePeer is a noop.
func (conR *ConsensusReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
if !conR.IsRunning() {
return

@@ -491,7 +499,7 @@ OUTER_LOOP:
if prs.ProposalBlockParts == nil {
blockMeta := conR.conS.blockStore.LoadBlockMeta(prs.Height)
if blockMeta == nil {
cmn.PanicCrisis(fmt.Sprintf("Failed to load block %d when blockStore is at %d",
panic(fmt.Sprintf("Failed to load block %d when blockStore is at %d",
prs.Height, conR.conS.blockStore.Height()))
}
ps.InitProposalBlockParts(blockMeta.BlockID.PartsHeader)

@@ -1110,7 +1118,7 @@ func (ps *PeerState) ensureCatchupCommitRound(height int64, round int, numValida
NOTE: This is wrong, 'round' could change.
e.g. if orig round is not the same as block LastCommit round.
if ps.CatchupCommitRound != -1 && ps.CatchupCommitRound != round {
cmn.PanicSanity(fmt.Sprintf("Conflicting CatchupCommitRound. Height: %v, Orig: %v, New: %v", height, ps.CatchupCommitRound, round))
panic(fmt.Sprintf("Conflicting CatchupCommitRound. Height: %v, Orig: %v, New: %v", height, ps.CatchupCommitRound, round))
}
*/
if ps.PRS.CatchupCommitRound == round {
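The split above establishes a contract: the switch calls InitPeer before Receive or AddPeer, so Receive can rely on per-peer state existing (and panics if it does not). A compact sketch of that ordering, with illustrative names rather than the real p2p API:

package main

import "fmt"

type reactor struct {
	state map[string]int // peerID -> per-peer state
}

func (r *reactor) InitPeer(id string) { r.state[id] = 0 }
func (r *reactor) AddPeer(id string)  { fmt.Println("gossip routines started for", id) }
func (r *reactor) Receive(id string) {
	if _, ok := r.state[id]; !ok {
		panic(fmt.Sprintf("peer %v has no state", id)) // mirrors the reactor's check
	}
	r.state[id]++
}

func main() {
	r := &reactor{state: map[string]int{}}
	r.InitPeer("peer1")
	r.Receive("peer1") // fine even though AddPeer has not run yet
	r.AddPeer("peer1")
	fmt.Println("messages from peer1:", r.state["peer1"])
}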
@@ -23,6 +23,7 @@ import (
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
mempl "github.com/tendermint/tendermint/mempool"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
"github.com/tendermint/tendermint/p2p/mock"
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
@@ -51,6 +52,10 @@ func startConsensusNet(t *testing.T, css []*ConsensusState, N int) (
|
||||
blocksSub, err := eventBuses[i].Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock)
|
||||
require.NoError(t, err)
|
||||
blocksSubs = append(blocksSubs, blocksSub)
|
||||
|
||||
if css[i].state.LastBlockHeight == 0 { //simulate handle initChain in handshake
|
||||
sm.SaveState(css[i].blockExec.DB(), css[i].state)
|
||||
}
|
||||
}
|
||||
// make connected switches and start all reactors
|
||||
p2p.MakeConnectedSwitches(config.P2P, N, func(i int, s *p2p.Switch) *p2p.Switch {
|
||||
@@ -136,7 +141,7 @@ func TestReactorWithEvidence(t *testing.T) {
|
||||
proxyAppConnCon := abcicli.NewLocalClient(mtx, app)
|
||||
|
||||
// Make Mempool
|
||||
mempool := mempl.NewMempool(thisConfig.Mempool, proxyAppConnMem, 0)
|
||||
mempool := mempl.NewCListMempool(thisConfig.Mempool, proxyAppConnMem, 0)
|
||||
mempool.SetLogger(log.TestingLogger().With("module", "mempool"))
|
||||
if thisConfig.Consensus.WaitForTxs() {
|
||||
mempool.EnableTxsAvailable()
|
||||
@@ -239,6 +244,49 @@ func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) {
|
||||
}, css)
|
||||
}
|
||||
|
||||
func TestReactorReceiveDoesNotPanicIfAddPeerHasntBeenCalledYet(t *testing.T) {
|
||||
N := 1
|
||||
css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
|
||||
defer cleanup()
|
||||
reactors, _, eventBuses := startConsensusNet(t, css, N)
|
||||
defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
|
||||
|
||||
var (
|
||||
reactor = reactors[0]
|
||||
peer = mock.NewPeer(nil)
|
||||
msg = cdc.MustMarshalBinaryBare(&HasVoteMessage{Height: 1, Round: 1, Index: 1, Type: types.PrevoteType})
|
||||
)
|
||||
|
||||
reactor.InitPeer(peer)
|
||||
|
||||
// simulate switch calling Receive before AddPeer
|
||||
assert.NotPanics(t, func() {
|
||||
reactor.Receive(StateChannel, peer, msg)
|
||||
reactor.AddPeer(peer)
|
||||
})
|
||||
}
|
||||
|
||||
func TestReactorReceivePanicsIfInitPeerHasntBeenCalledYet(t *testing.T) {
|
||||
N := 1
|
||||
css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
|
||||
defer cleanup()
|
||||
reactors, _, eventBuses := startConsensusNet(t, css, N)
|
||||
defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
|
||||
|
||||
var (
|
||||
reactor = reactors[0]
|
||||
peer = mock.NewPeer(nil)
|
||||
msg = cdc.MustMarshalBinaryBare(&HasVoteMessage{Height: 1, Round: 1, Index: 1, Type: types.PrevoteType})
|
||||
)
|
||||
|
||||
// we should call InitPeer here
|
||||
|
||||
// simulate switch calling Receive before AddPeer
|
||||
assert.Panics(t, func() {
|
||||
reactor.Receive(StateChannel, peer, msg)
|
||||
})
|
||||
}
|
||||
|
||||
// Test we record stats about votes and block parts from other peers.
|
||||
func TestReactorRecordsVotesAndBlockParts(t *testing.T) {
|
||||
N := 4
|
||||
@@ -329,7 +377,8 @@ func TestReactorVotingPowerChange(t *testing.T) {
|
||||
func TestReactorValidatorSetChanges(t *testing.T) {
|
||||
nPeers := 7
|
||||
nVals := 4
|
||||
css, cleanup := randConsensusNetWithPeers(nVals, nPeers, "consensus_val_set_changes_test", newMockTickerFunc(true), newPersistentKVStore)
|
||||
css, _, _, cleanup := randConsensusNetWithPeers(nVals, nPeers, "consensus_val_set_changes_test", newMockTickerFunc(true), newPersistentKVStoreWithPath)
|
||||
|
||||
defer cleanup()
|
||||
logger := log.TestingLogger()
|
||||
|
||||
|
@@ -13,10 +13,10 @@ import (
|
||||
|
||||
abci "github.com/tendermint/tendermint/abci/types"
|
||||
//auto "github.com/tendermint/tendermint/libs/autofile"
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
|
||||
dbm "github.com/tendermint/tendermint/libs/db"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
|
||||
"github.com/tendermint/tendermint/mock"
|
||||
"github.com/tendermint/tendermint/proxy"
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
@@ -230,6 +230,7 @@ func (h *Handshaker) SetEventBus(eventBus types.BlockEventPublisher) {
|
||||
h.eventBus = eventBus
|
||||
}
|
||||
|
||||
// NBlocks returns the number of blocks applied to the state.
|
||||
func (h *Handshaker) NBlocks() int {
|
||||
return h.nBlocks
|
||||
}
|
||||
@@ -257,13 +258,15 @@ func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error {
|
||||
)
|
||||
|
||||
// Set AppVersion on the state.
|
||||
h.initialState.Version.Consensus.App = version.Protocol(res.AppVersion)
|
||||
sm.SaveState(h.stateDB, h.initialState)
|
||||
if h.initialState.Version.Consensus.App != version.Protocol(res.AppVersion) {
|
||||
h.initialState.Version.Consensus.App = version.Protocol(res.AppVersion)
|
||||
sm.SaveState(h.stateDB, h.initialState)
|
||||
}
|
||||
|
||||
// Replay blocks up to the latest in the blockstore.
|
||||
_, err = h.ReplayBlocks(h.initialState, appHash, blockHeight, proxyApp)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error on replay: %v", err)
|
||||
return fmt.Errorf("error on replay: %v", err)
|
||||
}
|
||||
|
||||
h.logger.Info("Completed ABCI Handshake - Tendermint and App are synced",
|
||||
@@ -274,7 +277,8 @@ func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Replay all blocks since appBlockHeight and ensure the result matches the current state.
|
||||
// ReplayBlocks replays all blocks since appBlockHeight and ensures the result
|
||||
// matches the current state.
|
||||
// Returns the final AppHash or an error.
|
||||
func (h *Handshaker) ReplayBlocks(
|
||||
state sm.State,
|
||||
@@ -319,12 +323,12 @@ func (h *Handshaker) ReplayBlocks(
|
||||
} else {
|
||||
// If validator set is not set in genesis and still empty after InitChain, exit.
|
||||
if len(h.genDoc.Validators) == 0 {
|
||||
return nil, fmt.Errorf("Validator set is nil in genesis and still empty after InitChain")
|
||||
return nil, fmt.Errorf("validator set is nil in genesis and still empty after InitChain")
|
||||
}
|
||||
}
|
||||
|
||||
if res.ConsensusParams != nil {
|
||||
state.ConsensusParams = types.PB2TM.ConsensusParams(res.ConsensusParams, state.ConsensusParams.Block.TimeIotaMs)
|
||||
state.ConsensusParams = state.ConsensusParams.Update(res.ConsensusParams)
|
||||
}
|
||||
sm.SaveState(h.stateDB, state)
|
||||
}
|
||||
@@ -332,7 +336,8 @@ func (h *Handshaker) ReplayBlocks(
|
||||
|
||||
// First handle edge cases and constraints on the storeBlockHeight.
|
||||
if storeBlockHeight == 0 {
|
||||
return appHash, checkAppHash(state, appHash)
|
||||
assertAppHashEqualsOneFromState(appHash, state)
|
||||
return appHash, nil
|
||||
|
||||
} else if storeBlockHeight < appBlockHeight {
|
||||
// the app should never be ahead of the store (but this is under app's control)
|
||||
@@ -340,11 +345,11 @@ func (h *Handshaker) ReplayBlocks(
|
||||
|
||||
} else if storeBlockHeight < stateBlockHeight {
|
||||
// the state should never be ahead of the store (this is under tendermint's control)
|
||||
cmn.PanicSanity(fmt.Sprintf("StateBlockHeight (%d) > StoreBlockHeight (%d)", stateBlockHeight, storeBlockHeight))
|
||||
panic(fmt.Sprintf("StateBlockHeight (%d) > StoreBlockHeight (%d)", stateBlockHeight, storeBlockHeight))
|
||||
|
||||
} else if storeBlockHeight > stateBlockHeight+1 {
|
||||
// store should be at most one ahead of the state (this is under tendermint's control)
|
||||
cmn.PanicSanity(fmt.Sprintf("StoreBlockHeight (%d) > StateBlockHeight + 1 (%d)", storeBlockHeight, stateBlockHeight+1))
|
||||
panic(fmt.Sprintf("StoreBlockHeight (%d) > StateBlockHeight + 1 (%d)", storeBlockHeight, stateBlockHeight+1))
|
||||
}
|
||||
|
||||
var err error
|
||||
@@ -359,7 +364,8 @@ func (h *Handshaker) ReplayBlocks(
|
||||
|
||||
} else if appBlockHeight == storeBlockHeight {
|
||||
// We're good!
|
||||
return appHash, checkAppHash(state, appHash)
|
||||
assertAppHashEqualsOneFromState(appHash, state)
|
||||
return appHash, nil
|
||||
}
|
||||
|
||||
} else if storeBlockHeight == stateBlockHeight+1 {
|
||||
@@ -380,7 +386,7 @@ func (h *Handshaker) ReplayBlocks(
|
||||
return state.AppHash, err
|
||||
|
||||
} else if appBlockHeight == storeBlockHeight {
|
||||
// We ran Commit, but didn't save the state, so replayBlock with mock app
|
||||
// We ran Commit, but didn't save the state, so replayBlock with mock app.
|
||||
abciResponses, err := sm.LoadABCIResponses(h.stateDB, storeBlockHeight)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -393,8 +399,8 @@ func (h *Handshaker) ReplayBlocks(
|
||||
|
||||
}
|
||||
|
||||
cmn.PanicSanity("Should never happen")
|
||||
return nil, nil
|
||||
panic(fmt.Sprintf("uncovered case! appHeight: %d, storeHeight: %d, stateHeight: %d",
|
||||
appBlockHeight, storeBlockHeight, stateBlockHeight))
|
||||
}
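For orientation, here is a compact, purely illustrative restatement of the height comparisons ReplayBlocks makes above (the inputs are the app's last height, the block store's height and the state's height); it does not call into tendermint and the branch descriptions are paraphrased.

package main

import "fmt"

func replayDecision(appH, storeH, stateH int64) string {
	switch {
	case storeH == 0:
		return "nothing to replay, just check the app hash"
	case storeH < appH:
		return "error: app is ahead of the block store"
	case storeH < stateH:
		return "panic: state is ahead of the block store"
	case storeH > stateH+1:
		return "panic: store is more than one block ahead of the state"
	case storeH == stateH:
		return "replay any blocks the app is missing (app may already be caught up)"
	case storeH == stateH+1:
		return "replay up to the store height, then apply or re-commit the last block"
	default:
		return "uncovered case"
	}
}

func main() {
	fmt.Println(replayDecision(5, 6, 5)) // typical restart: one block left to re-apply
}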
|
||||
|
||||
func (h *Handshaker) replayBlocks(state sm.State, proxyApp proxy.AppConns, appBlockHeight, storeBlockHeight int64, mutateState bool) ([]byte, error) {
|
||||
@@ -417,7 +423,12 @@ func (h *Handshaker) replayBlocks(state sm.State, proxyApp proxy.AppConns, appBl
|
||||
for i := appBlockHeight + 1; i <= finalBlock; i++ {
|
||||
h.logger.Info("Applying block", "height", i)
|
||||
block := h.store.LoadBlock(i)
|
||||
appHash, err = sm.ExecCommitBlock(proxyApp.Consensus(), block, h.logger, state.LastValidators, h.stateDB)
|
||||
// Extra check to ensure the app was not changed in a way it shouldn't have.
|
||||
if len(appHash) > 0 {
|
||||
assertAppHashEqualsOneFromBlock(appHash, block)
|
||||
}
|
||||
|
||||
appHash, err = sm.ExecCommitBlock(proxyApp.Consensus(), block, h.logger, h.stateDB)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -434,7 +445,8 @@ func (h *Handshaker) replayBlocks(state sm.State, proxyApp proxy.AppConns, appBl
|
||||
appHash = state.AppHash
|
||||
}
|
||||
|
||||
return appHash, checkAppHash(state, appHash)
|
||||
assertAppHashEqualsOneFromState(appHash, state)
|
||||
return appHash, nil
|
||||
}
|
||||
|
||||
// ApplyBlock on the proxyApp with the last block.
|
||||
@@ -442,7 +454,7 @@ func (h *Handshaker) replayBlock(state sm.State, height int64, proxyApp proxy.Ap
|
||||
block := h.store.LoadBlock(height)
|
||||
meta := h.store.LoadBlockMeta(height)
|
||||
|
||||
blockExec := sm.NewBlockExecutor(h.stateDB, h.logger, proxyApp, sm.MockMempool{}, sm.MockEvidencePool{})
|
||||
blockExec := sm.NewBlockExecutor(h.stateDB, h.logger, proxyApp, mock.Mempool{}, sm.MockEvidencePool{})
|
||||
blockExec.SetEventBus(h.eventBus)
|
||||
|
||||
var err error
|
||||
@@ -456,11 +468,26 @@ func (h *Handshaker) replayBlock(state sm.State, height int64, proxyApp proxy.Ap
|
||||
return state, nil
|
||||
}
|
||||
|
||||
func checkAppHash(state sm.State, appHash []byte) error {
|
||||
if !bytes.Equal(state.AppHash, appHash) {
|
||||
panic(fmt.Errorf("Tendermint state.AppHash does not match AppHash after replay. Got %X, expected %X", appHash, state.AppHash).Error())
|
||||
func assertAppHashEqualsOneFromBlock(appHash []byte, block *types.Block) {
|
||||
if !bytes.Equal(appHash, block.AppHash) {
|
||||
panic(fmt.Sprintf(`block.AppHash does not match AppHash after replay. Got %X, expected %X.
|
||||
|
||||
Block: %v
|
||||
`,
|
||||
appHash, block.AppHash, block))
|
||||
}
|
||||
}
|
||||
|
||||
func assertAppHashEqualsOneFromState(appHash []byte, state sm.State) {
|
||||
if !bytes.Equal(appHash, state.AppHash) {
|
||||
panic(fmt.Sprintf(`state.AppHash does not match AppHash after replay. Got
|
||||
%X, expected %X.
|
||||
|
||||
State: %v
|
||||
|
||||
Did you reset Tendermint without resetting your application's data?`,
|
||||
appHash, state.AppHash, state))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
//--------------------------------------------------------------------------------
|
||||
@@ -491,6 +518,9 @@ type mockProxyApp struct {
|
||||
func (mock *mockProxyApp) DeliverTx(tx []byte) abci.ResponseDeliverTx {
|
||||
r := mock.abciResponses.DeliverTx[mock.txCount]
|
||||
mock.txCount++
|
||||
if r == nil { //it could be nil because of amino unMarshall, it will cause an empty ResponseDeliverTx to become nil
|
||||
return abci.ResponseDeliverTx{}
|
||||
}
|
||||
return *r
|
||||
}
|
||||
|
||||
|
@@ -16,6 +16,7 @@ import (
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
dbm "github.com/tendermint/tendermint/libs/db"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/mock"
|
||||
"github.com/tendermint/tendermint/proxy"
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
@@ -312,7 +313,7 @@ func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusCo
|
||||
cmn.Exit(fmt.Sprintf("Error on handshake: %v", err))
|
||||
}
|
||||
|
||||
mempool, evpool := sm.MockMempool{}, sm.MockEvidencePool{}
|
||||
mempool, evpool := mock.Mempool{}, sm.MockEvidencePool{}
|
||||
blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool)
|
||||
|
||||
consensusState := NewConsensusState(csConfig, state.Copy(), blockExec,
|
||||
|
@@ -7,7 +7,7 @@ import (
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"testing"
|
||||
"time"
|
||||
@@ -15,16 +15,21 @@ import (
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"sort"
|
||||
|
||||
"github.com/tendermint/tendermint/abci/example/kvstore"
|
||||
abci "github.com/tendermint/tendermint/abci/types"
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
"github.com/tendermint/tendermint/crypto"
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
dbm "github.com/tendermint/tendermint/libs/db"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/mock"
|
||||
"github.com/tendermint/tendermint/privval"
|
||||
"github.com/tendermint/tendermint/proxy"
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
tmtime "github.com/tendermint/tendermint/types/time"
|
||||
"github.com/tendermint/tendermint/version"
|
||||
)
|
||||
|
||||
@@ -88,7 +93,7 @@ func startNewConsensusStateAndWaitForBlock(t *testing.T, consensusReplayConfig *
|
||||
}
|
||||
}
|
||||
|
||||
func sendTxs(cs *ConsensusState, ctx context.Context) {
|
||||
func sendTxs(ctx context.Context, cs *ConsensusState) {
|
||||
for i := 0; i < 256; i++ {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
@@ -113,7 +118,7 @@ func TestWALCrash(t *testing.T) {
|
||||
1},
|
||||
{"many non-empty blocks",
|
||||
func(stateDB dbm.DB, cs *ConsensusState, ctx context.Context) {
|
||||
go sendTxs(cs, ctx)
|
||||
go sendTxs(ctx, cs)
|
||||
},
|
||||
3},
|
||||
}
|
||||
@@ -138,10 +143,10 @@ LOOP:
|
||||
|
||||
// create consensus state from a clean slate
|
||||
logger := log.NewNopLogger()
|
||||
stateDB := dbm.NewMemDB()
|
||||
blockDB := dbm.NewMemDB()
|
||||
stateDB := blockDB
|
||||
state, _ := sm.MakeGenesisStateFromFile(consensusReplayConfig.GenesisFile())
|
||||
privValidator := loadPrivValidator(consensusReplayConfig)
|
||||
blockDB := dbm.NewMemDB()
|
||||
cs := newConsensusStateWithConfigAndBlockStore(consensusReplayConfig, state, privValidator, kvstore.NewKVStoreApplication(), blockDB)
|
||||
cs.SetLogger(logger)
|
||||
|
||||
@@ -260,15 +265,23 @@ func (w *crashingWAL) Stop() error { return w.next.Stop() }
|
||||
func (w *crashingWAL) Wait() { w.next.Wait() }
|
||||
|
||||
//------------------------------------------------------------------------------------------
|
||||
// Handshake Tests
|
||||
type testSim struct {
|
||||
GenesisState sm.State
|
||||
Config *cfg.Config
|
||||
Chain []*types.Block
|
||||
Commits []*types.Commit
|
||||
CleanupFunc cleanupFunc
|
||||
}
|
||||
|
||||
const (
|
||||
NUM_BLOCKS = 6
|
||||
numBlocks = 6
|
||||
)
|
||||
|
||||
var (
|
||||
mempool = sm.MockMempool{}
|
||||
mempool = mock.Mempool{}
|
||||
evpool = sm.MockEvidencePool{}
|
||||
|
||||
sim testSim
|
||||
)
|
||||
|
||||
//---------------------------------------
|
||||
@@ -279,93 +292,356 @@ var (
|
||||
// 2 - save block and committed but state is behind
|
||||
var modes = []uint{0, 1, 2}
|
||||
|
||||
// This is actually not a test, it's for storing validator change tx data for testHandshakeReplay
|
||||
func TestSimulateValidatorsChange(t *testing.T) {
|
||||
nPeers := 7
|
||||
nVals := 4
|
||||
css, genDoc, config, cleanup := randConsensusNetWithPeers(nVals, nPeers, "replay_test", newMockTickerFunc(true), newPersistentKVStoreWithPath)
|
||||
sim.Config = config
|
||||
sim.GenesisState, _ = sm.MakeGenesisState(genDoc)
|
||||
sim.CleanupFunc = cleanup
|
||||
|
||||
partSize := types.BlockPartSizeBytes
|
||||
|
||||
newRoundCh := subscribe(css[0].eventBus, types.EventQueryNewRound)
|
||||
proposalCh := subscribe(css[0].eventBus, types.EventQueryCompleteProposal)
|
||||
|
||||
vss := make([]*validatorStub, nPeers)
|
||||
for i := 0; i < nPeers; i++ {
|
||||
vss[i] = NewValidatorStub(css[i].privValidator, i)
|
||||
}
|
||||
height, round := css[0].Height, css[0].Round
|
||||
// start the machine
|
||||
startTestRound(css[0], height, round)
|
||||
incrementHeight(vss...)
|
||||
ensureNewRound(newRoundCh, height, 0)
|
||||
ensureNewProposal(proposalCh, height, round)
|
||||
rs := css[0].GetRoundState()
|
||||
signAddVotes(css[0], types.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vss[1:nVals]...)
|
||||
ensureNewRound(newRoundCh, height+1, 0)
|
||||
|
||||
//height 2
|
||||
height++
|
||||
incrementHeight(vss...)
|
||||
newValidatorPubKey1 := css[nVals].privValidator.GetPubKey()
|
||||
valPubKey1ABCI := types.TM2PB.PubKey(newValidatorPubKey1)
|
||||
newValidatorTx1 := kvstore.MakeValSetChangeTx(valPubKey1ABCI, testMinPower)
|
||||
err := assertMempool(css[0].txNotifier).CheckTx(newValidatorTx1, nil)
|
||||
assert.Nil(t, err)
|
||||
propBlock, _ := css[0].createProposalBlock() //changeProposer(t, cs1, vs2)
|
||||
propBlockParts := propBlock.MakePartSet(partSize)
|
||||
blockID := types.BlockID{Hash: propBlock.Hash(), PartsHeader: propBlockParts.Header()}
|
||||
proposal := types.NewProposal(vss[1].Height, round, -1, blockID)
|
||||
if err := vss[1].SignProposal(config.ChainID(), proposal); err != nil {
|
||||
t.Fatal("failed to sign bad proposal", err)
|
||||
}
|
||||
|
||||
// set the proposal block
|
||||
if err := css[0].SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ensureNewProposal(proposalCh, height, round)
|
||||
rs = css[0].GetRoundState()
|
||||
signAddVotes(css[0], types.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vss[1:nVals]...)
|
||||
ensureNewRound(newRoundCh, height+1, 0)
|
||||
|
||||
//height 3
|
||||
height++
|
||||
incrementHeight(vss...)
|
||||
updateValidatorPubKey1 := css[nVals].privValidator.GetPubKey()
|
||||
updatePubKey1ABCI := types.TM2PB.PubKey(updateValidatorPubKey1)
|
||||
updateValidatorTx1 := kvstore.MakeValSetChangeTx(updatePubKey1ABCI, 25)
|
||||
err = assertMempool(css[0].txNotifier).CheckTx(updateValidatorTx1, nil)
|
||||
assert.Nil(t, err)
|
||||
propBlock, _ = css[0].createProposalBlock() //changeProposer(t, cs1, vs2)
|
||||
propBlockParts = propBlock.MakePartSet(partSize)
|
||||
blockID = types.BlockID{Hash: propBlock.Hash(), PartsHeader: propBlockParts.Header()}
|
||||
proposal = types.NewProposal(vss[2].Height, round, -1, blockID)
|
||||
if err := vss[2].SignProposal(config.ChainID(), proposal); err != nil {
|
||||
t.Fatal("failed to sign bad proposal", err)
|
||||
}
|
||||
|
||||
// set the proposal block
|
||||
if err := css[0].SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ensureNewProposal(proposalCh, height, round)
|
||||
rs = css[0].GetRoundState()
|
||||
signAddVotes(css[0], types.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vss[1:nVals]...)
|
||||
ensureNewRound(newRoundCh, height+1, 0)
|
||||
|
||||
//height 4
|
||||
height++
|
||||
incrementHeight(vss...)
|
||||
newValidatorPubKey2 := css[nVals+1].privValidator.GetPubKey()
|
||||
newVal2ABCI := types.TM2PB.PubKey(newValidatorPubKey2)
|
||||
newValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, testMinPower)
|
||||
err = assertMempool(css[0].txNotifier).CheckTx(newValidatorTx2, nil)
|
||||
assert.Nil(t, err)
|
||||
newValidatorPubKey3 := css[nVals+2].privValidator.GetPubKey()
|
||||
newVal3ABCI := types.TM2PB.PubKey(newValidatorPubKey3)
|
||||
newValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, testMinPower)
|
||||
err = assertMempool(css[0].txNotifier).CheckTx(newValidatorTx3, nil)
|
||||
assert.Nil(t, err)
|
||||
propBlock, _ = css[0].createProposalBlock() //changeProposer(t, cs1, vs2)
|
||||
propBlockParts = propBlock.MakePartSet(partSize)
|
||||
blockID = types.BlockID{Hash: propBlock.Hash(), PartsHeader: propBlockParts.Header()}
|
||||
newVss := make([]*validatorStub, nVals+1)
|
||||
copy(newVss, vss[:nVals+1])
|
||||
sort.Sort(ValidatorStubsByAddress(newVss))
|
||||
selfIndex := 0
|
||||
for i, vs := range newVss {
|
||||
if vs.GetPubKey().Equals(css[0].privValidator.GetPubKey()) {
|
||||
selfIndex = i
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
proposal = types.NewProposal(vss[3].Height, round, -1, blockID)
|
||||
if err := vss[3].SignProposal(config.ChainID(), proposal); err != nil {
|
||||
t.Fatal("failed to sign bad proposal", err)
|
||||
}
|
||||
|
||||
// set the proposal block
|
||||
if err := css[0].SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ensureNewProposal(proposalCh, height, round)
|
||||
|
||||
removeValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, 0)
|
||||
err = assertMempool(css[0].txNotifier).CheckTx(removeValidatorTx2, nil)
|
||||
assert.Nil(t, err)
|
||||
|
||||
rs = css[0].GetRoundState()
|
||||
for i := 0; i < nVals+1; i++ {
|
||||
if i == selfIndex {
|
||||
continue
|
||||
}
|
||||
signAddVotes(css[0], types.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), newVss[i])
|
||||
}
|
||||
|
||||
ensureNewRound(newRoundCh, height+1, 0)
|
||||
|
||||
//height 5
|
||||
height++
|
||||
incrementHeight(vss...)
|
||||
ensureNewProposal(proposalCh, height, round)
|
||||
rs = css[0].GetRoundState()
|
||||
for i := 0; i < nVals+1; i++ {
|
||||
if i == selfIndex {
|
||||
continue
|
||||
}
|
||||
signAddVotes(css[0], types.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), newVss[i])
|
||||
}
|
||||
ensureNewRound(newRoundCh, height+1, 0)
|
||||
|
||||
//height 6
|
||||
height++
|
||||
incrementHeight(vss...)
|
||||
removeValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, 0)
|
||||
err = assertMempool(css[0].txNotifier).CheckTx(removeValidatorTx3, nil)
|
||||
assert.Nil(t, err)
|
||||
propBlock, _ = css[0].createProposalBlock() //changeProposer(t, cs1, vs2)
|
||||
propBlockParts = propBlock.MakePartSet(partSize)
|
||||
blockID = types.BlockID{Hash: propBlock.Hash(), PartsHeader: propBlockParts.Header()}
|
||||
newVss = make([]*validatorStub, nVals+3)
|
||||
copy(newVss, vss[:nVals+3])
|
||||
sort.Sort(ValidatorStubsByAddress(newVss))
|
||||
for i, vs := range newVss {
|
||||
if vs.GetPubKey().Equals(css[0].privValidator.GetPubKey()) {
|
||||
selfIndex = i
|
||||
break
|
||||
}
|
||||
}
|
||||
proposal = types.NewProposal(vss[1].Height, round, -1, blockID)
|
||||
if err := vss[1].SignProposal(config.ChainID(), proposal); err != nil {
|
||||
t.Fatal("failed to sign bad proposal", err)
|
||||
}
|
||||
|
||||
// set the proposal block
|
||||
if err := css[0].SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ensureNewProposal(proposalCh, height, round)
|
||||
rs = css[0].GetRoundState()
|
||||
for i := 0; i < nVals+3; i++ {
|
||||
if i == selfIndex {
|
||||
continue
|
||||
}
|
||||
signAddVotes(css[0], types.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), newVss[i])
|
||||
}
|
||||
ensureNewRound(newRoundCh, height+1, 0)
|
||||
|
||||
sim.Chain = make([]*types.Block, 0)
|
||||
sim.Commits = make([]*types.Commit, 0)
|
||||
for i := 1; i <= numBlocks; i++ {
|
||||
sim.Chain = append(sim.Chain, css[0].blockStore.LoadBlock(int64(i)))
|
||||
sim.Commits = append(sim.Commits, css[0].blockStore.LoadBlockCommit(int64(i)))
|
||||
}
|
||||
}
|
||||

// Sync from scratch
func TestHandshakeReplayAll(t *testing.T) {
	for i, m := range modes {
		config := ResetConfig(fmt.Sprintf("%s_%v", t.Name(), i))
		defer os.RemoveAll(config.RootDir)
		testHandshakeReplay(t, config, 0, m)
	for _, m := range modes {
		testHandshakeReplay(t, config, 0, m, false)
	}
	for _, m := range modes {
		testHandshakeReplay(t, config, 0, m, true)
	}
}

// Sync many, not from scratch
func TestHandshakeReplaySome(t *testing.T) {
	for i, m := range modes {
		config := ResetConfig(fmt.Sprintf("%s_%v", t.Name(), i))
		defer os.RemoveAll(config.RootDir)
		testHandshakeReplay(t, config, 1, m)
	for _, m := range modes {
		testHandshakeReplay(t, config, 1, m, false)
	}
	for _, m := range modes {
		testHandshakeReplay(t, config, 1, m, true)
	}
}

// Sync from lagging by one
func TestHandshakeReplayOne(t *testing.T) {
	for i, m := range modes {
		config := ResetConfig(fmt.Sprintf("%s_%v", t.Name(), i))
		defer os.RemoveAll(config.RootDir)
		testHandshakeReplay(t, config, NUM_BLOCKS-1, m)
	for _, m := range modes {
		testHandshakeReplay(t, config, numBlocks-1, m, false)
	}
	for _, m := range modes {
		testHandshakeReplay(t, config, numBlocks-1, m, true)
	}
}

// Sync from caught up
func TestHandshakeReplayNone(t *testing.T) {
	for i, m := range modes {
		config := ResetConfig(fmt.Sprintf("%s_%v", t.Name(), i))
		defer os.RemoveAll(config.RootDir)
		testHandshakeReplay(t, config, NUM_BLOCKS, m)
	for _, m := range modes {
		testHandshakeReplay(t, config, numBlocks, m, false)
	}
	for _, m := range modes {
		testHandshakeReplay(t, config, numBlocks, m, true)
	}
}

// Test mockProxyApp should not panic when the app returns ABCIResponses with some empty ResponseDeliverTx
func TestMockProxyApp(t *testing.T) {
	sim.CleanupFunc() // clean the test env created in TestSimulateValidatorsChange
	logger := log.TestingLogger()
	var validTxs, invalidTxs = 0, 0
	txIndex := 0

assert.NotPanics(t, func() {
|
||||
abciResWithEmptyDeliverTx := new(sm.ABCIResponses)
|
||||
abciResWithEmptyDeliverTx.DeliverTx = make([]*abci.ResponseDeliverTx, 0)
|
||||
abciResWithEmptyDeliverTx.DeliverTx = append(abciResWithEmptyDeliverTx.DeliverTx, &abci.ResponseDeliverTx{})
|
||||
|
||||
// called when saveABCIResponses:
|
||||
bytes := cdc.MustMarshalBinaryBare(abciResWithEmptyDeliverTx)
|
||||
loadedAbciRes := new(sm.ABCIResponses)
|
||||
|
||||
// this also happens sm.LoadABCIResponses
|
||||
err := cdc.UnmarshalBinaryBare(bytes, loadedAbciRes)
|
||||
require.NoError(t, err)
|
||||
|
||||
mock := newMockProxyApp([]byte("mock_hash"), loadedAbciRes)
|
||||
|
||||
abciRes := new(sm.ABCIResponses)
|
||||
abciRes.DeliverTx = make([]*abci.ResponseDeliverTx, len(loadedAbciRes.DeliverTx))
|
||||
// Execute transactions and get hash.
|
||||
proxyCb := func(req *abci.Request, res *abci.Response) {
|
||||
switch r := res.Value.(type) {
|
||||
case *abci.Response_DeliverTx:
|
||||
// TODO: make use of res.Log
|
||||
// TODO: make use of this info
|
||||
// Blocks may include invalid txs.
|
||||
txRes := r.DeliverTx
|
||||
if txRes.Code == abci.CodeTypeOK {
|
||||
validTxs++
|
||||
} else {
|
||||
logger.Debug("Invalid tx", "code", txRes.Code, "log", txRes.Log)
|
||||
invalidTxs++
|
||||
}
|
||||
abciRes.DeliverTx[txIndex] = txRes
|
||||
txIndex++
|
||||
}
|
||||
}
|
||||
mock.SetResponseCallback(proxyCb)
|
||||
|
||||
someTx := []byte("tx")
|
||||
mock.DeliverTxAsync(someTx)
|
||||
})
|
||||
assert.True(t, validTxs == 1)
|
||||
assert.True(t, invalidTxs == 0)
|
||||
}
|
||||

func tempWALWithData(data []byte) string {
	walFile, err := ioutil.TempFile("", "wal")
	if err != nil {
		panic(fmt.Errorf("failed to create temp WAL file: %v", err))
		panic(fmt.Sprintf("failed to create temp WAL file: %v", err))
	}
	_, err = walFile.Write(data)
	if err != nil {
		panic(fmt.Errorf("failed to write to temp WAL file: %v", err))
		panic(fmt.Sprintf("failed to write to temp WAL file: %v", err))
	}
	if err := walFile.Close(); err != nil {
		panic(fmt.Errorf("failed to close temp WAL file: %v", err))
		panic(fmt.Sprintf("failed to close temp WAL file: %v", err))
	}
	return walFile.Name()
}

// Make some blocks. Start a fresh app and apply nBlocks blocks. Then restart the app and sync it up with the remaining blocks
|
||||
func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uint) {
|
||||
walBody, err := WALWithNBlocks(t, NUM_BLOCKS)
|
||||
require.NoError(t, err)
|
||||
walFile := tempWALWithData(walBody)
|
||||
config.Consensus.SetWalFile(walFile)
|
||||
func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uint, testValidatorsChange bool) {
|
||||
var chain []*types.Block
|
||||
var commits []*types.Commit
|
||||
var store *mockBlockStore
|
||||
var stateDB dbm.DB
|
||||
var genisisState sm.State
|
||||
if testValidatorsChange {
|
||||
testConfig := ResetConfig(fmt.Sprintf("%s_%v_m", t.Name(), mode))
|
||||
defer os.RemoveAll(testConfig.RootDir)
|
||||
stateDB = dbm.NewMemDB()
|
||||
genisisState = sim.GenesisState
|
||||
config = sim.Config
|
||||
chain = sim.Chain
|
||||
commits = sim.Commits
|
||||
store = newMockBlockStore(config, genisisState.ConsensusParams)
|
||||
} else { //test single node
|
||||
testConfig := ResetConfig(fmt.Sprintf("%s_%v_s", t.Name(), mode))
|
||||
defer os.RemoveAll(testConfig.RootDir)
|
||||
walBody, err := WALWithNBlocks(t, numBlocks)
|
||||
require.NoError(t, err)
|
||||
walFile := tempWALWithData(walBody)
|
||||
config.Consensus.SetWalFile(walFile)
|
||||
|
||||
privVal := privval.LoadFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile())
|
||||
privVal := privval.LoadFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile())
|
||||
|
||||
wal, err := NewWAL(walFile)
|
||||
require.NoError(t, err)
|
||||
wal.SetLogger(log.TestingLogger())
|
||||
err = wal.Start()
|
||||
require.NoError(t, err)
|
||||
defer wal.Stop()
|
||||
wal, err := NewWAL(walFile)
|
||||
require.NoError(t, err)
|
||||
wal.SetLogger(log.TestingLogger())
|
||||
err = wal.Start()
|
||||
require.NoError(t, err)
|
||||
defer wal.Stop()
|
||||
|
||||
chain, commits, err := makeBlockchainFromWAL(wal)
|
||||
require.NoError(t, err)
|
||||
|
||||
stateDB, state, store := stateAndStore(config, privVal.GetPubKey(), kvstore.ProtocolVersion)
|
||||
chain, commits, err = makeBlockchainFromWAL(wal)
|
||||
require.NoError(t, err)
|
||||
stateDB, genisisState, store = stateAndStore(config, privVal.GetPubKey(), kvstore.ProtocolVersion)
|
||||
}
|
||||
store.chain = chain
|
||||
store.commits = commits
|
||||
|
||||
state := genisisState.Copy()
|
||||
// run the chain through state.ApplyBlock to build up the tendermint state
|
||||
state = buildTMStateFromChain(config, stateDB, state, chain, mode)
|
||||
state = buildTMStateFromChain(config, stateDB, state, chain, nBlocks, mode)
|
||||
latestAppHash := state.AppHash
|
||||
|
||||
// make a new client creator
|
||||
kvstoreApp := kvstore.NewPersistentKVStoreApplication(path.Join(config.DBDir(), "2"))
|
||||
kvstoreApp := kvstore.NewPersistentKVStoreApplication(filepath.Join(config.DBDir(), fmt.Sprintf("replay_test_%d_%d_a", nBlocks, mode)))
|
||||
|
||||
clientCreator2 := proxy.NewLocalClientCreator(kvstoreApp)
|
||||
if nBlocks > 0 {
|
||||
// run nBlocks against a new client to build up the app state.
|
||||
// use a throwaway tendermint state
|
||||
proxyApp := proxy.NewAppConns(clientCreator2)
|
||||
stateDB, state, _ := stateAndStore(config, privVal.GetPubKey(), kvstore.ProtocolVersion)
|
||||
buildAppStateFromChain(proxyApp, stateDB, state, chain, nBlocks, mode)
|
||||
stateDB1 := dbm.NewMemDB()
|
||||
sm.SaveState(stateDB1, genisisState)
|
||||
buildAppStateFromChain(proxyApp, stateDB1, genisisState, chain, nBlocks, mode)
|
||||
}
|
||||
|
||||
// now start the app using the handshake - it should sync
|
||||
@@ -391,8 +667,8 @@ func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uin
|
||||
t.Fatalf("Expected app hashes to match after handshake/replay. got %X, expected %X", res.LastBlockAppHash, latestAppHash)
|
||||
}
|
||||
|
||||
expectedBlocksToSync := NUM_BLOCKS - nBlocks
|
||||
if nBlocks == NUM_BLOCKS && mode > 0 {
|
||||
expectedBlocksToSync := numBlocks - nBlocks
|
||||
if nBlocks == numBlocks && mode > 0 {
|
||||
expectedBlocksToSync++
|
||||
} else if nBlocks > 0 && mode == 1 {
|
||||
expectedBlocksToSync++
|
||||
@@ -407,7 +683,7 @@ func applyBlock(stateDB dbm.DB, st sm.State, blk *types.Block, proxyApp proxy.Ap
|
||||
testPartSize := types.BlockPartSizeBytes
|
||||
blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool)
|
||||
|
||||
blkID := types.BlockID{blk.Hash(), blk.MakePartSet(testPartSize).Header()}
|
||||
blkID := types.BlockID{Hash: blk.Hash(), PartsHeader: blk.MakePartSet(testPartSize).Header()}
|
||||
newState, err := blockExec.ApplyBlock(st, blkID, blk)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
@@ -423,12 +699,14 @@ func buildAppStateFromChain(proxyApp proxy.AppConns, stateDB dbm.DB,
|
||||
}
|
||||
defer proxyApp.Stop()
|
||||
|
||||
state.Version.Consensus.App = kvstore.ProtocolVersion //simulate handshake, receive app version
|
||||
validators := types.TM2PB.ValidatorUpdates(state.Validators)
|
||||
if _, err := proxyApp.Consensus().InitChainSync(abci.RequestInitChain{
|
||||
Validators: validators,
|
||||
}); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
sm.SaveState(stateDB, state) //save height 1's validatorsInfo
|
||||
|
||||
switch mode {
|
||||
case 0:
|
||||
@@ -451,21 +729,23 @@ func buildAppStateFromChain(proxyApp proxy.AppConns, stateDB dbm.DB,
|
||||
|
||||
}
|
||||
|
||||
func buildTMStateFromChain(config *cfg.Config, stateDB dbm.DB, state sm.State, chain []*types.Block, mode uint) sm.State {
|
||||
func buildTMStateFromChain(config *cfg.Config, stateDB dbm.DB, state sm.State, chain []*types.Block, nBlocks int, mode uint) sm.State {
|
||||
// run the whole chain against this client to build up the tendermint state
|
||||
clientCreator := proxy.NewLocalClientCreator(kvstore.NewPersistentKVStoreApplication(path.Join(config.DBDir(), "1")))
|
||||
clientCreator := proxy.NewLocalClientCreator(kvstore.NewPersistentKVStoreApplication(filepath.Join(config.DBDir(), fmt.Sprintf("replay_test_%d_%d_t", nBlocks, mode))))
|
||||
proxyApp := proxy.NewAppConns(clientCreator)
|
||||
if err := proxyApp.Start(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer proxyApp.Stop()
|
||||
|
||||
state.Version.Consensus.App = kvstore.ProtocolVersion //simulate handshake, receive app version
|
||||
validators := types.TM2PB.ValidatorUpdates(state.Validators)
|
||||
if _, err := proxyApp.Consensus().InitChainSync(abci.RequestInitChain{
|
||||
Validators: validators,
|
||||
}); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
sm.SaveState(stateDB, state) //save height 1's validatorsInfo
|
||||
|
||||
switch mode {
|
||||
case 0:
|
||||
@@ -489,28 +769,162 @@ func buildTMStateFromChain(config *cfg.Config, stateDB dbm.DB, state sm.State, c
|
||||
return state
|
||||
}
|
||||
|
||||
func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) {
|
||||
// 1. Initialize tendermint and commit 3 blocks with the following app hashes:
|
||||
// - 0x01
|
||||
// - 0x02
|
||||
// - 0x03
|
||||
config := ResetConfig("handshake_test_")
|
||||
defer os.RemoveAll(config.RootDir)
|
||||
privVal := privval.LoadFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile())
|
||||
const appVersion = 0x0
|
||||
stateDB, state, store := stateAndStore(config, privVal.GetPubKey(), appVersion)
|
||||
genDoc, _ := sm.MakeGenesisDocFromFile(config.GenesisFile())
|
||||
state.LastValidators = state.Validators.Copy()
|
||||
// mode = 0 for committing all the blocks
|
||||
blocks := makeBlocks(3, &state, privVal)
|
||||
store.chain = blocks
|
||||
|
||||
// 2. Tendermint must panic if app returns wrong hash for the first block
|
||||
// - RANDOM HASH
|
||||
// - 0x02
|
||||
// - 0x03
|
||||
{
|
||||
app := &badApp{numBlocks: 3, allHashesAreWrong: true}
|
||||
clientCreator := proxy.NewLocalClientCreator(app)
|
||||
proxyApp := proxy.NewAppConns(clientCreator)
|
||||
err := proxyApp.Start()
|
||||
require.NoError(t, err)
|
||||
defer proxyApp.Stop()
|
||||
|
||||
assert.Panics(t, func() {
|
||||
h := NewHandshaker(stateDB, state, store, genDoc)
|
||||
h.Handshake(proxyApp)
|
||||
})
|
||||
}
|
||||
|
||||
// 3. Tendermint must panic if app returns wrong hash for the last block
|
||||
// - 0x01
|
||||
// - 0x02
|
||||
// - RANDOM HASH
|
||||
{
|
||||
app := &badApp{numBlocks: 3, onlyLastHashIsWrong: true}
|
||||
clientCreator := proxy.NewLocalClientCreator(app)
|
||||
proxyApp := proxy.NewAppConns(clientCreator)
|
||||
err := proxyApp.Start()
|
||||
require.NoError(t, err)
|
||||
defer proxyApp.Stop()
|
||||
|
||||
assert.Panics(t, func() {
|
||||
h := NewHandshaker(stateDB, state, store, genDoc)
|
||||
h.Handshake(proxyApp)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func makeBlocks(n int, state *sm.State, privVal types.PrivValidator) []*types.Block {
|
||||
blocks := make([]*types.Block, 0)
|
||||
|
||||
var (
|
||||
prevBlock *types.Block
|
||||
prevBlockMeta *types.BlockMeta
|
||||
)
|
||||
|
||||
appHeight := byte(0x01)
|
||||
for i := 0; i < n; i++ {
|
||||
height := int64(i + 1)
|
||||
|
||||
block, parts := makeBlock(*state, prevBlock, prevBlockMeta, privVal, height)
|
||||
blocks = append(blocks, block)
|
||||
|
||||
prevBlock = block
|
||||
prevBlockMeta = types.NewBlockMeta(block, parts)
|
||||
|
||||
// update state
|
||||
state.AppHash = []byte{appHeight}
|
||||
appHeight++
|
||||
state.LastBlockHeight = height
|
||||
}
|
||||
|
||||
return blocks
|
||||
}
|
||||
|
||||
func makeVote(header *types.Header, blockID types.BlockID, valset *types.ValidatorSet, privVal types.PrivValidator) *types.Vote {
|
||||
addr := privVal.GetPubKey().Address()
|
||||
idx, _ := valset.GetByAddress(addr)
|
||||
vote := &types.Vote{
|
||||
ValidatorAddress: addr,
|
||||
ValidatorIndex: idx,
|
||||
Height: header.Height,
|
||||
Round: 1,
|
||||
Timestamp: tmtime.Now(),
|
||||
Type: types.PrecommitType,
|
||||
BlockID: blockID,
|
||||
}
|
||||
|
||||
privVal.SignVote(header.ChainID, vote)
|
||||
|
||||
return vote
|
||||
}
|
||||
|
||||
func makeBlock(state sm.State, lastBlock *types.Block, lastBlockMeta *types.BlockMeta,
|
||||
privVal types.PrivValidator, height int64) (*types.Block, *types.PartSet) {
|
||||
|
||||
lastCommit := types.NewCommit(types.BlockID{}, nil)
|
||||
if height > 1 {
|
||||
vote := makeVote(&lastBlock.Header, lastBlockMeta.BlockID, state.Validators, privVal).CommitSig()
|
||||
lastCommit = types.NewCommit(lastBlockMeta.BlockID, []*types.CommitSig{vote})
|
||||
}
|
||||
|
||||
return state.MakeBlock(height, []types.Tx{}, lastCommit, nil, state.Validators.GetProposer().Address)
|
||||
}
|
||||
|
||||
type badApp struct {
|
||||
abci.BaseApplication
|
||||
numBlocks byte
|
||||
height byte
|
||||
allHashesAreWrong bool
|
||||
onlyLastHashIsWrong bool
|
||||
}
|
||||
|
||||
func (app *badApp) Commit() abci.ResponseCommit {
|
||||
app.height++
|
||||
if app.onlyLastHashIsWrong {
|
||||
if app.height == app.numBlocks {
|
||||
return abci.ResponseCommit{Data: cmn.RandBytes(8)}
|
||||
}
|
||||
return abci.ResponseCommit{Data: []byte{app.height}}
|
||||
} else if app.allHashesAreWrong {
|
||||
return abci.ResponseCommit{Data: cmn.RandBytes(8)}
|
||||
}
|
||||
|
||||
panic("either allHashesAreWrong or onlyLastHashIsWrong must be set")
|
||||
}
|
||||
|
||||
//--------------------------
// utils for making blocks

func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) {
	var height int64

	// Search for height marker
	gr, found, err := wal.SearchForEndHeight(0, &WALSearchOptions{})
	gr, found, err := wal.SearchForEndHeight(height, &WALSearchOptions{})
	if err != nil {
		return nil, nil, err
	}
	if !found {
		return nil, nil, fmt.Errorf("WAL does not contain height %d.", 1)
		return nil, nil, fmt.Errorf("WAL does not contain height %d", height)
	}
	defer gr.Close() // nolint: errcheck

	// log.Notice("Build a blockchain by reading from the WAL")

	var blocks []*types.Block
	var commits []*types.Commit

	var thisBlockParts *types.PartSet
	var thisBlockCommit *types.Commit
	var height int64
	var (
		blocks          []*types.Block
		commits         []*types.Commit
		thisBlockParts  *types.PartSet
		thisBlockCommit *types.Commit
	)

	dec := NewWALDecoder(gr)
	for {
@@ -602,7 +1016,8 @@ func stateAndStore(config *cfg.Config, pubKey crypto.PubKey, appVersion version.
|
||||
stateDB := dbm.NewMemDB()
|
||||
state, _ := sm.MakeGenesisStateFromFile(config.GenesisFile())
|
||||
state.Version.Consensus.App = appVersion
|
||||
store := NewMockBlockStore(config, state.ConsensusParams)
|
||||
store := newMockBlockStore(config, state.ConsensusParams)
|
||||
sm.SaveState(stateDB, state)
|
||||
return stateDB, state, store
|
||||
}
|
||||
|
||||
@@ -617,7 +1032,7 @@ type mockBlockStore struct {
|
||||
}
|
||||
|
||||
// TODO: NewBlockStore(db.NewMemDB) ...
|
||||
func NewMockBlockStore(config *cfg.Config, params types.ConsensusParams) *mockBlockStore {
|
||||
func newMockBlockStore(config *cfg.Config, params types.ConsensusParams) *mockBlockStore {
|
||||
return &mockBlockStore{config, params, nil, nil}
|
||||
}
|
||||
|
||||
@@ -626,7 +1041,7 @@ func (bs *mockBlockStore) LoadBlock(height int64) *types.Block { return bs.chain
|
||||
func (bs *mockBlockStore) LoadBlockMeta(height int64) *types.BlockMeta {
|
||||
block := bs.chain[height-1]
|
||||
return &types.BlockMeta{
|
||||
BlockID: types.BlockID{block.Hash(), block.MakePartSet(types.BlockPartSizeBytes).Header()},
|
||||
BlockID: types.BlockID{Hash: block.Hash(), PartsHeader: block.MakePartSet(types.BlockPartSizeBytes).Header()},
|
||||
Header: block.Header,
|
||||
}
|
||||
}
|
||||
@@ -640,15 +1055,16 @@ func (bs *mockBlockStore) LoadSeenCommit(height int64) *types.Commit {
|
||||
return bs.commits[height-1]
|
||||
}
|
||||
|
||||
//----------------------------------------
|
||||
//---------------------------------------
|
||||
// Test handshake/init chain
|
||||
|
||||
func TestInitChainUpdateValidators(t *testing.T) {
|
||||
func TestHandshakeUpdatesValidators(t *testing.T) {
|
||||
val, _ := types.RandValidator(true, 10)
|
||||
vals := types.NewValidatorSet([]*types.Validator{val})
|
||||
app := &initChainApp{vals: types.TM2PB.ValidatorUpdates(vals)}
|
||||
clientCreator := proxy.NewLocalClientCreator(app)
|
||||
|
||||
config := ResetConfig("proxy_test_")
|
||||
config := ResetConfig("handshake_test_")
|
||||
defer os.RemoveAll(config.RootDir)
|
||||
privVal := privval.LoadFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile())
|
||||
stateDB, state, store := stateAndStore(config, privVal.GetPubKey(), 0x0)
|
||||
|
@@ -484,18 +484,9 @@ func (cs *ConsensusState) reconstructLastCommit(state sm.State) {
		return
	}
	seenCommit := cs.blockStore.LoadSeenCommit(state.LastBlockHeight)
	lastPrecommits := types.NewVoteSet(state.ChainID, state.LastBlockHeight, seenCommit.Round(), types.PrecommitType, state.LastValidators)
	for _, precommit := range seenCommit.Precommits {
		if precommit == nil {
			continue
		}
		added, err := lastPrecommits.AddVote(seenCommit.ToVote(precommit))
		if !added || err != nil {
			cmn.PanicCrisis(fmt.Sprintf("Failed to reconstruct LastCommit: %v", err))
		}
	}
	lastPrecommits := types.CommitToVoteSet(state.ChainID, seenCommit, state.LastValidators)
	if !lastPrecommits.HasTwoThirdsMajority() {
		cmn.PanicSanity("Failed to reconstruct LastCommit: Does not have +2/3 maj")
		panic("Failed to reconstruct LastCommit: Does not have +2/3 maj")
	}
	cs.LastCommit = lastPrecommits
}
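The new code delegates to types.CommitToVoteSet, which is expected to do what the removed loop did: turn the stored seen commit back into a precommit VoteSet for the last validator set. The sketch below restates that equivalence using only the calls visible in the removed lines; it is illustrative, not the library's actual implementation.

package consensus

import (
	"fmt"

	"github.com/tendermint/tendermint/types"
)

// commitToVoteSetSketch mirrors the removed loop above: it rebuilds the
// precommit VoteSet for the last block from a stored Commit.
func commitToVoteSetSketch(chainID string, height int64, seenCommit *types.Commit, lastVals *types.ValidatorSet) *types.VoteSet {
	voteSet := types.NewVoteSet(chainID, height, seenCommit.Round(), types.PrecommitType, lastVals)
	for _, precommit := range seenCommit.Precommits {
		if precommit == nil {
			continue // absent votes are simply skipped
		}
		added, err := voteSet.AddVote(seenCommit.ToVote(precommit))
		if !added || err != nil {
			panic(fmt.Sprintf("failed to reconstruct LastCommit: %v", err))
		}
	}
	return voteSet
}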
@@ -504,13 +495,13 @@ func (cs *ConsensusState) reconstructLastCommit(state sm.State) {
|
||||
// The round becomes 0 and cs.Step becomes cstypes.RoundStepNewHeight.
|
||||
func (cs *ConsensusState) updateToState(state sm.State) {
|
||||
if cs.CommitRound > -1 && 0 < cs.Height && cs.Height != state.LastBlockHeight {
|
||||
cmn.PanicSanity(fmt.Sprintf("updateToState() expected state height of %v but found %v",
|
||||
panic(fmt.Sprintf("updateToState() expected state height of %v but found %v",
|
||||
cs.Height, state.LastBlockHeight))
|
||||
}
|
||||
if !cs.state.IsEmpty() && cs.state.LastBlockHeight+1 != cs.Height {
|
||||
// This might happen when someone else is mutating cs.state.
|
||||
// Someone forgot to pass in state.Copy() somewhere?!
|
||||
cmn.PanicSanity(fmt.Sprintf("Inconsistent cs.state.LastBlockHeight+1 %v vs cs.Height %v",
|
||||
panic(fmt.Sprintf("Inconsistent cs.state.LastBlockHeight+1 %v vs cs.Height %v",
|
||||
cs.state.LastBlockHeight+1, cs.Height))
|
||||
}
|
||||
|
||||
@@ -530,7 +521,7 @@ func (cs *ConsensusState) updateToState(state sm.State) {
|
||||
lastPrecommits := (*types.VoteSet)(nil)
|
||||
if cs.CommitRound > -1 && cs.Votes != nil {
|
||||
if !cs.Votes.Precommits(cs.CommitRound).HasTwoThirdsMajority() {
|
||||
cmn.PanicSanity("updateToState(state) called but last Precommit round didn't have +2/3")
|
||||
panic("updateToState(state) called but last Precommit round didn't have +2/3")
|
||||
}
|
||||
lastPrecommits = cs.Votes.Precommits(cs.CommitRound)
|
||||
}
|
||||
@@ -1047,7 +1038,7 @@ func (cs *ConsensusState) enterPrevoteWait(height int64, round int) {
|
||||
return
|
||||
}
|
||||
if !cs.Votes.Prevotes(round).HasTwoThirdsAny() {
|
||||
cmn.PanicSanity(fmt.Sprintf("enterPrevoteWait(%v/%v), but Prevotes does not have any +2/3 votes", height, round))
|
||||
panic(fmt.Sprintf("enterPrevoteWait(%v/%v), but Prevotes does not have any +2/3 votes", height, round))
|
||||
}
|
||||
logger.Info(fmt.Sprintf("enterPrevoteWait(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
|
||||
|
||||
@@ -1103,7 +1094,7 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) {
|
||||
// the latest POLRound should be this round.
|
||||
polRound, _ := cs.Votes.POLInfo()
|
||||
if polRound < round {
|
||||
cmn.PanicSanity(fmt.Sprintf("This POLRound should be %v but got %v", round, polRound))
|
||||
panic(fmt.Sprintf("This POLRound should be %v but got %v", round, polRound))
|
||||
}
|
||||
|
||||
// +2/3 prevoted nil. Unlock and precommit nil.
|
||||
@@ -1137,7 +1128,7 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) {
|
||||
logger.Info("enterPrecommit: +2/3 prevoted proposal block. Locking", "hash", blockID.Hash)
|
||||
// Validate the block.
|
||||
if err := cs.blockExec.ValidateBlock(cs.state, cs.ProposalBlock); err != nil {
|
||||
cmn.PanicConsensus(fmt.Sprintf("enterPrecommit: +2/3 prevoted for an invalid block: %v", err))
|
||||
panic(fmt.Sprintf("enterPrecommit: +2/3 prevoted for an invalid block: %v", err))
|
||||
}
|
||||
cs.LockedRound = round
|
||||
cs.LockedBlock = cs.ProposalBlock
|
||||
@@ -1175,7 +1166,7 @@ func (cs *ConsensusState) enterPrecommitWait(height int64, round int) {
|
||||
return
|
||||
}
|
||||
if !cs.Votes.Precommits(round).HasTwoThirdsAny() {
|
||||
cmn.PanicSanity(fmt.Sprintf("enterPrecommitWait(%v/%v), but Precommits does not have any +2/3 votes", height, round))
|
||||
panic(fmt.Sprintf("enterPrecommitWait(%v/%v), but Precommits does not have any +2/3 votes", height, round))
|
||||
}
|
||||
logger.Info(fmt.Sprintf("enterPrecommitWait(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
|
||||
|
||||
@@ -1214,7 +1205,7 @@ func (cs *ConsensusState) enterCommit(height int64, commitRound int) {
|
||||
|
||||
blockID, ok := cs.Votes.Precommits(commitRound).TwoThirdsMajority()
|
||||
if !ok {
|
||||
cmn.PanicSanity("RunActionCommit() expects +2/3 precommits")
|
||||
panic("RunActionCommit() expects +2/3 precommits")
|
||||
}
|
||||
|
||||
// The Locked* fields no longer matter.
|
||||
@@ -1247,7 +1238,7 @@ func (cs *ConsensusState) tryFinalizeCommit(height int64) {
|
||||
logger := cs.Logger.With("height", height)
|
||||
|
||||
if cs.Height != height {
|
||||
cmn.PanicSanity(fmt.Sprintf("tryFinalizeCommit() cs.Height: %v vs height: %v", cs.Height, height))
|
||||
panic(fmt.Sprintf("tryFinalizeCommit() cs.Height: %v vs height: %v", cs.Height, height))
|
||||
}
|
||||
|
||||
blockID, ok := cs.Votes.Precommits(cs.CommitRound).TwoThirdsMajority()
|
||||
@@ -1277,16 +1268,16 @@ func (cs *ConsensusState) finalizeCommit(height int64) {
|
||||
block, blockParts := cs.ProposalBlock, cs.ProposalBlockParts
|
||||
|
||||
if !ok {
|
||||
cmn.PanicSanity(fmt.Sprintf("Cannot finalizeCommit, commit does not have two thirds majority"))
|
||||
panic(fmt.Sprintf("Cannot finalizeCommit, commit does not have two thirds majority"))
|
||||
}
|
||||
if !blockParts.HasHeader(blockID.PartsHeader) {
|
||||
cmn.PanicSanity(fmt.Sprintf("Expected ProposalBlockParts header to be commit header"))
|
||||
panic(fmt.Sprintf("Expected ProposalBlockParts header to be commit header"))
|
||||
}
|
||||
if !block.HashesTo(blockID.Hash) {
|
||||
cmn.PanicSanity(fmt.Sprintf("Cannot finalizeCommit, ProposalBlock does not hash to commit hash"))
|
||||
panic(fmt.Sprintf("Cannot finalizeCommit, ProposalBlock does not hash to commit hash"))
|
||||
}
|
||||
if err := cs.blockExec.ValidateBlock(cs.state, block); err != nil {
|
||||
cmn.PanicConsensus(fmt.Sprintf("+2/3 committed an invalid block: %v", err))
|
||||
panic(fmt.Sprintf("+2/3 committed an invalid block: %v", err))
|
||||
}
|
||||
|
||||
cs.Logger.Info(fmt.Sprintf("Finalizing commit of block with %d txs", block.NumTxs),
|
||||
|
@@ -14,7 +14,7 @@ import (
	cmn "github.com/tendermint/tendermint/libs/common"
	"github.com/tendermint/tendermint/libs/log"
	tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
	p2pdummy "github.com/tendermint/tendermint/p2p/dummy"
	p2pmock "github.com/tendermint/tendermint/p2p/mock"
	"github.com/tendermint/tendermint/types"
)

@@ -239,7 +239,7 @@ func TestStateFullRound1(t *testing.T) {
|
||||
cs.SetEventBus(eventBus)
|
||||
eventBus.Start()
|
||||
|
||||
voteCh := subscribe(cs.eventBus, types.EventQueryVote)
|
||||
voteCh := subscribeUnBuffered(cs.eventBus, types.EventQueryVote)
|
||||
propCh := subscribe(cs.eventBus, types.EventQueryCompleteProposal)
|
||||
newRoundCh := subscribe(cs.eventBus, types.EventQueryNewRound)
|
||||
|
||||
@@ -267,7 +267,7 @@ func TestStateFullRoundNil(t *testing.T) {
|
||||
cs, vss := randConsensusState(1)
|
||||
height, round := cs.Height, cs.Round
|
||||
|
||||
voteCh := subscribe(cs.eventBus, types.EventQueryVote)
|
||||
voteCh := subscribeUnBuffered(cs.eventBus, types.EventQueryVote)
|
||||
|
||||
cs.enterPrevote(height, round)
|
||||
cs.startRoutines(4)
|
||||
@@ -286,7 +286,7 @@ func TestStateFullRound2(t *testing.T) {
|
||||
vs2 := vss[1]
|
||||
height, round := cs1.Height, cs1.Round
|
||||
|
||||
voteCh := subscribe(cs1.eventBus, types.EventQueryVote)
|
||||
voteCh := subscribeUnBuffered(cs1.eventBus, types.EventQueryVote)
|
||||
newBlockCh := subscribe(cs1.eventBus, types.EventQueryNewBlock)
|
||||
|
||||
// start round and wait for propose and prevote
|
||||
@@ -330,7 +330,7 @@ func TestStateLockNoPOL(t *testing.T) {
|
||||
|
||||
timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose)
|
||||
timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait)
|
||||
voteCh := subscribe(cs1.eventBus, types.EventQueryVote)
|
||||
voteCh := subscribeUnBuffered(cs1.eventBus, types.EventQueryVote)
|
||||
proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
|
||||
newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
|
||||
|
||||
@@ -370,7 +370,7 @@ func TestStateLockNoPOL(t *testing.T) {
|
||||
|
||||
// (note we're entering precommit for a second time this round)
|
||||
// but with invalid args. then we enterPrecommitWait, and the timeout to new round
|
||||
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds())
|
||||
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds())
|
||||
|
||||
///
|
||||
|
||||
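This and the following hunks replace the fixed cs1.config.TimeoutPrecommit (and its Propose/Prevote counterparts) with round-aware helpers such as cs1.config.Precommit(round), which implies the consensus config now derives each timeout from the round number. The sketch below shows the usual shape of such a helper, a base timeout plus a per-round delta; the struct and field names are illustrative stand-ins, not the actual config type.

package main

import (
	"fmt"
	"time"
)

// roundTimeouts sketches a round-aware timeout: each later round waits a
// little longer before timing out, so a stuck network eventually makes progress.
type roundTimeouts struct {
	TimeoutPrecommit      time.Duration
	TimeoutPrecommitDelta time.Duration
}

func (c roundTimeouts) Precommit(round int) time.Duration {
	return c.TimeoutPrecommit + time.Duration(round)*c.TimeoutPrecommitDelta
}

func main() {
	c := roundTimeouts{TimeoutPrecommit: time.Second, TimeoutPrecommitDelta: 500 * time.Millisecond}
	for round := 0; round < 3; round++ {
		fmt.Printf("round %d: precommit timeout %v\n", round, c.Precommit(round))
	}
}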
@@ -384,7 +384,7 @@ func TestStateLockNoPOL(t *testing.T) {
|
||||
incrementRound(vs2)
|
||||
|
||||
// now we're on a new round and not the proposer, so wait for timeout
|
||||
ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.TimeoutPropose.Nanoseconds())
|
||||
ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds())
|
||||
|
||||
rs := cs1.GetRoundState()
|
||||
|
||||
@@ -403,7 +403,7 @@ func TestStateLockNoPOL(t *testing.T) {
|
||||
|
||||
// now we're going to enter prevote again, but with invalid args
|
||||
// and then prevote wait, which should timeout. then wait for precommit
|
||||
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrevote.Nanoseconds())
|
||||
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Prevote(round).Nanoseconds())
|
||||
|
||||
ensurePrecommit(voteCh, height, round) // precommit
|
||||
// the proposed block should still be locked and our precommit added
|
||||
@@ -416,7 +416,7 @@ func TestStateLockNoPOL(t *testing.T) {
|
||||
|
||||
// (note we're entering precommit for a second time this round, but with invalid args
|
||||
// then we enterPrecommitWait and timeout into NewRound
|
||||
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds())
|
||||
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds())
|
||||
|
||||
round = round + 1 // entering new round
|
||||
ensureNewRound(newRoundCh, height, round)
|
||||
@@ -441,7 +441,7 @@ func TestStateLockNoPOL(t *testing.T) {
|
||||
signAddVotes(cs1, types.PrevoteType, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2)
|
||||
ensurePrevote(voteCh, height, round)
|
||||
|
||||
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrevote.Nanoseconds())
|
||||
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Prevote(round).Nanoseconds())
|
||||
ensurePrecommit(voteCh, height, round) // precommit
|
||||
|
||||
validatePrecommit(t, cs1, round, 0, vss[0], nil, theBlockHash) // precommit nil but be locked on proposal
|
||||
@@ -449,7 +449,7 @@ func TestStateLockNoPOL(t *testing.T) {
|
||||
signAddVotes(cs1, types.PrecommitType, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2) // NOTE: conflicting precommits at same height
|
||||
ensurePrecommit(voteCh, height, round)
|
||||
|
||||
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds())
|
||||
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds())
|
||||
|
||||
cs2, _ := randConsensusState(2) // needed so generated block is different than locked block
|
||||
// before we time out into new round, set next proposal block
|
||||
@@ -482,7 +482,7 @@ func TestStateLockNoPOL(t *testing.T) {
|
||||
signAddVotes(cs1, types.PrevoteType, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2)
|
||||
ensurePrevote(voteCh, height, round)
|
||||
|
||||
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrevote.Nanoseconds())
|
||||
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Prevote(round).Nanoseconds())
|
||||
ensurePrecommit(voteCh, height, round)
|
||||
validatePrecommit(t, cs1, round, 0, vss[0], nil, theBlockHash) // precommit nil but locked on proposal
|
||||
|
||||
@@ -542,7 +542,7 @@ func TestStateLockPOLRelock(t *testing.T) {
|
||||
incrementRound(vs2, vs3, vs4)
|
||||
|
||||
// timeout to new round
|
||||
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds())
|
||||
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds())
|
||||
|
||||
round = round + 1 // moving to the next round
|
||||
//XXX: this isnt guaranteed to get there before the timeoutPropose ...
|
||||
@@ -632,7 +632,7 @@ func TestStateLockPOLUnlock(t *testing.T) {
|
||||
propBlockParts := propBlock.MakePartSet(partSize)
|
||||
|
||||
// timeout to new round
|
||||
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds())
|
||||
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds())
|
||||
rs = cs1.GetRoundState()
|
||||
lockedBlockHash := rs.LockedBlock.Hash()
|
||||
|
||||
@@ -710,7 +710,7 @@ func TestStateLockPOLSafety1(t *testing.T) {
|
||||
|
||||
// cs1 precommit nil
|
||||
ensurePrecommit(voteCh, height, round)
|
||||
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds())
|
||||
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds())
|
||||
|
||||
t.Log("### ONTO ROUND 1")
|
||||
|
||||
@@ -754,7 +754,7 @@ func TestStateLockPOLSafety1(t *testing.T) {
|
||||
|
||||
signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4)
|
||||
|
||||
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds())
|
||||
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds())
|
||||
|
||||
incrementRound(vs2, vs3, vs4)
|
||||
round = round + 1 // moving to the next round
|
||||
@@ -767,7 +767,7 @@ func TestStateLockPOLSafety1(t *testing.T) {
|
||||
*/
|
||||
|
||||
// timeout of propose
|
||||
ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.TimeoutPropose.Nanoseconds())
|
||||
ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds())
|
||||
|
||||
// finish prevote
|
||||
ensurePrevote(voteCh, height, round)
|
||||
@@ -850,7 +850,7 @@ func TestStateLockPOLSafety2(t *testing.T) {
|
||||
incrementRound(vs2, vs3, vs4)
|
||||
|
||||
// timeout of precommit wait to new round
|
||||
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds())
|
||||
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds())
|
||||
|
||||
round = round + 1 // moving to the next round
|
||||
// in round 2 we see the polkad block from round 0
|
||||
@@ -919,7 +919,7 @@ func TestProposeValidBlock(t *testing.T) {
|
||||
|
||||
signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4)
|
||||
|
||||
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds())
|
||||
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds())
|
||||
|
||||
incrementRound(vs2, vs3, vs4)
|
||||
round = round + 1 // moving to the next round
|
||||
@@ -929,7 +929,7 @@ func TestProposeValidBlock(t *testing.T) {
|
||||
t.Log("### ONTO ROUND 2")
|
||||
|
||||
// timeout of propose
|
||||
ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.TimeoutPropose.Nanoseconds())
|
||||
ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds())
|
||||
|
||||
ensurePrevote(voteCh, height, round)
|
||||
validatePrevote(t, cs1, round, vss[0], propBlockHash)
|
||||
@@ -952,7 +952,7 @@ func TestProposeValidBlock(t *testing.T) {
|
||||
ensureNewRound(newRoundCh, height, round)
|
||||
t.Log("### ONTO ROUND 3")
|
||||
|
||||
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds())
|
||||
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds())
|
||||
|
||||
round = round + 1 // moving to the next round
|
||||
|
||||
@@ -1004,7 +1004,7 @@ func TestSetValidBlockOnDelayedPrevote(t *testing.T) {
|
||||
// vs3 send prevote nil
|
||||
signAddVotes(cs1, types.PrevoteType, nil, types.PartSetHeader{}, vs3)
|
||||
|
||||
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrevote.Nanoseconds())
|
||||
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Prevote(round).Nanoseconds())
|
||||
|
||||
ensurePrecommit(voteCh, height, round)
|
||||
// we should have precommitted
|
||||
@@ -1052,7 +1052,7 @@ func TestSetValidBlockOnDelayedProposal(t *testing.T) {
|
||||
startTestRound(cs1, cs1.Height, round)
|
||||
ensureNewRound(newRoundCh, height, round)
|
||||
|
||||
ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.TimeoutPropose.Nanoseconds())
|
||||
ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds())
|
||||
|
||||
ensurePrevote(voteCh, height, round)
|
||||
validatePrevote(t, cs1, round, vss[0], nil)
|
||||
@@ -1065,7 +1065,7 @@ func TestSetValidBlockOnDelayedProposal(t *testing.T) {
|
||||
signAddVotes(cs1, types.PrevoteType, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4)
|
||||
ensureNewValidBlock(validBlockCh, height, round)
|
||||
|
||||
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrevote.Nanoseconds())
|
||||
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Prevote(round).Nanoseconds())
|
||||
|
||||
ensurePrecommit(voteCh, height, round)
|
||||
validatePrecommit(t, cs1, round, -1, vss[0], nil, nil)
|
||||
@@ -1099,7 +1099,7 @@ func TestWaitingTimeoutOnNilPolka(t *testing.T) {
|
||||
|
||||
signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4)
|
||||
|
||||
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds())
|
||||
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds())
|
||||
ensureNewRound(newRoundCh, height, round+1)
|
||||
}
|
||||
|
||||
@@ -1131,7 +1131,7 @@ func TestWaitingTimeoutProposeOnNewRound(t *testing.T) {
|
||||
rs := cs1.GetRoundState()
|
||||
assert.True(t, rs.Step == cstypes.RoundStepPropose) // P0 does not prevote before timeoutPropose expires
|
||||
|
||||
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPropose.Nanoseconds())
|
||||
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Propose(round).Nanoseconds())
|
||||
|
||||
ensurePrevote(voteCh, height, round)
|
||||
validatePrevote(t, cs1, round, vss[0], nil)
|
||||
@@ -1165,7 +1165,7 @@ func TestRoundSkipOnNilPolkaFromHigherRound(t *testing.T) {
|
||||
ensurePrecommit(voteCh, height, round)
|
||||
validatePrecommit(t, cs1, round, -1, vss[0], nil, nil)
|
||||
|
||||
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds())
|
||||
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds())
|
||||
|
||||
round = round + 1 // moving to the next round
|
||||
ensureNewRound(newRoundCh, height, round)
|
||||
@@ -1191,7 +1191,7 @@ func TestWaitTimeoutProposeOnNilPolkaForTheCurrentRound(t *testing.T) {
|
||||
incrementRound(vss[1:]...)
|
||||
signAddVotes(cs1, types.PrevoteType, nil, types.PartSetHeader{}, vs2, vs3, vs4)
|
||||
|
||||
ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.TimeoutPropose.Nanoseconds())
|
||||
ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds())
|
||||
|
||||
ensurePrevote(voteCh, height, round)
|
||||
validatePrevote(t, cs1, round, vss[0], nil)
|
||||
@@ -1332,7 +1332,7 @@ func TestStartNextHeightCorrectly(t *testing.T) {
|
||||
|
||||
cs1.txNotifier.(*fakeTxNotifier).Notify()
|
||||
|
||||
ensureNewTimeout(timeoutProposeCh, height+1, round, cs1.config.TimeoutPropose.Nanoseconds())
|
||||
ensureNewTimeout(timeoutProposeCh, height+1, round, cs1.config.Propose(round).Nanoseconds())
|
||||
rs = cs1.GetRoundState()
|
||||
assert.False(t, rs.TriggeredTimeoutPrecommit, "triggeredTimeoutPrecommit should be false at the beginning of each round")
|
||||
}
|
||||
@@ -1375,12 +1375,8 @@ func TestResetTimeoutPrecommitUponNewHeight(t *testing.T) {
|
||||
// add precommits
|
||||
signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2)
|
||||
signAddVotes(cs1, types.PrecommitType, theBlockHash, theBlockParts, vs3)
|
||||
time.Sleep(5 * time.Millisecond)
|
||||
signAddVotes(cs1, types.PrecommitType, theBlockHash, theBlockParts, vs4)
|
||||
|
||||
rs = cs1.GetRoundState()
|
||||
assert.True(t, rs.TriggeredTimeoutPrecommit)
|
||||
|
||||
ensureNewBlockHeader(newBlockHeader, height, theBlockHash)
|
||||
|
||||
prop, propBlock := decideProposal(cs1, vs2, height+1, 0)
|
||||
@@ -1519,7 +1515,7 @@ func TestStateHalt1(t *testing.T) {
|
||||
incrementRound(vs2, vs3, vs4)
|
||||
|
||||
// timeout to new round
|
||||
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds())
|
||||
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds())
|
||||
|
||||
round = round + 1 // moving to the next round
|
||||
|
||||
@@ -1548,7 +1544,7 @@ func TestStateHalt1(t *testing.T) {
|
||||
func TestStateOutputsBlockPartsStats(t *testing.T) {
|
||||
// create dummy peer
|
||||
cs, _ := randConsensusState(1)
|
||||
peer := p2pdummy.NewPeer()
|
||||
peer := p2pmock.NewPeer(nil)
|
||||
|
||||
// 1) new block part
|
||||
parts := types.NewPartSetFromData(cmn.RandBytes(100), 10)
|
||||
@@ -1591,7 +1587,7 @@ func TestStateOutputsBlockPartsStats(t *testing.T) {
|
||||
func TestStateOutputVoteStats(t *testing.T) {
|
||||
cs, vss := randConsensusState(2)
|
||||
// create dummy peer
|
||||
peer := p2pdummy.NewPeer()
|
||||
peer := p2pmock.NewPeer(nil)
|
||||
|
||||
vote := signVote(vss[1], types.PrecommitType, []byte("test"), types.PartSetHeader{})
|
||||
|
||||
@@ -1627,3 +1623,12 @@ func subscribe(eventBus *types.EventBus, q tmpubsub.Query) <-chan tmpubsub.Messa
	}
	return sub.Out()
}

// subscribeUnBuffered subscribes the test client to the given query and returns a channel with cap = 0.
func subscribeUnBuffered(eventBus *types.EventBus, q tmpubsub.Query) <-chan tmpubsub.Message {
	sub, err := eventBus.SubscribeUnbuffered(context.Background(), testSubscriber, q)
	if err != nil {
		panic(fmt.Sprintf("failed to subscribe %s to %v", testSubscriber, q))
	}
	return sub.Out()
}
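The cap = 0 detail is the point of subscribeUnBuffered: with an unbuffered channel the event bus blocks until the test has received each event, so the test observes every vote in order instead of racing a bounded buffer. A minimal standalone illustration of that difference in plain Go, with no tendermint types, is below.

package main

import "fmt"

func main() {
	// cap = 0: the sender blocks until a receiver is ready, so producer and
	// consumer rendezvous on every message.
	unbuffered := make(chan int)
	go func() { unbuffered <- 1 }()
	fmt.Println("received from unbuffered channel:", <-unbuffered)

	// cap > 0: the sender can run ahead of the receiver until the buffer fills,
	// which is why a buffered subscription may lag behind the event stream.
	buffered := make(chan int, 1)
	buffered <- 2 // does not block
	fmt.Println("received from buffered channel:", <-buffered)
}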
|
@@ -6,7 +6,6 @@ import (
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
@@ -83,7 +82,7 @@ func (hvs *HeightVoteSet) SetRound(round int) {
|
||||
hvs.mtx.Lock()
|
||||
defer hvs.mtx.Unlock()
|
||||
if hvs.round != 0 && (round < hvs.round+1) {
|
||||
cmn.PanicSanity("SetRound() must increment hvs.round")
|
||||
panic("SetRound() must increment hvs.round")
|
||||
}
|
||||
for r := hvs.round + 1; r <= round; r++ {
|
||||
if _, ok := hvs.roundVoteSets[r]; ok {
|
||||
@@ -96,7 +95,7 @@ func (hvs *HeightVoteSet) SetRound(round int) {
|
||||
|
||||
func (hvs *HeightVoteSet) addRound(round int) {
|
||||
if _, ok := hvs.roundVoteSets[round]; ok {
|
||||
cmn.PanicSanity("addRound() for an existing round")
|
||||
panic("addRound() for an existing round")
|
||||
}
|
||||
// log.Debug("addRound(round)", "round", round)
|
||||
prevotes := types.NewVoteSet(hvs.chainID, hvs.height, round, types.PrevoteType, hvs.valSet)
|
||||
@@ -169,8 +168,7 @@ func (hvs *HeightVoteSet) getVoteSet(round int, type_ types.SignedMsgType) *type
|
||||
case types.PrecommitType:
|
||||
return rvs.Precommits
|
||||
default:
|
||||
cmn.PanicSanity(fmt.Sprintf("Unexpected vote type %X", type_))
|
||||
return nil
|
||||
panic(fmt.Sprintf("Unexpected vote type %X", type_))
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -10,12 +10,14 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/tendermint/tendermint/abci/example/kvstore"
|
||||
bc "github.com/tendermint/tendermint/blockchain"
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
"github.com/tendermint/tendermint/libs/db"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/mock"
|
||||
"github.com/tendermint/tendermint/privval"
|
||||
"github.com/tendermint/tendermint/proxy"
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
@@ -45,13 +47,14 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) {
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to read genesis file")
|
||||
}
|
||||
stateDB := db.NewMemDB()
|
||||
blockStoreDB := db.NewMemDB()
|
||||
stateDB := blockStoreDB
|
||||
state, err := sm.MakeGenesisState(genDoc)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to make genesis state")
|
||||
}
|
||||
state.Version.Consensus.App = kvstore.ProtocolVersion
|
||||
sm.SaveState(stateDB, state)
|
||||
blockStore := bc.NewBlockStore(blockStoreDB)
|
||||
proxyApp := proxy.NewAppConns(proxy.NewLocalClientCreator(app))
|
||||
proxyApp.SetLogger(logger.With("module", "proxy"))
|
||||
@@ -66,7 +69,7 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) {
|
||||
return errors.Wrap(err, "failed to start event bus")
|
||||
}
|
||||
defer eventBus.Stop()
|
||||
mempool := sm.MockMempool{}
|
||||
mempool := mock.Mempool{}
|
||||
evpool := sm.MockEvidencePool{}
|
||||
blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool)
|
||||
consensusState := NewConsensusState(config.Consensus, state.Copy(), blockExec, blockStore, mempool, evpool)
|
||||
|
@@ -37,9 +37,6 @@
|
||||
// sum := crypto.Sha256([]byte("This is Tendermint"))
|
||||
// fmt.Printf("%x\n", sum)
|
||||
|
||||
// Ripemd160
|
||||
// sum := crypto.Ripemd160([]byte("This is consensus"))
|
||||
// fmt.Printf("%x\n", sum)
|
||||
package crypto
|
||||
|
||||
// TODO: Add more docs in here
|
||||
|
@@ -26,10 +26,3 @@ func ExampleSha256() {
|
||||
// Output:
|
||||
// f91afb642f3d1c87c17eb01aae5cb65c242dfdbe7cf1066cc260f4ce5d33b94e
|
||||
}
|
||||
|
||||
func ExampleRipemd160() {
|
||||
sum := crypto.Ripemd160([]byte("This is Tendermint"))
|
||||
fmt.Printf("%x\n", sum)
|
||||
// Output:
|
||||
// 051e22663e8f0fd2f2302f1210f954adff009005
|
||||
}
|
||||
|
@@ -2,8 +2,6 @@ package crypto
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
|
||||
"golang.org/x/crypto/ripemd160"
|
||||
)
|
||||
|
||||
func Sha256(bytes []byte) []byte {
|
||||
@@ -11,9 +9,3 @@ func Sha256(bytes []byte) []byte {
|
||||
hasher.Write(bytes)
|
||||
return hasher.Sum(nil)
|
||||
}
|
||||
|
||||
func Ripemd160(bytes []byte) []byte {
|
||||
hasher := ripemd160.New()
|
||||
hasher.Write(bytes)
|
||||
return hasher.Sum(nil)
|
||||
}
|
||||
|
@@ -20,6 +20,77 @@ func SimpleHashFromByteSlices(items [][]byte) []byte {
	}
}

// SimpleHashFromByteSlicesIterative is an iterative alternative to
// SimpleHashFromByteSlices, motivated by potential performance improvements.
// (#2611) had suggested that an iterative version of
// SimpleHashFromByteSlices would be faster, presumably because
// we can envision some overhead accumulating from stack
// frames and function calls. Additionally, a recursive algorithm risks
// hitting the stack limit and causing a stack overflow should the tree
// be too large.
//
// Provided here is an iterative alternative, a simple test to assert
// correctness and a benchmark. On the performance side, there appears to
// be no overall difference:
//
// BenchmarkSimpleHashAlternatives/recursive-4   20000   77677 ns/op
// BenchmarkSimpleHashAlternatives/iterative-4   20000   76802 ns/op
//
// On the surface it might seem that the additional overhead is due to
// the different allocation patterns of the implementations. The recursive
// version uses a single [][]byte slice which it then re-slices at each level of the tree.
// The iterative version reproduces [][]byte once within the function and
// then rewrites sub-slices of that array at each level of the tree.
//
// Experimenting by modifying the code to simply calculate the
// hash and not store the result shows little to no difference in performance.
//
// These preliminary results suggest:
//
// 1. The performance of SimpleHashFromByteSlices is pretty good
// 2. Go has low overhead for recursive functions
// 3. The performance of the SimpleHashFromByteSlices routine is dominated
//    by the actual hashing of data
//
// Although this work is in no way exhaustive, point #3 suggests that
// optimization of this routine would need to take an alternative
// approach to make significant improvements on the current performance.
//
// Finally, considering that the recursive implementation is easier to
// read, it might not be worthwhile to switch to a less intuitive
// implementation for so little benefit.
func SimpleHashFromByteSlicesIterative(input [][]byte) []byte {
|
||||
items := make([][]byte, len(input))
|
||||
|
||||
for i, leaf := range input {
|
||||
items[i] = leafHash(leaf)
|
||||
}
|
||||
|
||||
size := len(items)
|
||||
for {
|
||||
switch size {
|
||||
case 0:
|
||||
return nil
|
||||
case 1:
|
||||
return items[0]
|
||||
default:
|
||||
rp := 0 // read position
|
||||
wp := 0 // write position
|
||||
for rp < size {
|
||||
if rp+1 < size {
|
||||
items[wp] = innerHash(items[rp], items[rp+1])
|
||||
rp += 2
|
||||
} else {
|
||||
items[wp] = items[rp]
|
||||
rp += 1
|
||||
}
|
||||
wp += 1
|
||||
}
|
||||
size = wp
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// SimpleHashFromMap computes a Merkle tree from sorted map.
|
||||
// Like calling SimpleHashFromHashers with
|
||||
// `item = []byte(Hash(key) | Hash(value))`,
|
||||
|
@@ -70,6 +70,42 @@ func TestSimpleProof(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestSimpleHashAlternatives(t *testing.T) {
|
||||
|
||||
total := 100
|
||||
|
||||
items := make([][]byte, total)
|
||||
for i := 0; i < total; i++ {
|
||||
items[i] = testItem(cmn.RandBytes(tmhash.Size))
|
||||
}
|
||||
|
||||
rootHash1 := SimpleHashFromByteSlicesIterative(items)
|
||||
rootHash2 := SimpleHashFromByteSlices(items)
|
||||
require.Equal(t, rootHash1, rootHash2, "Unmatched root hashes: %X vs %X", rootHash1, rootHash2)
|
||||
}
|
||||
|
||||
func BenchmarkSimpleHashAlternatives(b *testing.B) {
|
||||
total := 100
|
||||
|
||||
items := make([][]byte, total)
|
||||
for i := 0; i < total; i++ {
|
||||
items[i] = testItem(cmn.RandBytes(tmhash.Size))
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
b.Run("recursive", func(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
_ = SimpleHashFromByteSlices(items)
|
||||
}
|
||||
})
|
||||
|
||||
b.Run("iterative", func(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
_ = SimpleHashFromByteSlicesIterative(items)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func Test_getSplitPoint(t *testing.T) {
|
||||
tests := []struct {
|
||||
length int
|
||||
|
@@ -1,7 +1,8 @@
|
||||
package multisig
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/tendermint/tendermint/crypto"
|
||||
"github.com/tendermint/tendermint/crypto/multisig/bitarray"
|
||||
@@ -53,13 +54,19 @@ func (mSig *Multisignature) AddSignature(sig []byte, index int) {
|
||||
mSig.Sigs[newSigIndex] = sig
|
||||
}
|
||||
|
||||
// AddSignatureFromPubKey adds a signature to the multisig,
|
||||
// at the index in keys corresponding to the provided pubkey.
|
||||
// AddSignatureFromPubKey adds a signature to the multisig, at the index in
|
||||
// keys corresponding to the provided pubkey.
|
||||
func (mSig *Multisignature) AddSignatureFromPubKey(sig []byte, pubkey crypto.PubKey, keys []crypto.PubKey) error {
|
||||
index := getIndex(pubkey, keys)
|
||||
if index == -1 {
|
||||
return errors.New("provided key didn't exist in pubkeys")
|
||||
keysStr := make([]string, len(keys))
|
||||
for i, k := range keys {
|
||||
keysStr[i] = fmt.Sprintf("%X", k.Bytes())
|
||||
}
|
||||
|
||||
return fmt.Errorf("provided key %X doesn't exist in pubkeys: \n%s", pubkey.Bytes(), strings.Join(keysStr, "\n"))
|
||||
}
|
||||
|
||||
mSig.AddSignature(sig, index)
|
||||
return nil
|
||||
}
|
||||
|
@@ -36,30 +36,68 @@ func TestThresholdMultisigValidCases(t *testing.T) {
|
||||
for tcIndex, tc := range cases {
|
||||
multisigKey := NewPubKeyMultisigThreshold(tc.k, tc.pubkeys)
|
||||
multisignature := NewMultisig(len(tc.pubkeys))
|
||||
|
||||
for i := 0; i < tc.k-1; i++ {
|
||||
signingIndex := tc.signingIndices[i]
|
||||
multisignature.AddSignatureFromPubKey(tc.signatures[signingIndex], tc.pubkeys[signingIndex], tc.pubkeys)
|
||||
require.False(t, multisigKey.VerifyBytes(tc.msg, multisignature.Marshal()),
|
||||
"multisig passed when i < k, tc %d, i %d", tcIndex, i)
|
||||
multisignature.AddSignatureFromPubKey(tc.signatures[signingIndex], tc.pubkeys[signingIndex], tc.pubkeys)
|
||||
require.Equal(t, i+1, len(multisignature.Sigs),
|
||||
"adding a signature for the same pubkey twice increased signature count by 2, tc %d", tcIndex)
|
||||
require.NoError(
|
||||
t,
|
||||
multisignature.AddSignatureFromPubKey(tc.signatures[signingIndex], tc.pubkeys[signingIndex], tc.pubkeys),
|
||||
)
|
||||
require.False(
|
||||
t,
|
||||
multisigKey.VerifyBytes(tc.msg, multisignature.Marshal()),
|
||||
"multisig passed when i < k, tc %d, i %d", tcIndex, i,
|
||||
)
|
||||
require.NoError(
|
||||
t,
|
||||
multisignature.AddSignatureFromPubKey(tc.signatures[signingIndex], tc.pubkeys[signingIndex], tc.pubkeys),
|
||||
)
|
||||
require.Equal(
|
||||
t,
|
||||
i+1,
|
||||
len(multisignature.Sigs),
|
||||
"adding a signature for the same pubkey twice increased signature count by 2, tc %d", tcIndex,
|
||||
)
|
||||
}
|
||||
require.False(t, multisigKey.VerifyBytes(tc.msg, multisignature.Marshal()),
|
||||
"multisig passed with k - 1 sigs, tc %d", tcIndex)
|
||||
multisignature.AddSignatureFromPubKey(tc.signatures[tc.signingIndices[tc.k]], tc.pubkeys[tc.signingIndices[tc.k]], tc.pubkeys)
|
||||
require.True(t, multisigKey.VerifyBytes(tc.msg, multisignature.Marshal()),
|
||||
"multisig failed after k good signatures, tc %d", tcIndex)
|
||||
|
||||
require.False(
|
||||
t,
|
||||
multisigKey.VerifyBytes(tc.msg, multisignature.Marshal()),
|
||||
"multisig passed with k - 1 sigs, tc %d", tcIndex,
|
||||
)
|
||||
require.NoError(
|
||||
t,
|
||||
multisignature.AddSignatureFromPubKey(tc.signatures[tc.signingIndices[tc.k]], tc.pubkeys[tc.signingIndices[tc.k]], tc.pubkeys),
|
||||
)
|
||||
require.True(
|
||||
t,
|
||||
multisigKey.VerifyBytes(tc.msg, multisignature.Marshal()),
|
||||
"multisig failed after k good signatures, tc %d", tcIndex,
|
||||
)
|
||||
|
||||
for i := tc.k + 1; i < len(tc.signingIndices); i++ {
|
||||
signingIndex := tc.signingIndices[i]
|
||||
multisignature.AddSignatureFromPubKey(tc.signatures[signingIndex], tc.pubkeys[signingIndex], tc.pubkeys)
|
||||
require.Equal(t, tc.passAfterKSignatures[i-tc.k-1],
|
||||
multisigKey.VerifyBytes(tc.msg, multisignature.Marshal()),
|
||||
"multisig didn't verify as expected after k sigs, tc %d, i %d", tcIndex, i)
|
||||
|
||||
multisignature.AddSignatureFromPubKey(tc.signatures[signingIndex], tc.pubkeys[signingIndex], tc.pubkeys)
|
||||
require.Equal(t, i+1, len(multisignature.Sigs),
|
||||
"adding a signature for the same pubkey twice increased signature count by 2, tc %d", tcIndex)
|
||||
require.NoError(
|
||||
t,
|
||||
multisignature.AddSignatureFromPubKey(tc.signatures[signingIndex], tc.pubkeys[signingIndex], tc.pubkeys),
|
||||
)
|
||||
require.Equal(
|
||||
t,
|
||||
tc.passAfterKSignatures[i-tc.k-1],
|
||||
multisigKey.VerifyBytes(tc.msg, multisignature.Marshal()),
|
||||
"multisig didn't verify as expected after k sigs, tc %d, i %d", tcIndex, i,
|
||||
)
|
||||
require.NoError(
|
||||
t,
|
||||
multisignature.AddSignatureFromPubKey(tc.signatures[signingIndex], tc.pubkeys[signingIndex], tc.pubkeys),
|
||||
)
|
||||
require.Equal(
|
||||
t,
|
||||
i+1,
|
||||
len(multisignature.Sigs),
|
||||
"adding a signature for the same pubkey twice increased signature count by 2, tc %d", tcIndex,
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -6,6 +6,7 @@ import (
|
||||
"crypto/subtle"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/big"
|
||||
|
||||
"golang.org/x/crypto/ripemd160"
|
||||
|
||||
@@ -65,32 +66,61 @@ func (privKey PrivKeySecp256k1) Equals(other crypto.PrivKey) bool {
|
||||
}
|
||||
|
||||
// GenPrivKey generates a new ECDSA private key on curve secp256k1 private key.
|
||||
// It uses OS randomness in conjunction with the current global random seed
|
||||
// in tendermint/libs/common to generate the private key.
|
||||
// It uses OS randomness to generate the private key.
|
||||
func GenPrivKey() PrivKeySecp256k1 {
|
||||
return genPrivKey(crypto.CReader())
|
||||
}
|
||||
|
||||
// genPrivKey generates a new secp256k1 private key using the provided reader.
|
||||
func genPrivKey(rand io.Reader) PrivKeySecp256k1 {
|
||||
privKeyBytes := [32]byte{}
|
||||
_, err := io.ReadFull(rand, privKeyBytes[:])
|
||||
if err != nil {
|
||||
panic(err)
|
||||
var privKeyBytes [32]byte
|
||||
d := new(big.Int)
|
||||
for {
|
||||
privKeyBytes = [32]byte{}
|
||||
_, err := io.ReadFull(rand, privKeyBytes[:])
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
d.SetBytes(privKeyBytes[:])
|
||||
// break if we found a valid point (i.e. > 0 and < N == curveOrder)
|
||||
isValidFieldElement := 0 < d.Sign() && d.Cmp(secp256k1.S256().N) < 0
|
||||
if isValidFieldElement {
|
||||
break
|
||||
}
|
||||
}
|
||||
// crypto.CRandBytes is guaranteed to be 32 bytes long, so it can be
|
||||
// casted to PrivKeySecp256k1.
|
||||
|
||||
return PrivKeySecp256k1(privKeyBytes)
|
||||
}
|
||||
|
||||
var one = new(big.Int).SetInt64(1)
|
||||
|
||||
// GenPrivKeySecp256k1 hashes the secret with SHA2, and uses
|
||||
// that 32 byte output to create the private key.
|
||||
//
|
||||
// It makes sure the private key is a valid field element by setting:
|
||||
//
|
||||
// c = sha256(secret)
|
||||
// k = (c mod (n − 1)) + 1, where n = curve order.
|
||||
//
|
||||
// NOTE: secret should be the output of a KDF like bcrypt,
|
||||
// if it's derived from user input.
|
||||
func GenPrivKeySecp256k1(secret []byte) PrivKeySecp256k1 {
|
||||
privKey32 := sha256.Sum256(secret)
|
||||
// sha256.Sum256() is guaranteed to be 32 bytes long, so it can be
|
||||
// casted to PrivKeySecp256k1.
|
||||
secHash := sha256.Sum256(secret)
|
||||
// to guarantee that we have a valid field element, we use the approach of:
|
||||
// "Suite B Implementer’s Guide to FIPS 186-3", A.2.1
|
||||
// https://apps.nsa.gov/iaarchive/library/ia-guidance/ia-solutions-for-classified/algorithm-guidance/suite-b-implementers-guide-to-fips-186-3-ecdsa.cfm
|
||||
// see also https://github.com/golang/go/blob/0380c9ad38843d523d9c9804fe300cb7edd7cd3c/src/crypto/ecdsa/ecdsa.go#L89-L101
|
||||
fe := new(big.Int).SetBytes(secHash[:])
|
||||
n := new(big.Int).Sub(secp256k1.S256().N, one)
|
||||
fe.Mod(fe, n)
|
||||
fe.Add(fe, one)
|
||||
|
||||
feB := fe.Bytes()
|
||||
var privKey32 [32]byte
|
||||
// copy feB over to fixed 32 byte privKey32 and pad (if necessary)
|
||||
copy(privKey32[32-len(feB):32], feB)
|
||||
|
||||
return PrivKeySecp256k1(privKey32)
|
||||
}
|
||||
|
||||
|
39  crypto/secp256k1/secp256k1_cgo_test.go (new file)
@@ -0,0 +1,39 @@
|
||||
// +build libsecp256k1
|
||||
|
||||
package secp256k1
|
||||
|
||||
import (
|
||||
"github.com/magiconair/properties/assert"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestPrivKeySecp256k1SignVerify(t *testing.T) {
|
||||
msg := []byte("A.1.2 ECC Key Pair Generation by Testing Candidates")
|
||||
priv := GenPrivKey()
|
||||
tests := []struct {
|
||||
name string
|
||||
privKey PrivKeySecp256k1
|
||||
wantSignErr bool
|
||||
wantVerifyPasses bool
|
||||
}{
|
||||
{name: "valid sign-verify round", privKey: priv, wantSignErr: false, wantVerifyPasses: true},
|
||||
{name: "invalid private key", privKey: [32]byte{}, wantSignErr: true, wantVerifyPasses: false},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got, err := tt.privKey.Sign(msg)
|
||||
if tt.wantSignErr {
|
||||
require.Error(t, err)
|
||||
t.Logf("Got error: %s", err)
|
||||
return
|
||||
}
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, got)
|
||||
|
||||
pub := tt.privKey.PubKey()
|
||||
assert.Equal(t, tt.wantVerifyPasses, pub.VerifyBytes(msg, got))
|
||||
})
|
||||
}
|
||||
}
|
45  crypto/secp256k1/secp256k1_internal_test.go (new file)
@@ -0,0 +1,45 @@
|
||||
package secp256k1
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
underlyingSecp256k1 "github.com/btcsuite/btcd/btcec"
|
||||
)
|
||||
|
||||
func Test_genPrivKey(t *testing.T) {
|
||||
|
||||
empty := make([]byte, 32)
|
||||
oneB := big.NewInt(1).Bytes()
|
||||
onePadded := make([]byte, 32)
|
||||
copy(onePadded[32-len(oneB):32], oneB)
|
||||
t.Logf("one padded: %v, len=%v", onePadded, len(onePadded))
|
||||
|
||||
validOne := append(empty, onePadded...)
|
||||
tests := []struct {
|
||||
name string
|
||||
notSoRand []byte
|
||||
shouldPanic bool
|
||||
}{
|
||||
{"empty bytes (panics because 1st 32 bytes are zero and 0 is not a valid field element)", empty, true},
|
||||
{"curve order: N", underlyingSecp256k1.S256().N.Bytes(), true},
|
||||
{"valid because 0 < 1 < N", validOne, false},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if tt.shouldPanic {
|
||||
require.Panics(t, func() {
|
||||
genPrivKey(bytes.NewReader(tt.notSoRand))
|
||||
})
|
||||
return
|
||||
}
|
||||
got := genPrivKey(bytes.NewReader(tt.notSoRand))
|
||||
fe := new(big.Int).SetBytes(got[:])
|
||||
require.True(t, fe.Cmp(underlyingSecp256k1.S256().N) < 0)
|
||||
require.True(t, fe.Sign() > 0)
|
||||
})
|
||||
}
|
||||
}
|
@@ -6,7 +6,6 @@ import (
|
||||
"testing"
|
||||
|
||||
secp256k1 "github.com/btcsuite/btcd/btcec"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
|
@@ -2,6 +2,7 @@ package secp256k1_test
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
"github.com/btcsuite/btcutil/base58"
|
||||
@@ -84,3 +85,28 @@ func TestSecp256k1LoadPrivkeyAndSerializeIsIdentity(t *testing.T) {
|
||||
require.Equal(t, privKeyBytes[:], serializedBytes)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGenPrivKeySecp256k1(t *testing.T) {
|
||||
// curve order N
|
||||
N := underlyingSecp256k1.S256().N
|
||||
tests := []struct {
|
||||
name string
|
||||
secret []byte
|
||||
}{
|
||||
{"empty secret", []byte{}},
|
||||
{"some long secret", []byte("We live in a society exquisitely dependent on science and technology, in which hardly anyone knows anything about science and technology.")},
|
||||
{"another seed used in cosmos tests #1", []byte{0}},
|
||||
{"another seed used in cosmos tests #2", []byte("mySecret")},
|
||||
{"another seed used in cosmos tests #3", []byte("")},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
gotPrivKey := secp256k1.GenPrivKeySecp256k1(tt.secret)
|
||||
require.NotNil(t, gotPrivKey)
|
||||
// interpret as a big.Int and make sure it is a valid field element:
|
||||
fe := new(big.Int).SetBytes(gotPrivKey[:])
|
||||
require.True(t, fe.Cmp(N) < 0)
|
||||
require.True(t, fe.Sign() > 0)
|
||||
})
|
||||
}
|
||||
}
|
@@ -7,7 +7,6 @@ import (
|
||||
"golang.org/x/crypto/nacl/secretbox"
|
||||
|
||||
"github.com/tendermint/tendermint/crypto"
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
)
|
||||
|
||||
// TODO, make this into a struct that implements crypto.Symmetric.
|
||||
@@ -19,7 +18,7 @@ const secretLen = 32
|
||||
// The ciphertext is (secretbox.Overhead + 24) bytes longer than the plaintext.
|
||||
func EncryptSymmetric(plaintext []byte, secret []byte) (ciphertext []byte) {
|
||||
if len(secret) != secretLen {
|
||||
cmn.PanicSanity(fmt.Sprintf("Secret must be 32 bytes long, got len %v", len(secret)))
|
||||
panic(fmt.Sprintf("Secret must be 32 bytes long, got len %v", len(secret)))
|
||||
}
|
||||
nonce := crypto.CRandBytes(nonceLen)
|
||||
nonceArr := [nonceLen]byte{}
|
||||
@@ -36,7 +35,7 @@ func EncryptSymmetric(plaintext []byte, secret []byte) (ciphertext []byte) {
|
||||
// The ciphertext is (secretbox.Overhead + 24) bytes longer than the plaintext.
|
||||
func DecryptSymmetric(ciphertext []byte, secret []byte) (plaintext []byte, err error) {
|
||||
if len(secret) != secretLen {
|
||||
cmn.PanicSanity(fmt.Sprintf("Secret must be 32 bytes long, got len %v", len(secret)))
|
||||
panic(fmt.Sprintf("Secret must be 32 bytes long, got len %v", len(secret)))
|
||||
}
|
||||
if len(ciphertext) <= secretbox.Overhead+nonceLen {
|
||||
return nil, errors.New("Ciphertext is too short")
|
||||
|
@@ -2,7 +2,7 @@
|
||||
|
||||
Tendermint emits different events, to which you can subscribe via
|
||||
[Websocket](https://en.wikipedia.org/wiki/WebSocket). This can be useful
|
||||
for third-party applications (for analysys) or inspecting state.
|
||||
for third-party applications (for analysis) or inspecting state.
|
||||
|
||||
[List of events](https://godoc.org/github.com/tendermint/tendermint/types#pkg-constants)
|
||||
|
||||
|
@@ -1,14 +1,18 @@
|
||||
# ADR 025 Commit
|
||||
|
||||
## Context
|
||||
|
||||
Currently the `Commit` structure contains a lot of potentially redundant or unnecessary data.
|
||||
In particular it contains an array of every precommit from the validators, which includes many copies of the same data. Such as `Height`, `Round`, `Type`, and `BlockID`. Also the `ValidatorIndex` could be derived from the vote's position in the array, and the `ValidatorAddress` could potentially be derived from runtime context. The only truely necessary data is the `Signature` and `Timestamp` associated with each `Vote`.
|
||||
It contains a list of precommits from every validator, where the precommit
|
||||
includes the whole `Vote` structure. Thus each of the commit height, round,
|
||||
type, and blockID are repeated for every validator, and could be deduplicated.
|
||||
|
||||
```
|
||||
type Commit struct {
|
||||
BlockID BlockID `json:"block_id"`
|
||||
Precommits []*Vote `json:"precommits"`
|
||||
}
|
||||
|
||||
type Vote struct {
|
||||
ValidatorAddress Address `json:"validator_address"`
|
||||
ValidatorIndex int `json:"validator_index"`
|
||||
@@ -26,7 +30,9 @@ References:
|
||||
[#2226](https://github.com/tendermint/tendermint/issues/2226)
|
||||
|
||||
## Proposed Solution
|
||||
|
||||
We can improve efficiency by replacing the usage of the `Vote` struct with a subset of each vote, and by storing the constant values (`Height`, `Round`, `BlockID`) in the Commit itself.
|
||||
|
||||
```
|
||||
type Commit struct {
|
||||
Height int64
|
||||
@@ -34,42 +40,56 @@ type Commit struct {
|
||||
BlockID BlockID `json:"block_id"`
|
||||
Precommits []*CommitSig `json:"precommits"`
|
||||
}
|
||||
|
||||
type CommitSig struct {
|
||||
BlockID BlockIDFlag
|
||||
ValidatorAddress Address
|
||||
Signature []byte
|
||||
Timestamp time.Time
|
||||
Signature []byte
|
||||
}
|
||||
|
||||
|
||||
// indicate which BlockID the signature is for
|
||||
type BlockIDFlag int
|
||||
|
||||
const (
|
||||
BlockIDFlagAbsent BlockIDFlag = iota // vote is not included in the Commit.Precommits
|
||||
BlockIDFlagCommit // voted for the Commit.BlockID
|
||||
BlockIDFlagNil // voted for nil
|
||||
)
|
||||
|
||||
```
|
||||
Continuing to store the `ValidatorAddress` in the `CommitSig` takes up extra space, but simplifies the process and allows for easier debugging.
|
||||
|
||||
Note the need for an extra byte to indicate whether the signature is for the BlockID or for nil.
|
||||
This byte can also be used to indicate an absent vote, rather than using a nil object like we currently do,
|
||||
which has been [problematic for compatibility between Amino and proto3](https://github.com/tendermint/go-amino/issues/260).
|
||||
|
||||
Note we also continue to store the `ValidatorAddress` in the `CommitSig`.
|
||||
While this still takes 20-bytes per signature, it ensures that the Commit has all
|
||||
information necessary to reconstruct Vote, which simplifies mapping between Commit and Vote objects
|
||||
and helps with debugging. It also may be necessary for the light-client to know which address a signature corresponds to if
|
||||
it is trying to verify a current commit with an older validator set.
|
||||
|
||||
## Status
|
||||
|
||||
Proposed
|
||||
|
||||
## Consequences
|
||||
|
||||
### Positive
|
||||
The size of a `Commit` transmitted over the network goes from:
|
||||
|
||||
|BlockID| + n * (|Address| + |ValidatorIndex| + |Height| + |Round| + |Timestamp| + |Type| + |BlockID| + |Signature|)
|
||||
Removing the Type/Height/Round/Index and the BlockID saves roughly 80 bytes per precommit.
|
||||
It varies because some integers are varint. The BlockID contains two 32-byte hashes and an integer,
|
||||
and the Height is 8-bytes.
|
||||
|
||||
to:
|
||||
For a chain with 100 validators, that's up to 8kB in savings per block!
|
||||
|
||||
|
||||
|BlockID|+|Height|+|Round| + n*(|Address| + |Signature| + |Timestamp|)
|
||||
|
||||
This saves:
|
||||
|
||||
n * (|BlockID| + |ValidatorIndex| + |Type|) + (n-1) * (Height + Round)
|
||||
|
||||
In the current context, this would concretely be:
|
||||
(assuming all ints are int64, and hashes are 32 bytes)
|
||||
|
||||
n *(72 + 8 + 1 + 8 + 8) - 16 = n * 97 - 16
|
||||
|
||||
With 100 validators this is a savings of almost 10KB on every block.
|
||||
|
||||
### Negative
|
||||
This would add some complexity to the processing and verification of blocks and commits, as votes would have to be reconstructed to be verified and gossiped. The reconstruction could be relatively straightforward, only requiring the copying of data from the `Commit` itself into the newly created `Vote`.
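To make that reconstruction concrete, here is a rough sketch (illustrative only, not part of the proposal; it assumes the proposed `Commit` also carries `Round`, that a `PrecommitType` constant names the vote type, and that the zero `BlockID` stands for a nil vote):

```
func voteFromCommitSig(c *Commit, cs CommitSig, valIndex int) *Vote {
    if cs.BlockID == BlockIDFlagAbsent {
        return nil // this validator's vote was not included
    }
    blockID := c.BlockID
    if cs.BlockID == BlockIDFlagNil {
        blockID = BlockID{} // the validator signed over nil
    }
    return &Vote{
        Type:             PrecommitType, // assumed constant for the precommit vote type
        Height:           c.Height,
        Round:            c.Round,
        BlockID:          blockID,
        Timestamp:        cs.Timestamp,
        ValidatorAddress: cs.ValidatorAddress,
        ValidatorIndex:   valIndex,
        Signature:        cs.Signature,
    }
}
```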
|
||||
|
||||
- Large breaking change to the block and commit structure
|
||||
- Requires differentiating in code between the Vote and CommitSig objects, which may add some complexity (votes need to be reconstructed to be verified and gossiped)
|
||||
|
||||
### Neutral
|
||||
This design leaves the `ValidatorAddress` in the `CommitSig` and in the `Vote`. These could be removed at some point for additional savings, but that would introduce more complexity, and make printing of `Commit` and `VoteSet` objects less informative, which could harm debugging efficiency and UI/UX.
|
||||
|
||||
- Commit.Precommits no longer contains nil values
|
||||
|
100  docs/architecture/adr-037-deliver-block.md (new file)
@@ -0,0 +1,100 @@
|
||||
# ADR 037: Deliver Block
|
||||
|
||||
Author: Daniil Lashin (@danil-lashin)
|
||||
|
||||
## Changelog
|
||||
|
||||
13-03-2019: Initial draft
|
||||
|
||||
## Context
|
||||
|
||||
Initial conversation: https://github.com/tendermint/tendermint/issues/2901
|
||||
|
||||
Some applications can handle transactions in parallel, or at least some
|
||||
part of tx processing can be parallelized. Currently it is not possible for a developer
|
||||
to execute txs in parallel because Tendermint delivers them sequentially.
|
||||
|
||||
## Decision
|
||||
|
||||
Currently Tendermint has `BeginBlock`, `EndBlock`, `Commit` and `DeliverTx` steps
|
||||
while executing a block. This doc proposes merging these steps into one `DeliverBlock`
|
||||
step. It will allow application developers to decide how they want to
|
||||
execute transactions (in parallel or sequentially). It will also simplify and
|
||||
speed up communication between the application and Tendermint.
|
||||
|
||||
As @jaekwon [mentioned](https://github.com/tendermint/tendermint/issues/2901#issuecomment-477746128)
|
||||
in the discussion, not all applications will benefit from this solution. In some cases,
|
||||
when an application handles transactions sequentially, it may slow down the blockchain,
|
||||
because it needs to wait until the full block is transmitted to the application to start
|
||||
processing it. Also, in the case of a complete change of ABCI, we would need to force all the apps
|
||||
to change their implementation completely. That's why I propose to introduce one more ABCI
|
||||
type.
|
||||
|
||||
# Implementation Changes
|
||||
|
||||
In addition to default application interface which now have this structure
|
||||
|
||||
```go
|
||||
type Application interface {
|
||||
// Info and Mempool methods...
|
||||
|
||||
// Consensus Connection
|
||||
InitChain(RequestInitChain) ResponseInitChain // Initialize blockchain with validators and other info from TendermintCore
|
||||
BeginBlock(RequestBeginBlock) ResponseBeginBlock // Signals the beginning of a block
|
||||
DeliverTx(tx []byte) ResponseDeliverTx // Deliver a tx for full processing
|
||||
EndBlock(RequestEndBlock) ResponseEndBlock // Signals the end of a block, returns changes to the validator set
|
||||
Commit() ResponseCommit // Commit the state and return the application Merkle root hash
|
||||
}
|
||||
```
|
||||
|
||||
this doc proposes to add one more:
|
||||
|
||||
```go
|
||||
type Application interface {
|
||||
// Info and Mempool methods...
|
||||
|
||||
// Consensus Connection
|
||||
InitChain(RequestInitChain) ResponseInitChain // Initialize blockchain with validators and other info from TendermintCore
|
||||
DeliverBlock(RequestDeliverBlock) ResponseDeliverBlock // Deliver full block
|
||||
Commit() ResponseCommit // Commit the state and return the application Merkle root hash
|
||||
}
|
||||
|
||||
type RequestDeliverBlock struct {
|
||||
Hash []byte
|
||||
Header Header
|
||||
Txs Txs
|
||||
LastCommitInfo LastCommitInfo
|
||||
ByzantineValidators []Evidence
|
||||
}
|
||||
|
||||
type ResponseDeliverBlock struct {
|
||||
ValidatorUpdates []ValidatorUpdate
|
||||
ConsensusParamUpdates *ConsensusParams
|
||||
Tags []common.KVPair
|
||||
TxResults []ResponseDeliverTx
|
||||
}
|
||||
|
||||
```
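To illustrate the parallelism this enables, here is a hedged application-side sketch (names like `myApp` and `processTx` are invented for this example; it assumes each tx is raw bytes, that `processTx` is safe to run concurrently, and it uses the standard `sync` package):

```go
// Illustrative only: fan the block's txs out to goroutines and collect results in order.
func (app *myApp) DeliverBlock(req RequestDeliverBlock) ResponseDeliverBlock {
	results := make([]ResponseDeliverTx, len(req.Txs))
	var wg sync.WaitGroup
	for i, tx := range req.Txs {
		wg.Add(1)
		go func(i int, tx []byte) {
			defer wg.Done()
			results[i] = app.processTx(tx) // application-defined tx execution
		}(i, tx)
	}
	wg.Wait()
	return ResponseDeliverBlock{TxResults: results}
}
```

An application that needs strict ordering can of course still loop over `req.Txs` sequentially inside the same call.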
|
||||
|
||||
Also, we will need to add a new config param, which will specify what kind of ABCI the application uses.
|
||||
For example, it can be `abci_type`. Then we will have 2 types:
|
||||
- `advanced` - current ABCI
|
||||
- `simple` - proposed implementation
|
||||
|
||||
## Status
|
||||
|
||||
In review
|
||||
|
||||
## Consequences
|
||||
|
||||
### Positive
|
||||
|
||||
- much simpler introduction and tutorials for new developers (instead of implementing 5 methods they
|
||||
will need to implement only 3)
|
||||
- txs can be handled in parallel
|
||||
- simpler interface
|
||||
- faster communications between Tendermint and application
|
||||
|
||||
### Negative
|
||||
|
||||
- Tendermint should now support 2 kinds of ABCI
|
159  docs/architecture/adr-039-peer-behaviour.md (new file)
@@ -0,0 +1,159 @@
|
||||
# ADR 039: Peer Behaviour Interface
|
||||
|
||||
## Changelog
|
||||
* 07-03-2019: Initial draft
|
||||
* 14-03-2019: Updates from feedback
|
||||
|
||||
## Context
|
||||
|
||||
The responsibility for signaling and acting upon peer behaviour lacks a single
|
||||
owning component and is heavily coupled with the network stack[<sup>1</sup>](#references). Reactors
|
||||
maintain a reference to the `p2p.Switch` which they use to call
|
||||
`switch.StopPeerForError(...)` when a peer misbehaves and
|
||||
`switch.MarkAsGood(...)` when a peer contributes in some meaningful way.
|
||||
While the switch handles `StopPeerForError` internally, the `MarkAsGood`
|
||||
method delegates to another component, `p2p.AddrBook`. This scheme of delegation
|
||||
across Switch obscures the responsibility for handling peer behaviour
|
||||
and ties up the reactors in a larger dependency graph when testing.
|
||||
|
||||
## Decision
|
||||
|
||||
Introduce a `PeerBehaviour` interface and concrete implementations which
|
||||
provide methods for reactors to signal peer behaviour without directly
|
||||
coupling to `p2p.Switch`. Introduce an ErrorBehaviourPeer to provide
|
||||
concrete reasons for stopping peers. Introduce GoodBehaviourPeer to provide
|
||||
concrete ways in which a peer contributes.
|
||||
|
||||
### Implementation Changes
|
||||
|
||||
PeerBehaviour then becomes an interface for signaling peer errors as well
|
||||
as for marking peers as `good`.
|
||||
|
||||
```go
|
||||
type PeerBehaviour interface {
|
||||
Behaved(peer Peer, reason GoodBehaviourPeer)
|
||||
Errored(peer Peer, reason ErrorBehaviourPeer)
|
||||
}
|
||||
```
|
||||
|
||||
Instead of signaling peers to stop with arbitrary reasons:
|
||||
`reason interface{}`
|
||||
|
||||
We introduce a concrete error type ErrorBehaviourPeer:
|
||||
```go
|
||||
type ErrorBehaviourPeer int
|
||||
|
||||
const (
|
||||
ErrorBehaviourUnknown = iota
|
||||
ErrorBehaviourBadMessage
|
||||
ErrorBehaviourMessageOutofOrder
|
||||
...
|
||||
)
|
||||
```
|
||||
|
||||
To provide additional information on the ways a peer contributed, we introduce
|
||||
the GoodBehaviourPeer type.
|
||||
|
||||
```go
|
||||
type GoodBehaviourPeer int
|
||||
|
||||
const (
|
||||
GoodBehaviourVote = iota
|
||||
GoodBehaviourBlockPart
|
||||
...
|
||||
)
|
||||
```
|
||||
|
||||
As a first iteration we provide a concrete implementation which wraps
|
||||
the switch:
|
||||
```go
|
||||
type SwitchedPeerBehaviour struct {
|
||||
sw *Switch
|
||||
}
|
||||
|
||||
func (spb *SwitchedPeerBehaviour) Errored(peer Peer, reason ErrorBehaviourPeer) {
|
||||
spb.sw.StopPeerForError(peer, reason)
|
||||
}
|
||||
|
||||
func (spb *SwitchedPeerBehaviour) Behaved(peer Peer, reason GoodBehaviourPeer) {
|
||||
spb.sw.MarkPeerAsGood(peer)
|
||||
}
|
||||
|
||||
func NewSwitchedPeerBehaviour(sw *Switch) *SwitchedPeerBehaviour {
|
||||
return &SwitchedPeerBehaviour{
|
||||
sw: sw,
|
||||
}
|
||||
}
|
||||
```
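For example, a reactor can hold the interface instead of the `Switch` (a hedged sketch; `myReactor`, `decodeMsg` and `handleVote` are invented names and are not part of this ADR):

```go
type myReactor struct {
	behaviour PeerBehaviour // SwitchedPeerBehaviour in production, a test double in tests
}

func (r *myReactor) Receive(peer Peer, msgBytes []byte) {
	vote, err := decodeMsg(msgBytes)
	if err != nil {
		// signal the bad message instead of calling switch.StopPeerForError directly
		r.behaviour.Errored(peer, ErrorBehaviourBadMessage)
		return
	}
	if r.handleVote(vote) {
		r.behaviour.Behaved(peer, GoodBehaviourVote)
	}
}
```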
|
||||
|
||||
Reactors, which are often difficult to unit test[<sup>2</sup>](#references) could use an implementation which exposes the signals produced by the reactor in
|
||||
manufactured scenarios:
|
||||
|
||||
```go
|
||||
type ErrorBehaviours map[Peer][]ErrorBehaviourPeer
|
||||
type GoodBehaviours map[Peer][]GoodBehaviourPeer
|
||||
|
||||
type StorePeerBehaviour struct {
|
||||
eb ErrorBehaviours
|
||||
gb GoodBehaviours
|
||||
}
|
||||
|
||||
func NewStorePeerBehaviour() *StorePeerBehaviour{
|
||||
return &StorePeerBehaviour{
|
||||
eb: make(ErrorBehaviours),
|
||||
gb: make(GoodBehaviours),
|
||||
}
|
||||
}
|
||||
|
||||
func (spb StorePeerBehaviour) Errored(peer Peer, reason ErrorBehaviourPeer) {
|
||||
if _, ok := spb.eb[peer]; !ok {
|
||||
spb.eb[peer] = []ErrorBehaviourPeer{reason}
|
||||
} else {
|
||||
spb.eb[peer] = append(spb.eb[peer], reason)
|
||||
}
|
||||
}
|
||||
|
||||
func (mpb *StorePeerBehaviour) GetErrored() ErrorBehaviours {
|
||||
return mpb.eb
|
||||
}
|
||||
|
||||
|
||||
func (spb StorePeerBehaviour) Behaved(peer Peer, reason GoodBehaviourPeer) {
|
||||
if _, ok := spb.gb[peer]; !ok {
|
||||
spb.gb[peer] = []GoodBehaviourPeer{reason}
|
||||
} else {
|
||||
spb.gb[peer] = append(spb.gb[peer], reason)
|
||||
}
|
||||
}
|
||||
|
||||
func (spb *StorePeerBehaviour) GetBehaved() GoodBehaviours {
|
||||
return spb.gb
|
||||
}
|
||||
```
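A unit test can then drive the reactor and assert purely on the recorded signals (a sketch; it reuses the `myReactor` sketch above and assumes a `newMockPeer` test helper and testify's `require`):

```go
func TestReactorReportsBadMessage(t *testing.T) {
	pb := NewStorePeerBehaviour()
	r := &myReactor{behaviour: pb}
	peer := newMockPeer()

	r.Receive(peer, []byte("not a valid message"))

	// the reactor's reaction is observable without a Switch or network stack
	require.Equal(t, []ErrorBehaviourPeer{ErrorBehaviourBadMessage}, pb.GetErrored()[peer])
}
```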
|
||||
|
||||
## Status
|
||||
|
||||
Accepted
|
||||
|
||||
## Consequences
|
||||
|
||||
### Positive
|
||||
|
||||
* De-couple signaling from acting upon peer behaviour.
|
||||
* Reduce the coupling of reactors and the Switch and the network
|
||||
stack
|
||||
* The responsibility of managing peer behaviour can be migrated to
|
||||
a single component instead of split between the switch and the
|
||||
address book.
|
||||
|
||||
### Negative
|
||||
|
||||
* The first iteration will simply wrap the Switch and introduce a
|
||||
level of indirection.
|
||||
|
||||
### Neutral
|
||||
|
||||
## References
|
||||
|
||||
1. Issue [#2067](https://github.com/tendermint/tendermint/issues/2067): P2P Refactor
|
||||
2. PR: [#3506](https://github.com/tendermint/tendermint/pull/3506): ADR 036: Blockchain Reactor Refactor
|
534  docs/architecture/adr-040-blockchain-reactor-refactor.md (new file)
@@ -0,0 +1,534 @@
|
||||
# ADR 040: Blockchain Reactor Refactor
|
||||
|
||||
## Changelog
|
||||
|
||||
19-03-2019: Initial draft
|
||||
|
||||
## Context
|
||||
|
||||
The Blockchain Reactor's high level responsibility is to enable peers who are far behind the current state of the
|
||||
blockchain to quickly catch up by downloading many blocks in parallel from its peers, verifying block correctness, and
|
||||
executing them against the ABCI application. We call the protocol executed by the Blockchain Reactor `fast-sync`.
|
||||
The current architecture diagram of the blockchain reactor can be found here:
|
||||
|
||||

|
||||
|
||||
The current architecture consists of dozens of routines and is tightly dependent on the `Switch`, making writing
|
||||
unit tests almost impossible. Current tests require setting up complex dependency graphs and dealing with concurrency.
|
||||
Note that having dozens of routines is in this case overkill, as most of the time the routines sit idle waiting for
|
||||
something to happen (message to arrive or timeout to expire). Due to dependency on the `Switch`, testing relatively
|
||||
complex network scenarios and failures (for example adding and removing peers) is a very complex task and frequently leads
|
||||
to complex tests with non-deterministic behavior ([#3400]). The inability to write proper tests keeps confidence in
|
||||
the code low, and this has resulted in several issues (some have been fixed in the meantime and some are still open):
|
||||
[#3400], [#2897], [#2896], [#2699], [#2888], [#2457], [#2622], [#2026].
|
||||
|
||||
## Decision
|
||||
|
||||
To remedy these issues we plan a major refactor of the blockchain reactor. The proposed architecture is largely inspired
|
||||
by ADR-30 and is presented in the following diagram:
|
||||

|
||||
|
||||
We suggest a concurrency architecture where the core algorithm (we call it `Controller`) is extracted into a finite
|
||||
state machine. The active routine of the reactor is called `Executor` and is responsible for receiving and sending
|
||||
messages from/to peers and triggering timeouts. What messages should be sent and timeouts triggered is determined mostly
|
||||
by the `Controller`. The exception is the `Peer Heartbeat` mechanism, which is the `Executor`'s responsibility. The heartbeat
|
||||
mechanism is used to remove slow and unresponsive peers from the peer list. Writing of unit tests is simpler with
|
||||
this architecture as most of the critical logic is part of the `Controller` function. We expect that simpler concurrency
|
||||
architecture will not have significant negative effect on the performance of this reactor (to be confirmed by
|
||||
experimental evaluation).
|
||||
|
||||
|
||||
### Implementation changes
|
||||
|
||||
We assume the following system model for "fast sync" protocol:
|
||||
|
||||
* a node is connected to a random subset of all nodes that represents its peer set. Some nodes are correct and some
|
||||
might be faulty. We don't make assumptions about ratio of faulty nodes, i.e., it is possible that all nodes in some
|
||||
peer set are faulty.
|
||||
* we assume that communication between correct nodes is synchronous, i.e., if a correct node `p` sends a message `m` to
|
||||
a correct node `q` at time `t`, then `q` will receive the message at the latest by time `t+Delta`, where `Delta` is a system
|
||||
parameter that is known by network participants. `Delta` is normally chosen to be an order of magnitude higher than
|
||||
the real communication delay (maximum) between correct nodes. Therefore if a correct node `p` sends a request message
|
||||
to a correct node `q` at time `t` and there is no corresponding reply by time `t + 2*Delta`, then `p` can assume
|
||||
that `q` is faulty. Note that the network assumptions for the consensus reactor are different (we assume partially
|
||||
synchronous model there).
|
||||
|
||||
The requirements for the "fast sync" protocol are formally specified as follows:
|
||||
|
||||
- `Correctness`: If a correct node `p` is connected to a correct node `q` for a long enough period of time, then `p`
|
||||
  will eventually download all requested blocks from `q`.
|
||||
- `Termination`: If a set of peers of a correct node `p` is stable (no new nodes are added to the peer set of `p`) for
|
||||
  a long enough period of time, then the protocol eventually terminates.
|
||||
- `Fairness`: A correct node `p` sends requests for blocks to all peers from its peer set.
|
||||
|
||||
As explained above, the `Executor` is responsible for sending and receiving messages that are part of the `fast-sync`
|
||||
protocol. The following messages are exchanged as part of `fast-sync` protocol:
|
||||
|
||||
``` go
|
||||
type Message int
|
||||
const (
|
||||
MessageUnknown Message = iota
|
||||
MessageStatusRequest
|
||||
MessageStatusResponse
|
||||
MessageBlockRequest
|
||||
MessageBlockResponse
|
||||
)
|
||||
```
|
||||
`MessageStatusRequest` is sent periodically to all peers as a request for a peer to provide its current height. It is
|
||||
part of the `Peer Heartbeat` mechanism, and a failure to respond to this message in a timely manner results in a peer being removed
|
||||
from the peer set. Note that the `Peer Heartbeat` mechanism is used only while a peer is in `fast-sync` mode. We assume
|
||||
here the existence of a mechanism that gives a node a way to inform its peers that it is in `fast-sync` mode.
|
||||
|
||||
``` go
|
||||
type MessageStatusRequest struct {
|
||||
SeqNum int64 // sequence number of the request
|
||||
}
|
||||
```
|
||||
`MessageStatusResponse` is sent as a response to `MessageStatusRequest` to inform the requester about the peer's current
|
||||
height.
|
||||
|
||||
``` go
|
||||
type MessageStatusResponse struct {
|
||||
SeqNum int64 // sequence number of the corresponding request
|
||||
Height int64 // current peer height
|
||||
}
|
||||
```
|
||||
|
||||
`MessageBlockRequest` is used to make a request for a block and the corresponding commit certificate at a given height.
|
||||
|
||||
``` go
|
||||
type MessageBlockRequest struct {
|
||||
Height int64
|
||||
}
|
||||
```
|
||||
|
||||
`MessageBlockResponse` is a response for the corresponding block request. In addition to providing the block and the
|
||||
corresponding commit certificate, it also contains the current peer height.
|
||||
|
||||
``` go
|
||||
type MessageBlockResponse struct {
|
||||
Height int64
|
||||
Block Block
|
||||
Commit Commit
|
||||
PeerHeight int64
|
||||
}
|
||||
```
|
||||
|
||||
In addition to sending and receiving messages and running the `HeartBeat` mechanism, the `Executor` also manages timeouts
|
||||
that are triggered upon `Controller` request. `Controller` is then informed once a timeout expires.
|
||||
|
||||
``` go
|
||||
type TimeoutTrigger int
|
||||
const (
|
||||
TimeoutUnknown TimeoutTrigger = iota
|
||||
TimeoutResponseTrigger
|
||||
TimeoutTerminationTrigger
|
||||
)
|
||||
```
|
||||
|
||||
The `Controller` can be modelled as a function with clearly defined inputs:
|
||||
|
||||
* `State` - current state of the node. Contains data about connected peers and its behavior, pending requests,
|
||||
  received blocks, etc.
|
||||
* `Event` - significant events in the network.
|
||||
|
||||
producing clear outputs:
|
||||
|
||||
* `State` - updated state of the node,
|
||||
* `MessageToSend` - signal what message to send and to which peer
|
||||
* `TimeoutTrigger` - signal that timeout should be triggered.
|
||||
|
||||
|
||||
We consider the following `Event` types:
|
||||
|
||||
``` go
|
||||
type Event int
|
||||
const (
|
||||
EventUnknown Event = iota
|
||||
EventStatusReport
|
||||
EventBlockRequest
|
||||
EventBlockResponse
|
||||
EventRemovePeer
|
||||
EventTimeoutResponse
|
||||
EventTimeoutTermination
|
||||
)
|
||||
```
|
||||
|
||||
`EventStatusResponse` event is generated once `MessageStatusResponse` is received by the `Executor`.
|
||||
|
||||
``` go
|
||||
type EventStatusReport struct {
|
||||
PeerID ID
|
||||
Height int64
|
||||
}
|
||||
```
|
||||
|
||||
`EventBlockRequest` event is generated once `MessageBlockRequest` is received by the `Executor`.
|
||||
|
||||
``` go
|
||||
type EventBlockRequest struct {
|
||||
Height int64
|
||||
PeerID p2p.ID
|
||||
}
|
||||
```
|
||||
`EventBlockResponse` event is generated upon reception of `MessageBlockResponse` message by the `Executor`.
|
||||
|
||||
``` go
|
||||
type EventBlockResponse struct {
|
||||
Height int64
|
||||
Block Block
|
||||
Commit Commit
|
||||
PeerID ID
|
||||
PeerHeight int64
|
||||
}
|
||||
```
|
||||
`EventRemovePeer` is generated by `Executor` to signal that the connection to a peer is closed due to peer misbehavior.
|
||||
|
||||
``` go
|
||||
type EventRemovePeer struct {
|
||||
PeerID ID
|
||||
}
|
||||
```
|
||||
`EventTimeoutResponse` is generated by `Executor` to signal that a timeout triggered by `TimeoutResponseTrigger` has
|
||||
expired.
|
||||
|
||||
``` go
|
||||
type EventTimeoutResponse struct {
|
||||
PeerID ID
|
||||
Height int64
|
||||
}
|
||||
```
|
||||
`EventTimeoutTermination` is generated by `Executor` to signal that a timeout triggered by `TimeoutTerminationTrigger`
|
||||
has expired.
|
||||
|
||||
``` go
|
||||
type EventTimeoutTermination struct {
|
||||
Height int64
|
||||
}
|
||||
```
|
||||
|
||||
`MessageToSend` is just a wrapper around `Message` type that contains id of the peer to which message should be sent.
|
||||
|
||||
``` go
|
||||
type MessageToSend struct {
|
||||
PeerID ID
|
||||
Message Message
|
||||
}
|
||||
```
|
||||
|
||||
The Controller state machine can be in two modes: `ModeFastSync` when
|
||||
a node is trying to catch up with the network by downloading committed blocks,
|
||||
and `ModeConsensus` in which it executes Tendermint consensus protocol. We
|
||||
consider that `fast sync` mode terminates once the Controller switches to
|
||||
`ModeConsensus`.
|
||||
|
||||
``` go
|
||||
type Mode int
|
||||
const (
|
||||
ModeUnknown Mode = iota
|
||||
ModeFastSync
|
||||
ModeConsensus
|
||||
)
|
||||
```
|
||||
`Controller` is managing the following state:
|
||||
|
||||
``` go
|
||||
type ControllerState struct {
|
||||
Height int64 // the first block that is not committed
|
||||
Mode Mode // mode of operation
|
||||
PeerMap map[ID]PeerStats // map of peer IDs to peer statistics
|
||||
MaxRequestPending int64 // maximum height of the pending requests
|
||||
FailedRequests []int64 // list of failed block requests
|
||||
PendingRequestsNum int // total number of pending requests
|
||||
Store []BlockInfo // contains list of downloaded blocks
|
||||
Executor BlockExecutor // store, verify and executes blocks
|
||||
}
|
||||
```
|
||||
|
||||
`PeerStats` data structure keeps for every peer its current height and a list of pending requests for blocks.
|
||||
|
||||
``` go
|
||||
type PeerStats struct {
|
||||
Height int64
|
||||
PendingRequest int64 // a request sent to this peer
|
||||
}
|
||||
```
|
||||
|
||||
`BlockInfo` data structure is used to store information (as part of block store) about downloaded blocks: from what peer
|
||||
a block and the corresponding commit certificate were received.
|
||||
``` go
|
||||
type BlockInfo struct {
|
||||
Block Block
|
||||
Commit Commit
|
||||
PeerID ID // a peer from which we received the corresponding Block and Commit
|
||||
}
|
||||
```
|
||||
|
||||
The `Controller` is initialized by providing an initial height (`startHeight`) from which it will start downloading
|
||||
blocks from peers and the current state of the `BlockExecutor`.
|
||||
|
||||
``` go
|
||||
func NewControllerState(startHeight int64, executor BlockExecutor) ControllerState {
|
||||
state = ControllerState {}
|
||||
state.Height = startHeight
|
||||
state.Mode = ModeFastSync
|
||||
state.MaxRequestPending = startHeight - 1
|
||||
state.PendingRequestsNum = 0
|
||||
state.Executor = executor
|
||||
initialize state.PeerMap, state.FailedRequests and state.Store to empty data structures
|
||||
return state
|
||||
}
|
||||
```
|
||||
|
||||
The core protocol logic is given with the following function:
|
||||
|
||||
``` go
|
||||
func handleEvent(state ControllerState, event Event) (ControllerState, Message, TimeoutTrigger, Error) {
|
||||
msg = nil
|
||||
timeout = nil
|
||||
error = nil
|
||||
|
||||
switch state.Mode {
|
||||
case ModeConsensus:
|
||||
switch event := event.(type) {
|
||||
case EventBlockRequest:
|
||||
msg = createBlockResponseMessage(state, event)
|
||||
return state, msg, timeout, error
|
||||
default:
|
||||
error = "Only respond to BlockRequests while in ModeConsensus!"
|
||||
return state, msg, timeout, error
|
||||
}
|
||||
|
||||
case ModeFastSync:
|
||||
switch event := event.(type) {
|
||||
case EventBlockRequest:
|
||||
msg = createBlockResponseMessage(state, event)
|
||||
return state, msg, timeout, error
|
||||
|
||||
case EventStatusResponse:
|
||||
return handleEventStatusResponse(event, state)
|
||||
|
||||
case EventRemovePeer:
|
||||
return handleEventRemovePeer(event, state)
|
||||
|
||||
case EventBlockResponse:
|
||||
return handleEventBlockResponse(event, state)
|
||||
|
||||
case EventResponseTimeout:
|
||||
return handleEventResponseTimeout(event, state)
|
||||
|
||||
case EventTerminationTimeout:
|
||||
// Termination timeout is triggered in case of empty peer set and in case there are no pending requests.
|
||||
// If this timeout expires and in the meantime no new peers are added or new pending requests are made
|
||||
// then `fast-sync` mode terminates by switching to `ModeConsensus`.
|
||||
// Note that termination timeout should be higher than the response timeout.
|
||||
if state.Height == event.Height && state.PendingRequestsNum == 0 { state.Mode = ModeConsensus }
|
||||
return state, msg, timeout, error
|
||||
|
||||
default:
|
||||
error = "Received unknown event type!"
|
||||
return state, msg, timeout, error
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
``` go
|
||||
func createBlockResponseMessage(state ControllerState, event BlockRequest) MessageToSend {
|
||||
msgToSend = nil
|
||||
if _, ok := state.PeerMap[event.PeerID]; !ok { peerStats = PeerStats{-1, -1} }
|
||||
if state.Executor.ContainsBlockWithHeight(event.Height) && event.Height > peerStats.Height {
|
||||
peerStats.Height = event.Height
|
||||
msg = BlockResponseMessage{
|
||||
Height: event.Height,
|
||||
Block: state.Executor.getBlock(event.Height),
|
||||
Commit: state.Executor.getCommit(event.Height),
|
||||
PeerID: event.PeerID,
|
||||
CurrentHeight: state.Height - 1,
|
||||
}
|
||||
msgToSend = MessageToSend { event.PeerID, msg }
|
||||
}
|
||||
state.PeerMap[event.PeerID] = peerStats
|
||||
return msgToSend
|
||||
}
|
||||
```
|
||||
|
||||
``` go
|
||||
func handleEventStatusResponse(event EventStatusResponse, state ControllerState) (ControllerState, MessageToSend, TimeoutTrigger, Error) {
|
||||
if _, ok := state.PeerMap[event.PeerID]; !ok {
|
||||
peerStats = PeerStats{ -1, -1 }
|
||||
} else {
|
||||
peerStats = state.PeerMap[event.PeerID]
|
||||
}
|
||||
|
||||
if event.Height > peerStats.Height { peerStats.Height = event.Height }
|
||||
// if there are no pending requests for this peer, try to send him a request for block
|
||||
if peerStats.PendingRequest == -1 {
|
||||
msg = createBlockRequestMessage(state, event.PeerID, peerStats.Height)
|
||||
// msg is nil if no request for block can be made to a peer at this point in time
|
||||
if msg != nil {
|
||||
peerStats.PendingRequest = msg.Height
|
||||
state.PendingRequestsNum++
|
||||
// when a request for a block is sent to a peer, a response timeout is triggered. If no corresponding block is sent by the peer
|
||||
// during response timeout period, then the peer is considered faulty and is removed from the peer set.
|
||||
timeout = ResponseTimeoutTrigger{ msg.PeerID, msg.Height, PeerTimeout }
|
||||
} else if state.PendingRequestsNum == 0 {
|
||||
// if there are no pending requests and no new request can be placed to the peer, termination timeout is triggered.
|
||||
// If termination timeout expires and we are still at the same height and there are no pending requests, the "fast-sync"
|
||||
// mode is finished and we switch to `ModeConsensus`.
|
||||
timeout = TerminationTimeoutTrigger{ state.Height, TerminationTimeout }
|
||||
}
|
||||
}
|
||||
state.PeerMap[event.PeerID] = peerStats
|
||||
return state, msg, timeout, error
|
||||
}
|
||||
```
|
||||
|
||||
``` go
|
||||
func handleEventRemovePeer(event EventRemovePeer, state ControllerState) (ControllerState, MessageToSend, TimeoutTrigger, Error) {
|
||||
if _, ok := state.PeerMap[event.PeerID]; ok {
|
||||
pendingRequest = state.PeerMap[event.PeerID].PendingRequest
|
||||
// if a peer is removed from the peer set, its pending request is declared failed and added to the `FailedRequests` list
|
||||
// so it can be retried.
|
||||
if pendingRequest != -1 {
|
||||
add(state.FailedRequests, pendingRequest)
state.PendingRequestsNum--
|
||||
}
|
||||
|
||||
delete(state.PeerMap, event.PeerID)
|
||||
// if the peer set is empty after removal of this peer then termination timeout is triggered.
|
||||
if state.PeerMap.isEmpty() {
|
||||
timeout = TerminationTimeoutTrigger{ state.Height, TerminationTimeout }
|
||||
}
|
||||
} else { error = "Removing unknown peer!" }
|
||||
return state, msg, timeout, error
}
|
||||
```
|
||||
|
||||
``` go
|
||||
func handleEventBlockResponse(event EventBlockResponse, state ControllerState) (ControllerState, MessageToSend, TimeoutTrigger, Error) {
|
||||
if _, ok := state.PeerMap[event.PeerID]; ok {
|
||||
peerStats = state.PeerMap[event.PeerID]
|
||||
// when expected block arrives from a peer, it is added to the store so it can be verified and if correct executed after.
|
||||
if peerStats.PendingRequest == event.Height {
|
||||
peerStats.PendingRequest = -1
|
||||
state.PendingRequestsNum--
|
||||
if event.PeerHeight > peerStats.Height { peerStats.Height = event.PeerHeight }
|
||||
state.Store[event.Height] = BlockInfo{ event.Block, event.Commit, event.PeerID }
|
||||
// blocks are verified sequentially so adding a block to the store does not mean that it will be immediately verified
|
||||
// as some of the previous blocks might be missing.
|
||||
state = verifyBlocks(state) // it can lead to event.PeerID being removed from peer list
|
||||
if _, ok := state.PeerMap[event.PeerID]; ok {
|
||||
// we try to identify new request for a block that can be asked to the peer
|
||||
msg = createBlockRequestMessage(state, event.PeerID, peerStats.Height)
|
||||
if msg != nil {
|
||||
peerStats.PendingRequest = msg.Height
|
||||
state.PendingRequestsNum++
|
||||
// if request for block is made, response timeout is triggered
|
||||
timeout = ResponseTimeoutTrigger{ msg.PeerID, msg.Height, PeerTimeout }
|
||||
} else if state.PeerMap.isEmpty() || state.PendingRequestsNum == 0 {
|
||||
// if the peer map is empty (the peer can be removed as block verification failed) or there are no pending requests
|
||||
// termination timeout is triggered.
|
||||
timeout = TerminationTimeoutTrigger{ state.Height, TerminationTimeout }
|
||||
}
|
||||
}
|
||||
} else { error = "Received Block from wrong peer!" }
|
||||
} else { error = "Received Block from unknown peer!" }
|
||||
|
||||
state.PeerMap[event.PeerID] = peerStats
|
||||
return state, msg, timeout, error
|
||||
}
|
||||
```
|
||||
|
||||
``` go
|
||||
func handleEventResponseTimeout(event, state) {
|
||||
if _, ok := state.PeerMap[event.PeerID]; ok {
|
||||
peerStats = state.PeerMap[event.PeerID]
|
||||
// if a response timeout expires and the peer hasn't delivered the block, the peer is removed from the peer list and
|
||||
// the request is added to the `FailedRequests` so the block can be downloaded from other peer
|
||||
if peerStats.PendingRequest == event.Height {
|
||||
add(state.FailedRequests, peerStats.PendingRequest)
|
||||
delete(state.PeerMap, event.PeerID)
|
||||
state.PendingRequestsNum--
|
||||
// if peer set is empty, then termination timeout is triggered
|
||||
if state.PeerMap.isEmpty() {
|
||||
timeout = TimeoutTrigger{ state.Height, TerminationTimeout }
|
||||
}
|
||||
}
|
||||
}
|
||||
return state, msg, timeout, error
|
||||
}
|
||||
```
|
||||
|
||||
``` go
|
||||
func createBlockRequestMessage(state ControllerState, peerID ID, peerHeight int64) MessageToSend {
|
||||
msg = nil
|
||||
blockHeight = -1
|
||||
r = find request in state.FailedRequests such that r <= peerHeight // returns `nil` if there are no such request
|
||||
// if there is a height in failed requests that can be downloaded from the peer send request to it
|
||||
if r != nil {
|
||||
blockHeight = r
|
||||
delete(state.FailedRequests, r)
|
||||
} else if state.MaxRequestPending < peerHeight {
|
||||
// if height of the maximum pending request is smaller than peer height, then ask peer for next block
|
||||
state.MaxRequestPending++
|
||||
blockHeight = state.MaxRequestPending // increment state.MaxRequestPending and then return the new value
|
||||
}
|
||||
|
||||
if blockHeight > -1 { msg = MessageToSend { peerID, MessageBlockRequest { blockHeight } } }
|
||||
return msg
|
||||
}
|
||||
```
|
||||
|
||||
``` go
|
||||
func verifyBlocks(state State) State {
|
||||
done = false
|
||||
for !done {
|
||||
block = state.Store[height]
|
||||
if block != nil {
|
||||
verified = verify block.Block using block.Commit // returns `true` if verification succeeds, `false` otherwise
|
||||
|
||||
if verified {
|
||||
block.Execute() // executing block is costly operation so it might make sense executing asynchronously
|
||||
state.Height++
|
||||
} else {
|
||||
// if block verification failed, then it is added to `FailedRequests` and the peer is removed from the peer set
|
||||
add(state.FailedRequests, height)
|
||||
state.Store[height] = nil
|
||||
if _, ok := state.PeerMap[block.PeerID]; ok {
|
||||
pendingRequest = state.PeerMap[block.PeerID].PendingRequest
|
||||
// if there is a pending request sent to the peer that is just to be removed from the peer set, add it to `FailedRequests`
|
||||
if pendingRequest != -1 {
|
||||
add(state.FailedRequests, pendingRequest)
|
||||
state.PendingRequestsNum--
|
||||
}
|
||||
delete(state.PeerMap, block.PeerID)
|
||||
}
|
||||
done = true
|
||||
}
|
||||
} else { done = true }
|
||||
}
|
||||
return state
|
||||
}
|
||||
```
|
||||
|
||||
In the proposed architecture the `Controller` is not an active task, i.e., it is called by the `Executor`. Depending on
|
||||
the return values returned by the `Controller`, the `Executor` will send a message to some peer (`msg` != nil), trigger a
|
||||
timeout (`timeout` != nil) or deal with errors (`error` != nil).
|
||||
In case a timeout is triggered, it will provide as an input to `Controller` the corresponding timeout event once
|
||||
timeout expires.
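A minimal sketch of that `Executor` loop, written in the same loose pseudo-code style as the handlers above (the channel and helper names are illustrative, not part of this ADR):

``` go
func (e Executor) run(state ControllerState) {
    for {
        event = nil
        select {
        case event = <-e.msgCh:     // events decoded from peer messages
        case event = <-e.timeoutCh: // expired timeouts
        }
        state, msg, timeout, err = handleEvent(state, event)
        if err != nil { e.logger.Error(err) }
        if msg != nil { e.send(msg.PeerID, msg.Message) }   // hand the message to the p2p layer
        if timeout != nil { e.schedule(timeout) }           // arm the requested timeout
    }
}
```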
|
||||
|
||||
|
||||
## Status
|
||||
|
||||
Draft.
|
||||
|
||||
## Consequences
|
||||
|
||||
### Positive
|
||||
|
||||
- isolated implementation of the algorithm
|
||||
- improved testability - simpler to prove correctness
|
||||
- clearer separation of concerns - easier to reason
|
||||
|
||||
### Negative
|
||||
|
||||
### Neutral
|
29  docs/architecture/adr-041-proposer-selection-via-abci.md (new file)
@@ -0,0 +1,29 @@
|
||||
# ADR 041: Application should be in charge of validator set
|
||||
|
||||
## Changelog
|
||||
|
||||
|
||||
## Context
|
||||
|
||||
Currently Tendermint is in charge of the validator set and proposer selection. The application can only update the validator set at EndBlock time.
|
||||
To support the Light Client, the application should make sure at least 2/3 of the validators are the same at each round.
|
||||
|
||||
The application should have full control over validator set changes and proposer selection. In each round the application can provide the list of validators for the next rounds, in order, with their power. The proposer is the first in the list; in case the proposer is offline, the next one can propose the proposal, and so on.
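As a purely illustrative sketch of the data the application could hand back each round (this ADR does not define the ABCI types yet; the struct and field names below are invented):

```go
// Hypothetical shape of the per-round information described above:
// an ordered list where index 0 is the proposer, index 1 the fallback, and so on.
type RoundValidators struct {
	Round      int64
	Validators []ValidatorUpdate // ordered by proposer priority, with voting power
}
```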
|
||||
|
||||
## Decision
|
||||
|
||||
## Status
|
||||
|
||||
## Consequences
|
||||
|
||||
Tendermint is no longer in charge of the validator set and its changes. The application should provide the correct information.
|
||||
However, Tendermint can provide a pseudo-randomness algorithm to help the application select the proposer in each round.
|
||||
|
||||
### Positive
|
||||
|
||||
### Negative
|
||||
|
||||
### Neutral
|
||||
|
||||
## References
|
||||
|
BIN  docs/architecture/img/bc-reactor-refactor.png (new binary file, 7.9 KiB, not shown)
BIN  docs/architecture/img/bc-reactor.png (new binary file, 43 KiB, not shown)
@@ -79,9 +79,7 @@ make install
|
||||
|
||||
Install [LevelDB](https://github.com/google/leveldb) (minimum version is 1.7).
|
||||
|
||||
### Ubuntu
|
||||
|
||||
Install LevelDB with snappy (optionally):
|
||||
Install LevelDB with snappy (optionally). Below are commands for Ubuntu:
|
||||
|
||||
```
|
||||
sudo apt-get update
|
||||
@@ -100,23 +98,23 @@ wget https://github.com/google/leveldb/archive/v1.20.tar.gz && \
|
||||
rm -f v1.20.tar.gz
|
||||
```
|
||||
|
||||
Set database backend to cleveldb:
|
||||
Set a database backend to `cleveldb`:
|
||||
|
||||
```
|
||||
# config/config.toml
|
||||
db_backend = "cleveldb"
|
||||
```
|
||||
|
||||
To install Tendermint, run
|
||||
To install Tendermint, run:
|
||||
|
||||
```
|
||||
CGO_LDFLAGS="-lsnappy" make install_c
|
||||
```
|
||||
|
||||
or run
|
||||
or run:
|
||||
|
||||
```
|
||||
CGO_LDFLAGS="-lsnappy" make build_c
|
||||
```
|
||||
|
||||
to put the binary in `./build`.
|
||||
which puts the binary in `./build`.
|
||||
|
@@ -4,7 +4,7 @@ With Docker Compose, you can spin up local testnets with a single command.
|
||||
|
||||
## Requirements
|
||||
|
||||
1. [Install tendermint](/docs/introduction/install.md)
|
||||
1. [Install tendermint](../introduction/install.md)
|
||||
2. [Install docker](https://docs.docker.com/engine/installation/)
|
||||
3. [Install docker-compose](https://docs.docker.com/compose/install/)
|
||||
|
||||
|
@@ -62,16 +62,18 @@ There are several roles that are self-explanatory:
|
||||
First, we configure our droplets by specifying the paths for tendermint
|
||||
(`BINARY`) and the node files (`CONFIGDIR`). The latter expects any
|
||||
number of directories named `node0, node1, ...` and so on (equal to the
|
||||
number of droplets created). For this example, we use pre-created files
|
||||
from [this
|
||||
directory](https://github.com/tendermint/tendermint/tree/master/docs/examples).
|
||||
To create your own files, use either the `tendermint testnet` command or
|
||||
review [manual deployments](./deploy-testnets.md).
|
||||
number of droplets created).
|
||||
|
||||
Here's the command to run:
|
||||
To create the node files run:
|
||||
|
||||
```
|
||||
ansible-playbook -i inventory/digital_ocean.py -l sentrynet config.yml -e BINARY=$GOPATH/src/github.com/tendermint/tendermint/build/tendermint -e CONFIGDIR=$GOPATH/src/github.com/tendermint/tendermint/docs/examples
|
||||
tendermint testnet
|
||||
```
|
||||
|
||||
Then, to configure our droplets run:
|
||||
|
||||
```
|
||||
ansible-playbook -i inventory/digital_ocean.py -l sentrynet config.yml -e BINARY=$GOPATH/src/github.com/tendermint/tendermint/build/tendermint -e CONFIGDIR=$GOPATH/src/github.com/tendermint/tendermint/networks/remote/ansible/mytestnet
|
||||
```
|
||||
|
||||
Voila! All your droplets now have the `tendermint` binary and required
|
||||
|
@@ -347,8 +347,10 @@ Commit are included in the header of the next block.
|
||||
- `Version (Version)`: Version of the blockchain and the application
|
||||
- `ChainID (string)`: ID of the blockchain
|
||||
- `Height (int64)`: Height of the block in the chain
|
||||
- `Time (google.protobuf.Timestamp)`: Time of the block. It is the proposer's
|
||||
local time when block was created.
|
||||
- `Time (google.protobuf.Timestamp)`: Time of the previous block.
|
||||
For heights > 1, it's the weighted median of the timestamps of the valid
|
||||
votes in the block.LastCommit.
|
||||
For height == 1, it's genesis time.
|
||||
- `NumTxs (int32)`: Number of transactions in the block
|
||||
- `TotalTxs (int64)`: Total number of transactions in the blockchain until
|
||||
now
|
||||
|
@@ -31,8 +31,13 @@ states to the latest committed state at once.
|
||||
|
||||
When `Commit` completes, it unlocks the mempool.
|
||||
|
||||
Note that it is not possible to send transactions to Tendermint during `Commit` - if your app
|
||||
tries to send a `/broadcast_tx` to Tendermint during Commit, it will deadlock.
|
||||
WARNING: if the ABCI app logic processing the `Commit` message sends a
|
||||
`/broadcast_tx_sync` or `/broadcast_tx_commit` and waits for the response
|
||||
before proceeding, it will deadlock. Executing those `broadcast_tx` calls
|
||||
involves acquiring a lock that is held during the `Commit` call, so it's not
|
||||
possible. If you make the call to the `broadcast_tx` endpoints concurrently,
|
||||
that's no problem, it just can't be part of the sequential logic of the
|
||||
`Commit` function.
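A minimal sketch of the safe pattern described above, assuming a hypothetical `rpcClient` interface standing in for whatever RPC client the application uses; the broadcast happens in a separate goroutine after `Commit` has computed its result, so it never becomes part of the sequential `Commit` logic:

```go
// Sketch only: rpcClient is a hypothetical stand-in for an RPC client that
// exposes /broadcast_tx_sync. The point is that the call is made outside the
// sequential Commit logic, so it does not wait while Commit's lock is held.
type rpcClient interface {
	BroadcastTxSync(tx []byte) error
}

type App struct {
	rpc     rpcClient
	pending [][]byte // txs the app wants to submit after this block
}

func (app *App) Commit() []byte {
	appHash := app.computeAppHash()

	// Fire-and-forget: do NOT wait for the broadcast result here, otherwise
	// Commit would block on the lock that is held during Commit itself.
	txs := app.pending
	app.pending = nil
	go func(txs [][]byte) {
		for _, tx := range txs {
			_ = app.rpc.BroadcastTxSync(tx) // errors handled/logged elsewhere
		}
	}(txs)

	return appHash
}

func (app *App) computeAppHash() []byte { return []byte{} } // placeholder
```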
|
||||
|
||||
### Consensus Connection
|
||||
|
||||
@@ -260,7 +265,7 @@ This is enforced by Tendermint consensus.
|
||||
If a block includes evidence older than this, the block will be rejected
|
||||
(validators won't vote for it).
|
||||
|
||||
Must have `0 < MaxAge`.
|
||||
Must have `MaxAge > 0`.
|
||||
|
||||
### Updates
|
||||
|
||||
|
@@ -103,7 +103,7 @@ type PartSetHeader struct {
|
||||
}
|
||||
```
|
||||
|
||||
See [MerkleRoot](/docs/spec/blockchain/encoding.md#MerkleRoot) for details.
|
||||
See [MerkleRoot](./encoding.md#MerkleRoot) for details.
|
||||
|
||||
## Time
|
||||
|
||||
@@ -163,7 +163,7 @@ a _precommit_ has `vote.Type == 2`.
|
||||
|
||||
Signatures in Tendermint are raw bytes representing the underlying signature.
|
||||
|
||||
See the [signature spec](/docs/spec/blockchain/encoding.md#key-types) for more.
|
||||
See the [signature spec](./encoding.md#key-types) for more.
|
||||
|
||||
## EvidenceData
|
||||
|
||||
@@ -190,7 +190,7 @@ type DuplicateVoteEvidence struct {
|
||||
}
|
||||
```
|
||||
|
||||
See the [pubkey spec](/docs/spec/blockchain/encoding.md#key-types) for more.
|
||||
See the [pubkey spec](./encoding.md#key-types) for more.
|
||||
|
||||
## Validation
|
||||
|
||||
@@ -209,7 +209,7 @@ the current version of the `state` corresponds to the state
|
||||
after executing transactions from the `prevBlock`.
|
||||
Elements of an object are accessed as expected,
|
||||
ie. `block.Header`.
|
||||
See the [definition of `State`](/docs/spec/blockchain/state.md).
|
||||
See the [definition of `State`](./state.md).
|
||||
|
||||
### Header
|
||||
|
||||
@@ -244,7 +244,7 @@ The height is an incrementing integer. The first block has `block.Header.Height
|
||||
### Time
|
||||
|
||||
```
|
||||
block.Header.Timestamp >= prevBlock.Header.Timestamp + 1 ms
|
||||
block.Header.Timestamp >= prevBlock.Header.Timestamp + state.consensusParams.Block.TimeIotaMs
|
||||
block.Header.Timestamp == MedianTime(block.LastCommit, state.LastValidators)
|
||||
```
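The `MedianTime` rule above is a weighted median over the precommit timestamps, weighted by validator voting power. A minimal illustrative sketch (not the actual Tendermint implementation) follows; `weightedTime` is a hypothetical type bundling a vote's timestamp with its validator's power:

```go
// Weighted-median sketch: validators holding at least half of the total
// voting power reported a time <= the returned timestamp.
package main

import (
	"fmt"
	"sort"
	"time"
)

type weightedTime struct {
	Time  time.Time
	Power int64
}

func medianTime(votes []weightedTime) time.Time {
	sort.Slice(votes, func(i, j int) bool { return votes[i].Time.Before(votes[j].Time) })

	var total int64
	for _, v := range votes {
		total += v.Power
	}

	var acc int64
	for _, v := range votes {
		acc += v.Power
		if acc*2 >= total { // crossed the halfway point of voting power
			return v.Time
		}
	}
	return time.Time{} // unreachable for a non-empty vote set
}

func main() {
	base := time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC)
	votes := []weightedTime{
		{base.Add(1 * time.Second), 1},
		{base.Add(2 * time.Second), 3},
		{base.Add(10 * time.Second), 1},
	}
	fmt.Println(medianTime(votes)) // dominated by the 3-power validator: base+2s
}
```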
|
||||
|
||||
@@ -332,6 +332,7 @@ block.ValidatorsHash == MerkleRoot(state.Validators)
|
||||
|
||||
MerkleRoot of the current validator set that is committing the block.
|
||||
This can be used to validate the `LastCommit` included in the next block.
|
||||
Note the validators are sorted by their address before computing the MerkleRoot.
|
||||
|
||||
### NextValidatorsHash
|
||||
|
||||
@@ -342,6 +343,7 @@ block.NextValidatorsHash == MerkleRoot(state.NextValidators)
|
||||
MerkleRoot of the next validator set that will be the validator set that commits the next block.
|
||||
This is included so that the current validator set gets a chance to sign the
|
||||
next validator sets Merkle root.
|
||||
Note the validators are sorted by their address before computing the MerkleRoot.
|
||||
|
||||
### ConsensusHash
|
||||
|
||||
|
@@ -339,6 +339,6 @@ type CanonicalVote struct {
|
||||
|
||||
The field ordering and the fixed sized encoding for the first three fields is optimized to ease parsing of SignBytes
|
||||
in HSMs. It creates fixed offsets for relevant fields that need to be read in this context.
|
||||
For more details, see the [signing spec](/docs/spec/consensus/signing.md).
|
||||
For more details, see the [signing spec](../consensus/signing.md).
|
||||
Also, see the motivating discussion in
|
||||
[#1622](https://github.com/tendermint/tendermint/issues/1622).
|
||||
|
@@ -1 +1 @@
|
||||
[Moved](/docs/spec/software/abci.md)
|
||||
[Moved](../software/abci.md)
|
||||
|
224
docs/spec/consensus/checkvalidators.md
Normal file
224
docs/spec/consensus/checkvalidators.md
Normal file
@@ -0,0 +1,224 @@
|
||||
# Checking Validator Set (CheckVS)
|
||||
|
||||
As part of the light client, the CheckVS procedure has to check whether, given
|
||||
two headers (whose heights differ by more than 1), the LightClient can trust the
|
||||
newer header under the assumption that it trusted the old one.
|
||||
|
||||
This document contains some math formulas. To ease reading, the file
|
||||
/tendermint/docs/spec/pdfs/checkvalidators.pdf
|
||||
displays them correctly.
|
||||
|
||||
## Definitions
|
||||
|
||||
* header fields
|
||||
- $height$
|
||||
- $bfttime$: the chain time when the header (block) was generated
|
||||
- $V$: validator set containing validators.
|
||||
- $nextV$: next validators
|
||||
* $tp$: trusting period
|
||||
* for a time $t$, the predicate $correct(v,t)$ is true if the validator $v$
|
||||
follows the protocol until time $t$ (we will see about recovery later).
|
||||
Similarly we define $faulty(v,t)$.
|
||||
* For each header $h$ it has locally stored, the LightClient stores whether
|
||||
it trusts $h$. We write $trust(h) = true$, if this is the case.
|
||||
* Validator fields. We will write a validator as a tuple $(v,p)$ such that
|
||||
+ $v$ is the identifier (we assume identifiers are unique in each validator set)
|
||||
+ $p$ is its voting power
|
||||
|
||||
## LightClient Trusting Spec
|
||||
|
||||
### LightClient Invariant
|
||||
For each LightClient $l$ and each header $h$:
|
||||
if $l$ has set $trust(h) = true$,
|
||||
then validators that are correct until time $h.bfttime + tp$ have more than two thirds of the voting power in $h.V$. (Or/and $h.nextV$)
|
||||
|
||||
Formally,
|
||||
\[\sum_{(v,p) \in h.V \wedge correct(v,h.bfttime + tp)} p > 2/3 \sum_{(v,p) \in h.V} p\]
|
||||
|
||||
Equivalently,
|
||||
\[\sum_{(v,p) \in h.V \wedge faulty(v,h.bfttime + tp)} p < 1/3 \sum_{(v,p) \in h.V} p\]
|
||||
|
||||
**Question:** What should the precise assumption be here? Is the invariant on $h.V$, on $h.nextV$, or both?
|
||||
|
||||
*Assumption:* If a header is properly generated, then the above equations hold.
|
||||
|
||||
### Liveness
|
||||
|
||||
*Draft:* If a header $h$ has been properly generated by the blockchain (and its age is less than the trusting period), then a correct LightClient will eventually set $trust(h) = true$.
|
||||
|
||||
|
||||
|
||||
|
||||
## High Level Solution
|
||||
|
||||
Upon initialization, the LightClient is given a header *inithead* it trusts by
|
||||
social consensus. It is assumed that *inithead* satisfies the LightClient
|
||||
Invariant.
|
||||
|
||||
When a LightClient sees a new header it has to decide whether to trust the new
|
||||
header. Trust can be obtained by (possibly) a combination of two methods.
|
||||
|
||||
1. **an uninterrupted sequence of proofs.** If a block is appended to the chain, where the last block
|
||||
is trusted (and properly committed by the old validator set in the next block),
|
||||
and the new block contains a new validator set, the new block is trusted if the LightClient knows all headers in the prefix.
|
||||
Intuitively, a trusted validator set is assumed to not choose a new validator set
|
||||
that violates the fault assumption.
|
||||
|
||||
2. **trusting period.** Based on a trusted block *h*, and the LightClient
|
||||
Invariant, which ensures the fault assumption during the trusting period, we can
|
||||
try to infer whether it is guaranteed that the validator set of the new block
|
||||
contains > 2/3 correct voting power. If such a validator set commits a block, we
|
||||
can trust it, as these processes have been continuously correct by the
|
||||
invariant.
|
||||
|
||||
### Examples for the "trusting period" method
|
||||
|
||||
* *oh*: the old trusted header
|
||||
* *nh*: the new header that has to be checked
|
||||
|
||||
Let's assume $oh.bfttime + tp > nh.bfttime$ and $oh.bfttime + tp > now$.
|
||||
In the following examples, the pairs $(v,p)$ denote validators and their voting power.
|
||||
|
||||
#### Example: Identical VSets
|
||||
|
||||
\[
|
||||
oh.V = \{(1,1), ... (4,1)\}\\
|
||||
nh.V = \{(1,1), ... (4,1)\}
|
||||
\]
|
||||
|
||||
As we trust oh.V (at oh.bfttime) and the trusting period is not over yet, we
|
||||
trust nh.V.
|
||||
|
||||
#### Example: Changed Voting powers
|
||||
|
||||
\[
|
||||
oh.V = \{(1,1), ... (4,1)\}\\
|
||||
nh.V = \{(1,1), ... (4,2)\}
|
||||
\]
|
||||
|
||||
|
||||
Validator 4 has more than a third voting power in nh.V. As trusting oh does not
|
||||
rule out that 4 is faulty, the fault assumption might be violated in $nh.V$. Thus, $nh.V$
|
||||
cannot be trusted.
|
||||
|
||||
#### Example: Lucky case with $n > 3t + 1$
|
||||
|
||||
\[
|
||||
oh.V = \{(1,1), ... (6,1)\}\\
|
||||
nh.V = \{(1,1), ... (7,1)\}
|
||||
\]
|
||||
|
||||
By the fault assumption ($n > 3t$), at most one validator in
|
||||
$oh.V$ is faulty. In addition, validator 7 may be faulty. As a result
|
||||
there are at most 2 faulty validators in $nh.V$. Because $7 > 3 \cdot 2$ we
|
||||
say that oh.V provides sufficient trust in order to trust $nh.V$.
|
||||
|
||||
#### Example: Swapping validators
|
||||
|
||||
\[
|
||||
oh.V = \{(1,1), ... (4,1)\}\\
|
||||
nh.V = \{(2,1), ... (5,1)\}
|
||||
\]
|
||||
|
||||
|
||||
|
||||
|
||||
Observe that validator 1 is not present in $nh.V$. Conservatively, we have to
|
||||
assume 1 is correct, and there may be a fault among 2,3,4. In addition, we don't
|
||||
know 5, so that conservatively, we have to assume 5 may be faulty. Thus among
|
||||
2,3,4,5, there may be two faults which violates the faults assumption. Thus $oh$
|
||||
does **not** provide sufficient trust in order to trust $nh$.
|
||||
|
||||
|
||||
## Basics for the "trusting period" method
|
||||
|
||||
The function `CheckVS(oh, nh)` returns true, when *oh* provides sufficient
|
||||
trust to trust nh.
|
||||
|
||||
### Assumptions
|
||||
|
||||
1. $tp < \text{unbonding period}$.
|
||||
2. $nh.bfttime < now$
|
||||
3. $nh.bfttime < oh.bfttime+tp$
|
||||
4. $trust(oh)=true$
|
||||
|
||||
### Some Maths
|
||||
|
||||
**Observation 1.** If $oh.bfttime + tp > now$, we trust the old
|
||||
validator set $oh.V$.
|
||||
|
||||
In the following let's assume oh is trusted and sufficiently new.
|
||||
|
||||
**Definition 1.** Let $PA \subseteq oh.V$ be a *potential adversary* in $oh$, if
|
||||
the sum of the voting powers in PA is less than 1/3 of the voting
|
||||
powers in $oh.V$, that is,
|
||||
\[
|
||||
\sum_{(v,p) \in PA} p < 1/3 \sum_{(v,p) \in oh.V} p
|
||||
\]
|
||||
|
||||
**Proposition 1.** The set of faulty processes
|
||||
\[oh.V \setminus \{(v,p): (v,p) \in oh.V \wedge correct(v,oh.bfttime + tp)\}\]
|
||||
is a potential adversary.
|
||||
|
||||
*Proof.* By the LightClient invariant.
|
||||
|
||||
**Definition 2.** Let the *unknown validators* UV be the validators that appear in $nh.V$ and not in
|
||||
$oh.V$, that is,
|
||||
\[
|
||||
UV = \{(v,p): (v,p) \in nh.V \wedge \nexists (v,x) \in oh.V \}.
|
||||
\]
|
||||
|
||||
|
||||
|
||||
|
||||
**Theorem 1.** If for all potential adversaries PA, in $nh$ the combined voting
|
||||
powers of PA and UV is less than a third of the total voting power, then in
|
||||
$nh$, more than 2/3 of the voting power is held by correct processes. Formally,
|
||||
if for all PA
|
||||
|
||||
\[
|
||||
\sum_{(v,old) \in PA \wedge (v,p) \in nh.V} p + \sum_{(v,p) \in UV} p < 1/3
|
||||
\sum_{(v,p) \in nh.V} p,
|
||||
\]
|
||||
then
|
||||
\[
|
||||
\sum_{(v,p) \in nh.V \wedge correct(v,oh.bfttime + tp)} p > 2/3 \sum_{(v,p) \in nh.V} p
|
||||
\]
|
||||
|
||||
*Proof.* By the definition of PA, Proposition 1, and the LightClient invariant.
|
||||
|
||||
By Assumption 3, there is sufficient voting power to trust the new validator set. (And thus the validator set it signs in that block, for which the $tp$ starts at the $bfttime$ of the header).
|
||||
|
||||
Below, we thus sketch a function that checks whether the premise of Theorem 1 holds. If the result is positive, we can trust $nh$, otherwise not.
|
||||
|
||||
|
||||
|
||||
### An Algorithm
|
||||
|
||||
In pseudo go...
|
||||
|
||||
```go
|
||||
func CheckVS(oh, nh) bool {
|
||||
if oh.bfttime + unbonding_period < now { // Observation 1
|
||||
return false // old header was once trusted but it is expired
|
||||
}
|
||||
|
||||
PAs := compute_all_PAs(oh) // Definition 1
|
||||
PAs := reduce (PAs) // remove every PA that is a subset of another PA
|
||||
UV := compute_UV(oh,nh) // Definition 2
|
||||
|
||||
vpUV := votingpower(UV,nh) // second sum in Theorem 1
|
||||
vpNH := votingpower(nh.V,nh) // right hand side of premise of Theorem 1
|
||||
vpMaxPA := maximumvotingpower(PAs,nh) // voting powers of all PA and big max
|
||||
|
||||
return vpMaxPA + vpUV < 1/3 * vpNH // Theorem 1. It must be smaller for all
|
||||
// so it must be smaller for the max
|
||||
}
|
||||
```
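The pseudocode above leaves the helpers unspecified. A sketch of two of them follows, assuming a hypothetical `Validator` type carrying an identifier and a voting power; `computeUV` follows Definition 2 (validators present in $nh.V$ but not in $oh.V$) and `votingPower` sums the powers of a given subset:

```go
// Illustrative helpers only; types and names are assumptions, not the actual
// light client implementation.
type Validator struct {
	ID    string
	Power int64
}

type Header struct {
	V []Validator // validator set of the header
}

// computeUV returns the "unknown validators" of Definition 2.
func computeUV(oh, nh Header) []Validator {
	known := make(map[string]bool, len(oh.V))
	for _, v := range oh.V {
		known[v.ID] = true
	}
	var uv []Validator
	for _, v := range nh.V {
		if !known[v.ID] {
			uv = append(uv, v) // unknown to the trusted header
		}
	}
	return uv
}

// votingPower sums the voting powers of the given subset.
func votingPower(set []Validator) int64 {
	var sum int64
	for _, v := range set {
		sum += v.Power
	}
	return sum
}
```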
|
||||
|
||||
|
||||
## Remarks
|
||||
|
||||
**Remark.** Computing all PAs might be too expensive (all subsets of $oh.V$ that have a certain combined voting power in oh). Similarly, we then have to compute all voting powers of PAs in nh to get the maximum. This is disturbing, as right now, based on the examples, I expect that CheckVS will mostly return false, assuming that there are frequent changes in the validator sets. However, $oh.V=nh.V$ might be the common case.
|
||||
|
||||
**To Do.** The current invariant assumes that the 1/3 fault assumption is always satisfied. If this is not the case, and there is slashing, etc., we should write the spec of the fault assumptions with temporary violations. Cf. fork accountability, slashing, "counter factual signing" etc.
|
@@ -1 +1 @@
|
||||
[Moved](/docs/spec/software/wal.md)
|
||||
[Moved](../software/wal.md)
|
||||
|
@@ -12,14 +12,14 @@ and upon incoming connection shares some peers and disconnects.
|
||||
|
||||
## Seeds
|
||||
|
||||
`--p2p.seeds “1.2.3.4:26656,2.3.4.5:4444”`
|
||||
`--p2p.seeds “id100000000000000000000000000000000@1.2.3.4:26656,id200000000000000000000000000000000@2.3.4.5:4444”`
|
||||
|
||||
Dials these seeds when we need more peers. They should return a list of peers and then disconnect.
|
||||
If we already have enough peers in the address book, we may never need to dial them.
|
||||
|
||||
## Persistent Peers
|
||||
|
||||
`--p2p.persistent_peers “1.2.3.4:26656,2.3.4.5:26656”`
|
||||
`--p2p.persistent_peers “id100000000000000000000000000000000@1.2.3.4:26656,id200000000000000000000000000000000@2.3.4.5:26656”`
|
||||
|
||||
Dial these peers and auto-redial them if the connection fails.
|
||||
These are intended to be trusted persistent peers that can help
|
||||
@@ -30,9 +30,9 @@ backoff and will give up after a day of trying to connect.
|
||||
the user will be warned that seeds may auto-close connections
|
||||
and that the node may not be able to keep the connection persistent.
|
||||
|
||||
## Private Persistent Peers
|
||||
## Private Peers
|
||||
|
||||
`--p2p.private_persistent_peers “1.2.3.4:26656,2.3.4.5:26656”`
|
||||
`--p2p.private_peer_ids “id100000000000000000000000000000000,id200000000000000000000000000000000”`
|
||||
|
||||
These are persistent peers that we do not add to the address book or
|
||||
gossip to other peers. They stay private to us.
|
||||
These are IDs of the peers that we do not add to the address book or gossip to
|
||||
other peers. They stay private to us.
|
||||
|
BIN
docs/spec/pdfs/checkvalidators.pdf
Normal file
BIN
docs/spec/pdfs/checkvalidators.pdf
Normal file
Binary file not shown.
@@ -2,45 +2,290 @@
|
||||
|
||||
This document specifies the Proposer Selection Procedure that is used in Tendermint to choose a round proposer.
|
||||
As Tendermint is a “leader-based protocol”, proposer selection is critical for its correct functioning.
|
||||
Let denote with `proposer_p(h,r)` a process returned by the Proposer Selection Procedure at the process p, at height h
|
||||
and round r. Then the Proposer Selection procedure should fulfill the following properties:
|
||||
|
||||
`Agreement`: Given a validator set V, and two honest validators,
|
||||
p and q, for each height h, and each round r,
|
||||
proposer_p(h,r) = proposer_q(h,r)
|
||||
At a given block height, the proposer selection algorithm runs with the same validator set at each round.
|
||||
Between heights, an updated validator set may be specified by the application as part of the ABCIResponses' EndBlock.
|
||||
|
||||
`Liveness`: In every consecutive sequence of rounds of size K (K is system parameter), at least a
|
||||
single round has an honest proposer.
|
||||
## Requirements for Proposer Selection
|
||||
|
||||
`Fairness`: The proposer selection is proportional to the validator voting power, i.e., a validator with more
|
||||
voting power is selected more frequently, proportional to its power. More precisely, given a set of processes
|
||||
with the total voting power N, during a sequence of rounds of size N, every process is proposer in a number of rounds
|
||||
equal to its voting power.
|
||||
This section covers the requirements, with Rx denoting mandatory and Ox denoting optional requirements.
|
||||
The following requirements must be met by the Proposer Selection procedure:
|
||||
|
||||
We now look at a few particular cases to understand better how fairness should be implemented.
|
||||
If we have 4 processes with the following voting power distribution (p0,4), (p1, 2), (p2, 2), (p3, 2) at some round r,
|
||||
we have the following sequence of proposer selections in the following rounds:
|
||||
#### R1: Determinism
|
||||
Given a validator set `V`, and two honest validators `p` and `q`, for each height `h` and each round `r` the following must hold:
|
||||
|
||||
`p0, p1, p2, p3, p0, p0, p1, p2, p3, p0, p0, p1, p2, p3, p0, p0, p1, p2, p3, p0, etc`
|
||||
`proposer_p(h,r) = proposer_q(h,r)`
|
||||
|
||||
Let consider now the following scenario where a total voting power of faulty processes is aggregated in a single process
|
||||
p0: (p0,3), (p1, 1), (p2, 1), (p3, 1), (p4, 1), (p5, 1), (p6, 1), (p7, 1).
|
||||
In this case the sequence of proposer selections looks like this:
|
||||
where `proposer_p(h,r)` is the proposer returned by the Proposer Selection Procedure at process `p`, at height `h` and round `r`.
|
||||
|
||||
`p0, p1, p2, p3, p0, p4, p5, p6, p7, p0, p0, p1, p2, p3, p0, p4, p5, p6, p7, p0, etc`
|
||||
#### R2: Fairness
|
||||
Given a validator set with total voting power P and a sequence S of elections, in any sub-sequence of S with length C*P a validator v must be elected as proposer C*VP(v) times, i.e. with frequency:
|
||||
|
||||
In this case, we see that a number of rounds coordinated by a faulty process is proportional to its voting power.
|
||||
We consider also the case where we have voting power uniformly distributed among processes, i.e., we have 10 processes
|
||||
each with voting power of 1. And let consider that there are 3 faulty processes with consecutive addresses,
|
||||
for example the first 3 processes are faulty. Then the sequence looks like this:
|
||||
f(v) ~ VP(v) / P
|
||||
|
||||
`p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, etc`
|
||||
where C is a tolerance factor for validator set changes with the following values:
|
||||
- C == 1 if there are no validator set changes
|
||||
- C ~ k when there are validator changes
|
||||
|
||||
In this case, we have 3 consecutive rounds with a faulty proposer.
|
||||
One special case we consider is the case where a single honest process p0 has most of the voting power, for example:
|
||||
(p0,100), (p1, 2), (p2, 3), (p3, 4). Then the sequence of proposer selection looks like this:
|
||||
*[this needs more work]*
|
||||
|
||||
p0, p0, p0, p0, p0, p0, p0, p0, p0, p0, p0, p0, p0, p1, p0, p0, p0, p0, p0, etc
|
||||
### Basic Algorithm
|
||||
|
||||
This basically means that almost all rounds have the same proposer. But in this case, the process p0 has anyway enough
|
||||
voting power to decide whatever he wants, so the fact that he coordinates almost all rounds seems correct.
|
||||
At its core, the proposer selection procedure uses a weighted round-robin algorithm.
|
||||
|
||||
A model that gives a good intuition on how and why the selection algorithm works, and why it is fair, is that of a priority queue. The validators move ahead in this queue according to their voting power (the higher the voting power the faster a validator moves towards the head of the queue). When the algorithm runs the following happens:
|
||||
- all validators move "ahead" according to their powers: for each validator, increase the priority by the voting power
|
||||
- first in the queue becomes the proposer: select the validator with highest priority
|
||||
- move the proposer back in the queue: decrease the proposer's priority by the total voting power
|
||||
|
||||
Notation:
|
||||
- vset - the validator set
|
||||
- n - the number of validators
|
||||
- VP(i) - voting power of validator i
|
||||
- A(i) - accumulated priority for validator i
|
||||
- P - total voting power of set
|
||||
- avg - average of all validator priorities
|
||||
- prop - proposer
|
||||
|
||||
A simplified view of the Selection Algorithm:
|
||||
|
||||
```
|
||||
def ProposerSelection (vset):
|
||||
|
||||
// compute priorities and elect proposer
|
||||
for each validator i in vset:
|
||||
A(i) += VP(i)
|
||||
prop = max(A)
|
||||
A(prop) -= P
|
||||
```
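A runnable sketch of this weighted round-robin follows, assuming that ties on the maximum priority are broken deterministically (here by the lower index, standing in for the address ordering used in the tables below); names are illustrative only. For the stable set `p1: VP=1, p2: VP=3` it reproduces the `[p2, p1, p2, p2]*` sequence discussed later in this document:

```go
package main

import "fmt"

type val struct {
	name     string
	power    int64
	priority int64
}

// proposerSelection performs one run of the weighted round-robin and returns
// the index of the elected proposer together with the updated set.
func proposerSelection(vset []val) (int, []val) {
	var total int64
	for _, v := range vset {
		total += v.power
	}
	// move everyone ahead by its voting power
	for i := range vset {
		vset[i].priority += vset[i].power
	}
	// the head of the queue (highest priority) becomes the proposer;
	// ties go to the lower index (assumed deterministic tie-break)
	prop := 0
	for i := range vset {
		if vset[i].priority > vset[prop].priority {
			prop = i
		}
	}
	// move the proposer to the back of the queue
	vset[prop].priority -= total
	return prop, vset
}

func main() {
	vset := []val{{"p1", 1, 0}, {"p2", 3, 0}}
	for run := 0; run < 8; run++ {
		var prop int
		prop, vset = proposerSelection(vset)
		fmt.Print(vset[prop].name, " ")
	}
	fmt.Println() // p2 p1 p2 p2 p2 p1 p2 p2
}
```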
|
||||
|
||||
### Stable Set
|
||||
|
||||
Consider the validator set:
|
||||
|
||||
Validator | p1| p2
|
||||
----------|---|---
|
||||
VP | 1 | 3
|
||||
|
||||
Assuming no validator changes, the following table shows the proposer priority computation over a few runs. Four runs of the selection procedure are shown, starting with the 5th the same values are computed.
|
||||
Each row shows the priority queue and each process's place in it. The proposer is the one closest to the head, i.e. the rightmost validator. As priorities are updated, the validators move right in the queue. The proposer moves left as its priority is reduced after election.
|
||||
|
||||
|Priority Run | -2| -1| 0 | 1| 2 | 3 | 4 | 5 | Alg step
|
||||
|--------------- |---|---|---- |---|---- |---|---|---|--------
|
||||
| | | |p1,p2| | | | | |Initialized to 0
|
||||
|run 1 | | | | p1| | p2| | |A(i)+=VP(i)
|
||||
| | | p2| | p1| | | | |A(p2)-= P
|
||||
|run 2 | | | | |p1,p2| | | |A(i)+=VP(i)
|
||||
| | p1| | | | p2| | | |A(p1)-= P
|
||||
|run 3 | | p1| | | | | | p2|A(i)+=VP(i)
|
||||
| | | p1| | p2| | | | |A(p2)-= P
|
||||
|run 4 | | | p1| | | | p2| |A(i)+=VP(i)
|
||||
| | | |p1,p2| | | | | |A(p2)-= P
|
||||
|
||||
It can be shown that:
|
||||
- At the end of each run k+1 the sum of the priorities is the same as at end of run k. If a new set's priorities are initialized to 0 then the sum of priorities will be 0 at each run while there are no changes.
|
||||
- The max distance between priorities is (n-1) * P. *[formal proof not finished]*
|
||||
|
||||
### Validator Set Changes
|
||||
Between proposer selection runs the validator set may change. Some changes have implications on the proposer election.
|
||||
|
||||
#### Voting Power Change
|
||||
Consider again the earlier example and assume that the voting power of p1 is changed to 4:
|
||||
|
||||
Validator | p1| p2
|
||||
----------|---| ---
|
||||
VP | 4 | 3
|
||||
|
||||
Let's also assume that before this change the proposer priorities were as shown in the first row (last run). As can be seen, the selection could run again, without changes, as before.
|
||||
|
||||
|Priority Run| -2 | -1 | 0 | 1 | 2 | Comment
|
||||
|--------------| ---|--- |------|--- |--- |--------
|
||||
| last run | | p2 | | p1 | |__update VP(p1)__
|
||||
| next run | | | | | p2 |A(i)+=VP(i)
|
||||
| | p1 | | | | p2 |A(p1)-= P
|
||||
|
||||
However, when a validator changes power from a high to a low value, some other validators may remain far back in the queue for a long time. This scenario is considered again in the Proposer Priority Range section.
|
||||
|
||||
As before:
|
||||
- At the end of each run k+1 the sum of the priorities is the same as at run k.
|
||||
- The max distance between priorities is (n-1) * P.
|
||||
|
||||
#### Validator Removal
|
||||
Consider a new example with set:
|
||||
|
||||
Validator | p1 | p2 | p3 |
|
||||
--------- |--- |--- |--- |
|
||||
VP | 1 | 2 | 3 |
|
||||
|
||||
Let's assume that after the last run the proposer priorities were as shown in first row with their sum being 0. After p2 is removed, at the end of next proposer selection run (penultimate row) the sum of priorities is -2 (minus the priority of the removed process).
|
||||
|
||||
The procedure could continue without modifications. However, after a sufficiently large number of modifications in validator set, the priority values would migrate towards maximum or minimum allowed values causing truncations due to overflow detection.
|
||||
For this reason, the selection procedure adds another __new step__ that centers the current priority values such that the priority sum remains close to 0.
|
||||
|
||||
|Priority Run |-3 | -2 | -1 | 0 | 1 | 2 | 4 |Comment
|
||||
|--------------- |--- | ---|--- |--- |--- |--- |---|--------
|
||||
| last run |p3 | | | | p1 | p2 | |__remove p2__
|
||||
| nextrun | | | | | | | |
|
||||
| __new step__ | | p3 | | | | p1 | |A(i) -= avg, avg = -1
|
||||
| | | | | | p3 | p1 | |A(i)+=VP(i)
|
||||
| | | | p1 | | p3 | | |A(p1)-= P
|
||||
|
||||
The modified selection algorithm is:
|
||||
|
||||
def ProposerSelection (vset):
|
||||
|
||||
// center priorities around zero
|
||||
avg = sum(A(i) for i in vset)/len(vset)
|
||||
for each validator i in vset:
|
||||
A(i) -= avg
|
||||
|
||||
// compute priorities and elect proposer
|
||||
for each validator i in vset:
|
||||
A(i) += VP(i)
|
||||
prop = max(A)
|
||||
A(prop) -= P
|
||||
|
||||
Observations:
|
||||
- The sum of priorities is now close to 0. Due to integer division the sum is an integer in (-n, n), where n is the number of validators.
|
||||
|
||||
#### New Validator
|
||||
When a new validator is added, the same problem as the one described for removal appears: the sum of priorities in the new set is not zero. This is fixed with the centering step introduced above.
|
||||
|
||||
One other issue that needs to be addressed is the following. A validator V that has just been elected is moved to the end of the queue. If the validator set is large and/or other validators have significantly higher power, V will have to wait many runs to be elected. If V removes and re-adds itself to the set, it would make a significant (albeit unfair) "jump" ahead in the queue.
|
||||
|
||||
In order to prevent this, when a new validator is added, its initial priority is set to:
|
||||
|
||||
A(V) = -1.125 * P
|
||||
|
||||
where P is the total voting power of the set including V.
|
||||
|
||||
The current implementation uses a penalty factor of 1.125 because it provides a small punishment that is efficient to calculate. See [here](https://github.com/tendermint/tendermint/pull/2785#discussion_r235038971) for more details.
|
||||
|
||||
If we consider the validator set where p3 has just been added:
|
||||
|
||||
Validator | p1 | p2 | p3
|
||||
----------|--- |--- |---
|
||||
VP | 1 | 3 | 8
|
||||
|
||||
then p3 will start with proposer priority:
|
||||
|
||||
A(p3) = -1.125 * (1 + 3 + 8) ~ -13
|
||||
|
||||
Note that since the current computation uses integer division, the fractional part of the penalty is lost when the sum of the voting power is less than 8.
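One plausible integer formulation of the `-1.125 * P` penalty is sketched below (an assumption for illustration, not necessarily the exact production code): `1.125 * P` equals `P + P/8`, so a shift-by-3 loses the extra eighth whenever `P < 8`, which is the penalty loss mentioned above.

```go
// Illustrative sketch of the new-validator penalty using integer arithmetic.
func newValidatorPriority(totalVotingPower int64) int64 {
	return -(totalVotingPower + (totalVotingPower >> 3))
}

// newValidatorPriority(12) == -13  (p1=1, p2=3, p3=8 from the example)
// newValidatorPriority(7)  == -7   (the 0.125*P part is lost: 7>>3 == 0)
```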
|
||||
|
||||
In the next run, p3 will still be ahead in the queue, elected as proposer and moved back in the queue.
|
||||
|
||||
|Priority Run |-13 | -9 | -5 | -2 | -1 | 0 | 1 | 2 | 5 | 6 | 7 |Alg step
|
||||
|---------------|--- |--- |--- |----|--- |--- |---|---|---|---|---|--------
|
||||
|last run | | | | p2 | | | | p1| | | |__add p3__
|
||||
| | p3 | | | p2 | | | | p1| | | |A(p3) = -13
|
||||
|next run | | p3 | | | | | | p2| | p1| |A(i) -= avg, avg = -4
|
||||
| | | | | | p3 | | | | p2| | p1|A(i)+=VP(i)
|
||||
| | | | p1 | | p3 | | | | p2| | |A(p1)-=P
|
||||
|
||||
### Proposer Priority Range
|
||||
With the introduction of centering, some interesting cases occur. Low power validators that bond early in a set that includes high power validator(s) benefit from subsequent additions to the set. This is because these early validators run through more right shift operations during centering, operations that increase their priority.
|
||||
|
||||
As an example, consider the set where p2 is added after p1, with priority -1.125 * 80k = -90k. After the selection procedure runs once:
|
||||
|
||||
Validator | p1 | p2 | Comment
|
||||
----------|-----|---- |---
|
||||
VP | 80k | 10 |
|
||||
A | 0 |-90k | __added p2__
|
||||
A |-45k | 45k | __run selection__
|
||||
|
||||
Then execute the following steps:
|
||||
|
||||
1. Add a new validator p3:
|
||||
|
||||
Validator | p1 | p2 | p3
|
||||
----------|-----|--- |----
|
||||
VP | 80k | 10 | 10
|
||||
|
||||
2. Run selection once. The notation '..p'/'p..' means very small deviations compared to column priority.
|
||||
|
||||
|Priority Run | -90k..| -60k | -45k | -15k| 0 | 45k | 75k | 155k | Comment
|
||||
|--------------|------ |----- |------- |---- |---|---- |----- |------- |---------
|
||||
| last run | p3 | | p2 | | | p1 | | | __added p3__
|
||||
| next run
|
||||
| *right_shift*| | p3 | | p2 | | | p1 | | A(i) -= avg,avg=-30k
|
||||
| | | ..p3| | ..p2| | | | p1 | A(i)+=VP(i)
|
||||
| | | ..p3| | ..p2| | | p1.. | | A(p1)-=P, P=80k+20
|
||||
|
||||
|
||||
3. Remove p1 and run selection once:
|
||||
|
||||
Validator | p3 | p2 | Comment
|
||||
----------|----- |---- |--------
|
||||
VP | 10 | 10 |
|
||||
A |-60k |-15k |
|
||||
A |-22.5k|22.5k| __run selection__
|
||||
|
||||
At this point, while the total voting power is 20, the distance between priorities is 45k. It will take 4500 runs for p3 to catch up with p2.
|
||||
|
||||
In order to prevent these types of scenarios, the selection algorithm performs scaling of priorities such that the difference between min and max values is smaller than two times the total voting power.
|
||||
|
||||
The modified selection algorithm is:
|
||||
|
||||
def ProposerSelection (vset):
|
||||
|
||||
// scale the priority values
|
||||
diff = max(A)-min(A)
|
||||
threshold = 2 * P
|
||||
if diff > threshold:
|
||||
scale = diff/threshold
|
||||
for each validator i in vset:
|
||||
A(i) = A(i)/scale
|
||||
|
||||
// center priorities around zero
|
||||
avg = sum(A(i) for i in vset)/len(vset)
|
||||
for each validator i in vset:
|
||||
A(i) -= avg
|
||||
|
||||
// compute priorities and elect proposer
|
||||
for each validator i in vset:
|
||||
A(i) += VP(i)
|
||||
prop = max(A)
|
||||
A(prop) -= P
|
||||
|
||||
Observations:
|
||||
- With this modification, the maximum distance between priorities becomes 2 * P.
|
||||
|
||||
Note also that even during steady state the priority range may increase beyond 2 * P. The scaling introduced here helps to keep the range bounded.
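A sketch of the scaling and centering steps described above follows; it operates directly on a slice of priorities, and integer division is used deliberately to mirror the behaviour discussed in the text (illustrative only):

```go
// scaleAndCenter first scales priorities so that max(A) - min(A) stays within
// 2 * P, then centers them so that their sum is close to zero.
func scaleAndCenter(priorities []int64, totalPower int64) {
	// scale the priority values
	minA, maxA := priorities[0], priorities[0]
	for _, a := range priorities {
		if a < minA {
			minA = a
		}
		if a > maxA {
			maxA = a
		}
	}
	if diff, threshold := maxA-minA, 2*totalPower; diff > threshold {
		scale := diff / threshold
		for i := range priorities {
			priorities[i] /= scale
		}
	}
	// center priorities around zero
	var sum int64
	for _, a := range priorities {
		sum += a
	}
	avg := sum / int64(len(priorities))
	for i := range priorities {
		priorities[i] -= avg
	}
}
```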
|
||||
|
||||
### Wrinkles
|
||||
|
||||
#### Validator Power Overflow Conditions
|
||||
The validator voting power is a positive number stored as an int64. When a validator is added, the `1.125 * P` computation must not overflow. As a consequence, the code handling validator updates (add and update) checks for overflow conditions, making sure the total voting power is never larger than the largest int64 `MAX`, with the property that `1.125 * MAX` is still within the bounds of int64. A fatal error is returned when an overflow condition is detected.
|
||||
|
||||
#### Proposer Priority Overflow/ Underflow Handling
|
||||
The proposer priority is stored as an int64. The selection algorithm performs additions and subtractions to these values and in the case of overflows and underflows it limits the values to:
|
||||
|
||||
MaxInt64 = 1 << 63 - 1
|
||||
MinInt64 = -1 << 63
|
||||
|
||||
### Requirement Fulfillment Claims
|
||||
__[R1]__
|
||||
|
||||
The proposer algorithm is deterministic giving consistent results across executions with same transactions and validator set modifications.
|
||||
[WIP - needs more detail]
|
||||
|
||||
__[R2]__
|
||||
|
||||
Given a set of processes with the total voting power P, during a sequence of elections of length P, the number of times any process is selected as proposer is equal to its voting power. The sequence of the P proposers then repeats. If we consider the validator set:
|
||||
|
||||
Validator | p1| p2
|
||||
----------|---|---
|
||||
VP | 1 | 3
|
||||
|
||||
With no other changes to the validator set, the current implementation of proposer selection generates the sequence:
|
||||
`p2, p1, p2, p2, p2, p1, p2, p2,...` or [`p2, p1, p2, p2`]*
|
||||
A sequence that starts with any circular permutation of the [`p2, p1, p2, p2`] sub-sequence would also provide the same degree of fairness. In fact these circular permutations show up in the sliding window (over the generated sequence) of size equal to the length of the sub-sequence.
|
||||
|
||||
Assigning priorities to each validator based on the voting power and updating them at each run ensures the fairness of the proposer selection. In addition, every time a validator is elected as proposer its priority is decreased by the total voting power.
|
||||
|
||||
Intuitively, a process v jumps ahead in the queue at most (max(A) - min(A))/VP(v) times until it reaches the head and is elected. The frequency is then:
|
||||
|
||||
f(v) ~ VP(v)/(max(A)-min(A)) = 1/k * VP(v)/P
|
||||
|
||||
For current implementation, this means v should be proposer at least VP(v) times out of k * P runs, with scaling factor k=2.
|
||||
|
@@ -12,3 +12,11 @@ for details.
|
||||
|
||||
Sending incorrectly encoded data or data exceeding `maxMsgSize` will result
|
||||
in stopping the peer.
|
||||
|
||||
The mempool will not send a tx back to any peer from which it received it.
|
||||
|
||||
The reactor assigns a `uint16` number to each peer and maintains a map from
|
||||
p2p.ID to `uint16`. Each mempool transaction carries a list of all the senders
|
||||
(`[]uint16`). The list is updated every time the mempool receives a transaction it
|
||||
has already seen. `uint16` assumes that a node will never have over 65535 active
|
||||
peers (0 is reserved for unknown source - e.g. RPC).
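A sketch of this bookkeeping follows; the names are hypothetical and do not mirror the actual mempool types. Peer IDs are mapped to compact `uint16` handles, and each transaction tracks the handles of the peers it was received from so it is never gossiped back to them:

```go
// Illustrative sketch of the sender tracking described above.
const unknownPeerID uint16 = 0 // reserved, e.g. for txs submitted via RPC

type mempoolIDs struct {
	nextID  uint16
	byP2PID map[string]uint16 // p2p.ID -> compact id
}

func newMempoolIDs() *mempoolIDs {
	return &mempoolIDs{byP2PID: make(map[string]uint16)}
}

// idFor returns the compact id for a peer, assigning a new one if needed.
func (m *mempoolIDs) idFor(p2pID string) uint16 {
	if id, ok := m.byP2PID[p2pID]; ok {
		return id
	}
	m.nextID++ // ids start at 1; 0 stays reserved for unknown sources
	m.byP2PID[p2pID] = m.nextID
	return m.nextID
}

type mempoolTx struct {
	tx      []byte
	senders []uint16 // every peer this tx was received from
}

// addSender records a peer as a sender of this tx, ignoring duplicates.
func (t *mempoolTx) addSender(id uint16) {
	for _, s := range t.senders {
		if s == id {
			return // already recorded
		}
	}
	t.senders = append(t.senders, id)
}
```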
|
||||
|
@@ -21,17 +21,20 @@ inbound (they dialed our public address) or outbound (we dialed them).
|
||||
## Discovery
|
||||
|
||||
Peer discovery begins with a list of seeds.
|
||||
When we have no peers, or have been unable to find enough peers from existing ones,
|
||||
we dial a randomly selected seed to get a list of peers to dial.
|
||||
|
||||
When we don't have enough peers, we
|
||||
|
||||
1. ask existing peers
|
||||
2. dial seeds if we're not dialing anyone currently
|
||||
|
||||
On startup, we will also immediately dial the given list of `persistent_peers`,
|
||||
and will attempt to maintain persistent connections with them. If the connections die, or we fail to dial,
|
||||
we will redial every 5s for a few minutes, then switch to an exponential backoff schedule,
|
||||
and after about a day of trying, stop dialing the peer.
|
||||
and will attempt to maintain persistent connections with them. If the
|
||||
connections die, or we fail to dial, we will redial every 5s for a few minutes,
|
||||
then switch to an exponential backoff schedule, and after about a day of
|
||||
trying, stop dialing the peer.
|
||||
|
||||
So long as we have less than `MaxNumOutboundPeers`, we periodically request additional peers
|
||||
from each of our own. If sufficient time goes by and we still can't find enough peers,
|
||||
we try the seeds again.
|
||||
As long as we have less than `MaxNumOutboundPeers`, we periodically request
|
||||
additional peers from each of our own and try seeds.
|
||||
|
||||
## Listening
|
||||
|
||||
|
@@ -30,8 +30,19 @@ moniker = "anonymous"
|
||||
# and verifying their commits
|
||||
fast_sync = true
|
||||
|
||||
# Database backend: leveldb | memdb | cleveldb
|
||||
db_backend = "leveldb"
|
||||
# Database backend: goleveldb | cleveldb | boltdb
|
||||
# * goleveldb (github.com/syndtr/goleveldb - most popular implementation)
|
||||
# - pure go
|
||||
# - stable
|
||||
# * cleveldb (uses levigo wrapper)
|
||||
# - fast
|
||||
# - requires gcc
|
||||
# - use cleveldb build tag (go build -tags cleveldb)
|
||||
# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt)
|
||||
# - EXPERIMENTAL
|
||||
# - may be faster in some use-cases (random reads - indexer)
|
||||
# - use boltdb build tag (go build -tags boltdb)
|
||||
db_backend = "goleveldb"
|
||||
|
||||
# Database directory
|
||||
db_dir = "data"
|
||||
@@ -127,6 +138,17 @@ max_subscriptions_per_client = 5
|
||||
# See https://github.com/tendermint/tendermint/issues/3435
|
||||
timeout_broadcast_tx_commit = "10s"
|
||||
|
||||
# The name of a file containing certificate that is used to create the HTTPS server.
|
||||
# If the certificate is signed by a certificate authority,
|
||||
# the certFile should be the concatenation of the server's certificate, any intermediates,
|
||||
# and the CA's certificate.
|
||||
# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. Otherwise, HTTP server is run.
|
||||
tls_cert_file = ""
|
||||
|
||||
# The name of a file containing matching private key that is used to create the HTTPS server.
|
||||
# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. Otherwise, HTTP server is run.
|
||||
tls_key_file = ""
|
||||
|
||||
##### peer to peer configuration options #####
|
||||
[p2p]
|
||||
|
||||
|
@@ -8,7 +8,7 @@ key-value database. Unfortunately, this implementation of LevelDB seems to suffe
|
||||
install the real C-implementation of LevelDB and compile Tendermint to use
|
||||
that using `make build_c`. See the [install instructions](../introduction/install.md) for details.
|
||||
|
||||
Tendermint keeps multiple distinct LevelDB databases in the `$TMROOT/data`:
|
||||
Tendermint keeps multiple distinct databases in the `$TMROOT/data`:
|
||||
|
||||
- `blockstore.db`: Keeps the entire blockchain - stores blocks,
|
||||
block commits, and block meta data, each indexed by height. Used to sync new
|
||||
|
@@ -60,11 +60,6 @@ func (evR *EvidenceReactor) AddPeer(peer p2p.Peer) {
|
||||
go evR.broadcastEvidenceRoutine(peer)
|
||||
}
|
||||
|
||||
// RemovePeer implements Reactor.
|
||||
func (evR *EvidenceReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
|
||||
// nothing to do
|
||||
}
|
||||
|
||||
// Receive implements Reactor.
|
||||
// It adds any received evidence to the evpool.
|
||||
func (evR *EvidenceReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
|
||||
|
@@ -13,7 +13,7 @@ CLI wraps the `cobra` and `viper` packages and handles some common elements of b
|
||||
|
||||
## clist
|
||||
|
||||
Clist provides a linekd list that is safe for concurrent access by many readers.
|
||||
Clist provides a linked list that is safe for concurrent access by many readers.
|
||||
|
||||
## common
|
||||
|
||||
|
@@ -331,172 +331,6 @@ func (g *Group) NewReader(index int) (*GroupReader, error) {
|
||||
return r, nil
|
||||
}
|
||||
|
||||
// Returns -1 if line comes after, 0 if found, 1 if line comes before.
|
||||
type SearchFunc func(line string) (int, error)
|
||||
|
||||
// Searches for the right file in Group, then returns a GroupReader to start
|
||||
// streaming lines.
|
||||
// Returns true if an exact match was found, otherwise returns the next greater
|
||||
// line that starts with prefix.
|
||||
// CONTRACT: Caller must close the returned GroupReader
|
||||
func (g *Group) Search(prefix string, cmp SearchFunc) (*GroupReader, bool, error) {
|
||||
g.mtx.Lock()
|
||||
minIndex, maxIndex := g.minIndex, g.maxIndex
|
||||
g.mtx.Unlock()
|
||||
// Now minIndex/maxIndex may change meanwhile,
|
||||
// but it shouldn't be a big deal
|
||||
// (maybe we'll want to limit scanUntil though)
|
||||
|
||||
for {
|
||||
curIndex := (minIndex + maxIndex + 1) / 2
|
||||
|
||||
// Base case, when there's only 1 choice left.
|
||||
if minIndex == maxIndex {
|
||||
r, err := g.NewReader(maxIndex)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
match, err := scanUntil(r, prefix, cmp)
|
||||
if err != nil {
|
||||
r.Close()
|
||||
return nil, false, err
|
||||
}
|
||||
return r, match, err
|
||||
}
|
||||
|
||||
// Read starting roughly at the middle file,
|
||||
// until we find line that has prefix.
|
||||
r, err := g.NewReader(curIndex)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
foundIndex, line, err := scanNext(r, prefix)
|
||||
r.Close()
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
|
||||
// Compare this line to our search query.
|
||||
val, err := cmp(line)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
if val < 0 {
|
||||
// Line will come later
|
||||
minIndex = foundIndex
|
||||
} else if val == 0 {
|
||||
// Stroke of luck, found the line
|
||||
r, err := g.NewReader(foundIndex)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
match, err := scanUntil(r, prefix, cmp)
|
||||
if !match {
|
||||
panic("Expected match to be true")
|
||||
}
|
||||
if err != nil {
|
||||
r.Close()
|
||||
return nil, false, err
|
||||
}
|
||||
return r, true, err
|
||||
} else {
|
||||
// We passed it
|
||||
maxIndex = curIndex - 1
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// Scans and returns the first line that starts with 'prefix'
|
||||
// Consumes line and returns it.
|
||||
func scanNext(r *GroupReader, prefix string) (int, string, error) {
|
||||
for {
|
||||
line, err := r.ReadLine()
|
||||
if err != nil {
|
||||
return 0, "", err
|
||||
}
|
||||
if !strings.HasPrefix(line, prefix) {
|
||||
continue
|
||||
}
|
||||
index := r.CurIndex()
|
||||
return index, line, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Returns true iff an exact match was found.
|
||||
// Pushes line, does not consume it.
|
||||
func scanUntil(r *GroupReader, prefix string, cmp SearchFunc) (bool, error) {
|
||||
for {
|
||||
line, err := r.ReadLine()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if !strings.HasPrefix(line, prefix) {
|
||||
continue
|
||||
}
|
||||
val, err := cmp(line)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if val < 0 {
|
||||
continue
|
||||
} else if val == 0 {
|
||||
r.PushLine(line)
|
||||
return true, nil
|
||||
} else {
|
||||
r.PushLine(line)
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Searches backwards for the last line in Group with prefix.
|
||||
// Scans each file forward until the end to find the last match.
|
||||
func (g *Group) FindLast(prefix string) (match string, found bool, err error) {
|
||||
g.mtx.Lock()
|
||||
minIndex, maxIndex := g.minIndex, g.maxIndex
|
||||
g.mtx.Unlock()
|
||||
|
||||
r, err := g.NewReader(maxIndex)
|
||||
if err != nil {
|
||||
return "", false, err
|
||||
}
|
||||
defer r.Close()
|
||||
|
||||
// Open files from the back and read
|
||||
GROUP_LOOP:
|
||||
for i := maxIndex; i >= minIndex; i-- {
|
||||
err := r.SetIndex(i)
|
||||
if err != nil {
|
||||
return "", false, err
|
||||
}
|
||||
// Scan each line and test whether line matches
|
||||
for {
|
||||
line, err := r.ReadLine()
|
||||
if err == io.EOF {
|
||||
if found {
|
||||
return match, found, nil
|
||||
}
|
||||
continue GROUP_LOOP
|
||||
} else if err != nil {
|
||||
return "", false, err
|
||||
}
|
||||
if strings.HasPrefix(line, prefix) {
|
||||
match = line
|
||||
found = true
|
||||
}
|
||||
if r.CurIndex() > i {
|
||||
if found {
|
||||
return match, found, nil
|
||||
}
|
||||
continue GROUP_LOOP
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// GroupInfo holds information about the group.
|
||||
type GroupInfo struct {
|
||||
MinIndex int // index of the first file in the group, including head
|
||||
@@ -654,48 +488,6 @@ func (gr *GroupReader) Read(p []byte) (n int, err error) {
|
||||
}
|
||||
}
|
||||
|
||||
// ReadLine reads a line (without delimiter).
|
||||
// just return io.EOF if no new lines found.
|
||||
func (gr *GroupReader) ReadLine() (string, error) {
|
||||
gr.mtx.Lock()
|
||||
defer gr.mtx.Unlock()
|
||||
|
||||
// From PushLine
|
||||
if gr.curLine != nil {
|
||||
line := string(gr.curLine)
|
||||
gr.curLine = nil
|
||||
return line, nil
|
||||
}
|
||||
|
||||
// Open file if not open yet
|
||||
if gr.curReader == nil {
|
||||
err := gr.openFile(gr.curIndex)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
|
||||
// Iterate over files until line is found
|
||||
var linePrefix string
|
||||
for {
|
||||
bytesRead, err := gr.curReader.ReadBytes('\n')
|
||||
if err == io.EOF {
|
||||
// Open the next file
|
||||
if err1 := gr.openFile(gr.curIndex + 1); err1 != nil {
|
||||
return "", err1
|
||||
}
|
||||
if len(bytesRead) > 0 && bytesRead[len(bytesRead)-1] == byte('\n') {
|
||||
return linePrefix + string(bytesRead[:len(bytesRead)-1]), nil
|
||||
}
|
||||
linePrefix += string(bytesRead)
|
||||
continue
|
||||
} else if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return linePrefix + string(bytesRead[:len(bytesRead)-1]), nil
|
||||
}
|
||||
}
|
||||
|
||||
// IF index > gr.Group.maxIndex, returns io.EOF
|
||||
// CONTRACT: caller should hold gr.mtx
|
||||
func (gr *GroupReader) openFile(index int) error {
|
||||
@@ -725,20 +517,6 @@ func (gr *GroupReader) openFile(index int) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// PushLine makes the given line the current one, so the next time somebody
|
||||
// calls ReadLine, this line will be returned.
|
||||
// panics if called twice without calling ReadLine.
|
||||
func (gr *GroupReader) PushLine(line string) {
|
||||
gr.mtx.Lock()
|
||||
defer gr.mtx.Unlock()
|
||||
|
||||
if gr.curLine == nil {
|
||||
gr.curLine = []byte(line)
|
||||
} else {
|
||||
panic("PushLine failed, already have line")
|
||||
}
|
||||
}
|
||||
|
||||
// CurIndex returns cursor's file index.
|
||||
func (gr *GroupReader) CurIndex() int {
|
||||
gr.mtx.Lock()
|
||||
@@ -753,32 +531,3 @@ func (gr *GroupReader) SetIndex(index int) error {
|
||||
defer gr.mtx.Unlock()
|
||||
return gr.openFile(index)
|
||||
}
|
||||
|
||||
//--------------------------------------------------------------------------------
|
||||
|
||||
// A simple SearchFunc that assumes that the marker is of form
|
||||
// <prefix><number>.
|
||||
// For example, if prefix is '#HEIGHT:', the markers of expected to be of the form:
|
||||
//
|
||||
// #HEIGHT:1
|
||||
// ...
|
||||
// #HEIGHT:2
|
||||
// ...
|
||||
func MakeSimpleSearchFunc(prefix string, target int) SearchFunc {
|
||||
return func(line string) (int, error) {
|
||||
if !strings.HasPrefix(line, prefix) {
|
||||
return -1, fmt.Errorf("Marker line did not have prefix: %v", prefix)
|
||||
}
|
||||
i, err := strconv.Atoi(line[len(prefix):])
|
||||
if err != nil {
|
||||
return -1, fmt.Errorf("Failed to parse marker line: %v", err.Error())
|
||||
}
|
||||
if target < i {
|
||||
return 1, nil
|
||||
} else if target == i {
|
||||
return 0, nil
|
||||
} else {
|
||||
return -1, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -1,13 +1,9 @@
|
||||
package autofile
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
@@ -106,107 +102,6 @@ func TestCheckHeadSizeLimit(t *testing.T) {
|
||||
destroyTestGroup(t, g)
|
||||
}
|
||||
|
||||
func TestSearch(t *testing.T) {
|
||||
g := createTestGroupWithHeadSizeLimit(t, 10*1000)
|
||||
|
||||
// Create some files in the group that have several INFO lines in them.
|
||||
// Try to put the INFO lines in various spots.
|
||||
for i := 0; i < 100; i++ {
|
||||
// The random junk at the end ensures that this INFO linen
|
||||
// is equally likely to show up at the end.
|
||||
_, err := g.Head.Write([]byte(fmt.Sprintf("INFO %v %v\n", i, cmn.RandStr(123))))
|
||||
require.NoError(t, err, "Failed to write to head")
|
||||
g.checkHeadSizeLimit()
|
||||
for j := 0; j < 10; j++ {
|
||||
_, err1 := g.Head.Write([]byte(cmn.RandStr(123) + "\n"))
|
||||
require.NoError(t, err1, "Failed to write to head")
|
||||
g.checkHeadSizeLimit()
|
||||
}
|
||||
}
|
||||
|
||||
// Create a search func that searches for line
|
||||
makeSearchFunc := func(target int) SearchFunc {
|
||||
return func(line string) (int, error) {
|
||||
parts := strings.Split(line, " ")
|
||||
if len(parts) != 3 {
|
||||
return -1, errors.New("Line did not have 3 parts")
|
||||
}
|
||||
i, err := strconv.Atoi(parts[1])
|
||||
if err != nil {
|
||||
return -1, errors.New("Failed to parse INFO: " + err.Error())
|
||||
}
|
||||
if target < i {
|
||||
return 1, nil
|
||||
} else if target == i {
|
||||
return 0, nil
|
||||
} else {
|
||||
return -1, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Now search for each number
|
||||
for i := 0; i < 100; i++ {
|
||||
gr, match, err := g.Search("INFO", makeSearchFunc(i))
|
||||
require.NoError(t, err, "Failed to search for line, tc #%d", i)
|
||||
assert.True(t, match, "Expected Search to return exact match, tc #%d", i)
|
||||
line, err := gr.ReadLine()
|
||||
require.NoError(t, err, "Failed to read line after search, tc #%d", i)
|
||||
if !strings.HasPrefix(line, fmt.Sprintf("INFO %v ", i)) {
|
||||
t.Fatalf("Failed to get correct line, tc #%d", i)
|
||||
}
|
||||
// Make sure we can continue to read from there.
|
||||
cur := i + 1
|
||||
for {
|
||||
line, err := gr.ReadLine()
|
||||
if err == io.EOF {
|
||||
if cur == 99+1 {
|
||||
// OK!
|
||||
break
|
||||
} else {
|
||||
t.Fatalf("Got EOF after the wrong INFO #, tc #%d", i)
|
||||
}
|
||||
} else if err != nil {
|
||||
t.Fatalf("Error reading line, tc #%d, err:\n%s", i, err)
|
||||
}
|
||||
if !strings.HasPrefix(line, "INFO ") {
|
||||
continue
|
||||
}
|
||||
if !strings.HasPrefix(line, fmt.Sprintf("INFO %v ", cur)) {
|
||||
t.Fatalf("Unexpected INFO #. Expected %v got:\n%v, tc #%d", cur, line, i)
|
||||
}
|
||||
cur++
|
||||
}
|
||||
gr.Close()
|
||||
}
|
||||
|
||||
// Now search for something that is too small.
|
||||
// We should get the first available line.
|
||||
{
|
||||
gr, match, err := g.Search("INFO", makeSearchFunc(-999))
|
||||
require.NoError(t, err, "Failed to search for line")
|
||||
assert.False(t, match, "Expected Search to not return exact match")
|
||||
line, err := gr.ReadLine()
|
||||
require.NoError(t, err, "Failed to read line after search")
|
||||
if !strings.HasPrefix(line, "INFO 0 ") {
|
||||
t.Error("Failed to fetch correct line, which is the earliest INFO")
|
||||
}
|
||||
err = gr.Close()
|
||||
require.NoError(t, err, "Failed to close GroupReader")
|
||||
}
|
||||
|
||||
// Now search for something that is too large.
|
||||
// We should get an EOF error.
|
||||
{
|
||||
gr, _, err := g.Search("INFO", makeSearchFunc(999))
|
||||
assert.Equal(t, io.EOF, err)
|
||||
assert.Nil(t, gr)
|
||||
}
|
||||
|
||||
// Cleanup
|
||||
destroyTestGroup(t, g)
|
||||
}
|
||||
|
||||
func TestRotateFile(t *testing.T) {
|
||||
g := createTestGroupWithHeadSizeLimit(t, 0)
|
||||
g.WriteLine("Line 1")
|
||||
@@ -237,100 +132,6 @@ func TestRotateFile(t *testing.T) {
|
||||
destroyTestGroup(t, g)
|
||||
}
|
||||
|
||||
func TestFindLast1(t *testing.T) {
|
||||
g := createTestGroupWithHeadSizeLimit(t, 0)
|
||||
|
||||
g.WriteLine("Line 1")
|
||||
g.WriteLine("Line 2")
|
||||
g.WriteLine("# a")
|
||||
g.WriteLine("Line 3")
|
||||
g.FlushAndSync()
|
||||
g.RotateFile()
|
||||
g.WriteLine("Line 4")
|
||||
g.WriteLine("Line 5")
|
||||
g.WriteLine("Line 6")
|
||||
g.WriteLine("# b")
|
||||
g.FlushAndSync()
|
||||
|
||||
match, found, err := g.FindLast("#")
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, found)
|
||||
assert.Equal(t, "# b", match)
|
||||
|
||||
// Cleanup
|
||||
destroyTestGroup(t, g)
|
||||
}
|
||||
|
||||
func TestFindLast2(t *testing.T) {
|
||||
g := createTestGroupWithHeadSizeLimit(t, 0)
|
||||
|
||||
g.WriteLine("Line 1")
|
||||
g.WriteLine("Line 2")
|
||||
g.WriteLine("Line 3")
|
||||
g.FlushAndSync()
|
||||
g.RotateFile()
|
||||
g.WriteLine("# a")
|
||||
g.WriteLine("Line 4")
|
||||
g.WriteLine("Line 5")
|
||||
g.WriteLine("# b")
|
||||
g.WriteLine("Line 6")
|
||||
g.FlushAndSync()
|
||||
|
||||
match, found, err := g.FindLast("#")
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, found)
|
||||
assert.Equal(t, "# b", match)
|
||||
|
||||
// Cleanup
|
||||
destroyTestGroup(t, g)
|
||||
}
|
||||
|
||||
func TestFindLast3(t *testing.T) {
|
||||
g := createTestGroupWithHeadSizeLimit(t, 0)
|
||||
|
||||
g.WriteLine("Line 1")
|
||||
g.WriteLine("# a")
|
||||
g.WriteLine("Line 2")
|
||||
g.WriteLine("# b")
|
||||
g.WriteLine("Line 3")
|
||||
g.FlushAndSync()
|
||||
g.RotateFile()
|
||||
g.WriteLine("Line 4")
|
||||
g.WriteLine("Line 5")
|
||||
g.WriteLine("Line 6")
|
||||
g.FlushAndSync()
|
||||
|
||||
match, found, err := g.FindLast("#")
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, found)
|
||||
assert.Equal(t, "# b", match)
|
||||
|
||||
// Cleanup
|
||||
destroyTestGroup(t, g)
|
||||
}
|
||||
|
||||
func TestFindLast4(t *testing.T) {
|
||||
g := createTestGroupWithHeadSizeLimit(t, 0)
|
||||
|
||||
g.WriteLine("Line 1")
|
||||
g.WriteLine("Line 2")
|
||||
g.WriteLine("Line 3")
|
||||
g.FlushAndSync()
|
||||
g.RotateFile()
|
||||
g.WriteLine("Line 4")
|
||||
g.WriteLine("Line 5")
|
||||
g.WriteLine("Line 6")
|
||||
g.FlushAndSync()
|
||||
|
||||
match, found, err := g.FindLast("#")
|
||||
assert.NoError(t, err)
|
||||
assert.False(t, found)
|
||||
assert.Empty(t, match)
|
||||
|
||||
// Cleanup
|
||||
destroyTestGroup(t, g)
|
||||
}
|
||||
|
||||
func TestWrite(t *testing.T) {
|
||||
g := createTestGroupWithHeadSizeLimit(t, 0)
|
||||
|
||||
|
@@ -261,6 +261,8 @@ func TestWaitChan(t *testing.T) {
             pushed++
             time.Sleep(time.Duration(cmn.RandIntn(25)) * time.Millisecond)
         }
+        // apply a deterministic pause so the counter has time to catch up
+        time.Sleep(25 * time.Millisecond)
         close(done)
     }()

@@ -273,7 +275,7 @@ FOR_LOOP:
             next = next.Next()
             seen++
             if next == nil {
-                continue
+                t.Fatal("Next should not be nil when waiting on NextWaitChan")
             }
         case <-done:
             break FOR_LOOP
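The pause added above exists so the consumer goroutine can drain everything it was signalled about before the done channel closes. A rough, self-contained sketch of that producer/consumer shape (plain standard library; none of these names are part of the clist API):

package main

import (
    "fmt"
    "time"
)

func main() {
    done := make(chan struct{})
    items := make(chan int, 100)

    // Producer: push items, pause so the consumer can catch up, then signal done.
    go func() {
        for i := 0; i < 10; i++ {
            items <- i
        }
        time.Sleep(25 * time.Millisecond) // deterministic pause, as in the test above
        close(done)
    }()

    seen := 0
FOR_LOOP:
    for {
        select {
        case <-items:
            seen++
        case <-done:
            break FOR_LOOP
        }
    }
    fmt.Println("seen:", seen) // with the pause in place, all 10 items are counted
}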
@@ -56,7 +56,7 @@ func (cm *CMap) Clear() {
 func (cm *CMap) Keys() []string {
     cm.l.Lock()

-    keys := []string{}
+    keys := make([]string, 0, len(cm.m))
     for k := range cm.m {
         keys = append(keys, k)
     }

@@ -66,7 +66,7 @@ func (cm *CMap) Keys() []string {

 func (cm *CMap) Values() []interface{} {
     cm.l.Lock()
-    items := []interface{}{}
+    items := make([]interface{}, 0, len(cm.m))
     for _, v := range cm.m {
         items = append(items, v)
     }
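The change above swaps empty slice literals for slices preallocated to the map's size, so append never reallocates while copying. A minimal standalone illustration of the same idiom (nothing here is part of the CMap API):

package main

import "fmt"

func main() {
    m := map[string]int{"a": 1, "b": 2, "c": 3}

    // Preallocate capacity so append never reallocates while collecting keys.
    keys := make([]string, 0, len(m))
    for k := range m {
        keys = append(keys, k)
    }
    fmt.Println(len(keys), cap(keys)) // 3 3
}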
@@ -212,35 +212,3 @@ func (fe FmtError) String() string {
func (fe FmtError) Format() string {
    return fe.format
}

//----------------------------------------
// Panic wrappers
// XXX DEPRECATED

// A panic resulting from a sanity check means there is a programmer error
// and some guarantee is not satisfied.
// XXX DEPRECATED
func PanicSanity(v interface{}) {
    panic(fmt.Sprintf("Panicked on a Sanity Check: %v", v))
}

// A panic here means something has gone horribly wrong, in the form of data corruption or
// failure of the operating system. In a correct/healthy system, these should never fire.
// If they do, it's indicative of a much more serious problem.
// XXX DEPRECATED
func PanicCrisis(v interface{}) {
    panic(fmt.Sprintf("Panicked on a Crisis: %v", v))
}

// Indicates a failure of consensus. Someone was malicious or something has
// gone horribly wrong. These should really boot us into an "emergency-recover" mode
// XXX DEPRECATED
func PanicConsensus(v interface{}) {
    panic(fmt.Sprintf("Panicked on a Consensus Failure: %v", v))
}

// For those times when we're not sure if we should panic
// XXX DEPRECATED
func PanicQ(v interface{}) {
    panic(fmt.Sprintf("Panicked questionably: %v", v))
}

@@ -300,7 +300,7 @@ func cRandBytes(numBytes int) []byte {
     b := make([]byte, numBytes)
     _, err := crand.Read(b)
     if err != nil {
-        PanicCrisis(err)
+        panic(err)
     }
     return b
 }

@@ -1,232 +0,0 @@
|
||||
package common
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Used by RepeatTimer the first time,
|
||||
// and every time it's Reset() after Stop().
|
||||
type TickerMaker func(dur time.Duration) Ticker
|
||||
|
||||
// Ticker is a basic ticker interface.
|
||||
type Ticker interface {
|
||||
|
||||
// Never changes, never closes.
|
||||
Chan() <-chan time.Time
|
||||
|
||||
// Stopping a stopped Ticker will panic.
|
||||
Stop()
|
||||
}
|
||||
|
||||
//----------------------------------------
|
||||
// defaultTicker
|
||||
|
||||
var _ Ticker = (*defaultTicker)(nil)
|
||||
|
||||
type defaultTicker time.Ticker
|
||||
|
||||
func defaultTickerMaker(dur time.Duration) Ticker {
|
||||
ticker := time.NewTicker(dur)
|
||||
return (*defaultTicker)(ticker)
|
||||
}
|
||||
|
||||
// Implements Ticker
|
||||
func (t *defaultTicker) Chan() <-chan time.Time {
|
||||
return t.C
|
||||
}
|
||||
|
||||
// Implements Ticker
|
||||
func (t *defaultTicker) Stop() {
|
||||
((*time.Ticker)(t)).Stop()
|
||||
}
|
||||
|
||||
//----------------------------------------
|
||||
// LogicalTickerMaker
|
||||
|
||||
// Construct a TickerMaker that always uses `source`.
|
||||
// It's useful for simulating a deterministic clock.
|
||||
func NewLogicalTickerMaker(source chan time.Time) TickerMaker {
|
||||
return func(dur time.Duration) Ticker {
|
||||
return newLogicalTicker(source, dur)
|
||||
}
|
||||
}
|
||||
|
||||
type logicalTicker struct {
|
||||
source <-chan time.Time
|
||||
ch chan time.Time
|
||||
quit chan struct{}
|
||||
}
|
||||
|
||||
func newLogicalTicker(source <-chan time.Time, interval time.Duration) Ticker {
|
||||
lt := &logicalTicker{
|
||||
source: source,
|
||||
ch: make(chan time.Time),
|
||||
quit: make(chan struct{}),
|
||||
}
|
||||
go lt.fireRoutine(interval)
|
||||
return lt
|
||||
}
|
||||
|
||||
// We need a goroutine to read times from t.source
|
||||
// and fire on t.Chan() when `interval` has passed.
|
||||
func (t *logicalTicker) fireRoutine(interval time.Duration) {
|
||||
source := t.source
|
||||
|
||||
// Init `lasttime`
|
||||
lasttime := time.Time{}
|
||||
select {
|
||||
case lasttime = <-source:
|
||||
case <-t.quit:
|
||||
return
|
||||
}
|
||||
// Init `lasttime` end
|
||||
|
||||
for {
|
||||
select {
|
||||
case newtime := <-source:
|
||||
elapsed := newtime.Sub(lasttime)
|
||||
if interval <= elapsed {
|
||||
// Block for determinism until the ticker is stopped.
|
||||
select {
|
||||
case t.ch <- newtime:
|
||||
case <-t.quit:
|
||||
return
|
||||
}
|
||||
// Reset timeleft.
|
||||
// Don't try to "catch up" by sending more.
|
||||
// "Ticker adjusts the intervals or drops ticks to make up for
|
||||
// slow receivers" - https://golang.org/pkg/time/#Ticker
|
||||
lasttime = newtime
|
||||
}
|
||||
case <-t.quit:
|
||||
return // done
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Implements Ticker
|
||||
func (t *logicalTicker) Chan() <-chan time.Time {
|
||||
return t.ch // immutable
|
||||
}
|
||||
|
||||
// Implements Ticker
|
||||
func (t *logicalTicker) Stop() {
|
||||
close(t.quit) // it *should* panic when stopped twice.
|
||||
}
|
||||
|
||||
//---------------------------------------------------------------------
|
||||
|
||||
/*
|
||||
RepeatTimer repeatedly sends a struct{}{} to `.Chan()` after each `dur`
|
||||
period. (It's good for keeping connections alive.)
|
||||
A RepeatTimer must be stopped, or it will keep a goroutine alive.
|
||||
*/
|
||||
type RepeatTimer struct {
|
||||
name string
|
||||
ch chan time.Time
|
||||
tm TickerMaker
|
||||
|
||||
mtx sync.Mutex
|
||||
dur time.Duration
|
||||
ticker Ticker
|
||||
quit chan struct{}
|
||||
}
|
||||
|
||||
// NewRepeatTimer returns a RepeatTimer with a defaultTicker.
|
||||
func NewRepeatTimer(name string, dur time.Duration) *RepeatTimer {
|
||||
return NewRepeatTimerWithTickerMaker(name, dur, defaultTickerMaker)
|
||||
}
|
||||
|
||||
// NewRepeatTimerWithTicker returns a RepeatTimer with the given ticker
|
||||
// maker.
|
||||
func NewRepeatTimerWithTickerMaker(name string, dur time.Duration, tm TickerMaker) *RepeatTimer {
|
||||
var t = &RepeatTimer{
|
||||
name: name,
|
||||
ch: make(chan time.Time),
|
||||
tm: tm,
|
||||
dur: dur,
|
||||
ticker: nil,
|
||||
quit: nil,
|
||||
}
|
||||
t.reset()
|
||||
return t
|
||||
}
|
||||
|
||||
// receive ticks on ch, send out on t.ch
|
||||
func (t *RepeatTimer) fireRoutine(ch <-chan time.Time, quit <-chan struct{}) {
|
||||
for {
|
||||
select {
|
||||
case tick := <-ch:
|
||||
select {
|
||||
case t.ch <- tick:
|
||||
case <-quit:
|
||||
return
|
||||
}
|
||||
case <-quit: // NOTE: `t.quit` races.
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (t *RepeatTimer) Chan() <-chan time.Time {
|
||||
return t.ch
|
||||
}
|
||||
|
||||
func (t *RepeatTimer) Stop() {
|
||||
t.mtx.Lock()
|
||||
defer t.mtx.Unlock()
|
||||
|
||||
t.stop()
|
||||
}
|
||||
|
||||
// Wait the duration again before firing.
|
||||
func (t *RepeatTimer) Reset() {
|
||||
t.mtx.Lock()
|
||||
defer t.mtx.Unlock()
|
||||
|
||||
t.reset()
|
||||
}
|
||||
|
||||
//----------------------------------------
|
||||
// Misc.
|
||||
|
||||
// CONTRACT: (non-constructor) caller should hold t.mtx.
|
||||
func (t *RepeatTimer) reset() {
|
||||
if t.ticker != nil {
|
||||
t.stop()
|
||||
}
|
||||
t.ticker = t.tm(t.dur)
|
||||
t.quit = make(chan struct{})
|
||||
go t.fireRoutine(t.ticker.Chan(), t.quit)
|
||||
}
|
||||
|
||||
// CONTRACT: caller should hold t.mtx.
|
||||
func (t *RepeatTimer) stop() {
|
||||
if t.ticker == nil {
|
||||
/*
|
||||
Similar to the case of closing channels twice:
|
||||
https://groups.google.com/forum/#!topic/golang-nuts/rhxMiNmRAPk
|
||||
Stopping a RepeatTimer twice implies that you do
|
||||
not know whether you are done or not.
|
||||
If you're calling stop on a stopped RepeatTimer,
|
||||
you probably have race conditions.
|
||||
*/
|
||||
panic("Tried to stop a stopped RepeatTimer")
|
||||
}
|
||||
t.ticker.Stop()
|
||||
t.ticker = nil
|
||||
/*
|
||||
From https://golang.org/pkg/time/#Ticker:
|
||||
"Stop the ticker to release associated resources"
|
||||
"After Stop, no more ticks will be sent"
|
||||
So we shouldn't have to do the below.
|
||||
|
||||
select {
|
||||
case <-t.ch:
|
||||
// read off channel if there's anything there
|
||||
default:
|
||||
}
|
||||
*/
|
||||
close(t.quit)
|
||||
}
|
@@ -1,136 +0,0 @@
|
||||
package common
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/fortytw2/leaktest"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestDefaultTicker(t *testing.T) {
|
||||
ticker := defaultTickerMaker(time.Millisecond * 10)
|
||||
<-ticker.Chan()
|
||||
ticker.Stop()
|
||||
}
|
||||
|
||||
func TestRepeatTimer(t *testing.T) {
|
||||
|
||||
ch := make(chan time.Time, 100)
|
||||
mtx := new(sync.Mutex)
|
||||
|
||||
// tick() fires from start to end
|
||||
// (exclusive) in milliseconds with incr.
|
||||
// It locks on mtx, so subsequent calls
|
||||
// run in series.
|
||||
tick := func(startMs, endMs, incrMs time.Duration) {
|
||||
mtx.Lock()
|
||||
go func() {
|
||||
for tMs := startMs; tMs < endMs; tMs += incrMs {
|
||||
lt := time.Time{}
|
||||
lt = lt.Add(tMs * time.Millisecond)
|
||||
ch <- lt
|
||||
}
|
||||
mtx.Unlock()
|
||||
}()
|
||||
}
|
||||
|
||||
// tock consumes Ticker.Chan() events and checks them against the ms in "timesMs".
|
||||
tock := func(t *testing.T, rt *RepeatTimer, timesMs []int64) {
|
||||
|
||||
// Check against timesMs.
|
||||
for _, timeMs := range timesMs {
|
||||
tyme := <-rt.Chan()
|
||||
sinceMs := tyme.Sub(time.Time{}) / time.Millisecond
|
||||
assert.Equal(t, timeMs, int64(sinceMs))
|
||||
}
|
||||
|
||||
// TODO detect number of running
|
||||
// goroutines to ensure that
|
||||
// no other times will fire.
|
||||
// See https://github.com/tendermint/tendermint/libs/issues/120.
|
||||
time.Sleep(time.Millisecond * 100)
|
||||
done := true
|
||||
select {
|
||||
case <-rt.Chan():
|
||||
done = false
|
||||
default:
|
||||
}
|
||||
assert.True(t, done)
|
||||
}
|
||||
|
||||
tm := NewLogicalTickerMaker(ch)
|
||||
rt := NewRepeatTimerWithTickerMaker("bar", time.Second, tm)
|
||||
|
||||
/* NOTE: Useful for debugging deadlocks...
|
||||
go func() {
|
||||
time.Sleep(time.Second * 3)
|
||||
trace := make([]byte, 102400)
|
||||
count := runtime.Stack(trace, true)
|
||||
fmt.Printf("Stack of %d bytes: %s\n", count, trace)
|
||||
}()
|
||||
*/
|
||||
|
||||
tick(0, 1000, 10)
|
||||
tock(t, rt, []int64{})
|
||||
tick(1000, 2000, 10)
|
||||
tock(t, rt, []int64{1000})
|
||||
tick(2005, 5000, 10)
|
||||
tock(t, rt, []int64{2005, 3005, 4005})
|
||||
tick(5001, 5999, 1)
|
||||
// Read 5005 instead of 5001 because
|
||||
// it's 1 second greater than 4005.
|
||||
tock(t, rt, []int64{5005})
|
||||
tick(6000, 7005, 1)
|
||||
tock(t, rt, []int64{6005})
|
||||
tick(7033, 8032, 1)
|
||||
tock(t, rt, []int64{7033})
|
||||
|
||||
// After a reset, nothing happens
|
||||
// until two ticks are received.
|
||||
rt.Reset()
|
||||
tock(t, rt, []int64{})
|
||||
tick(8040, 8041, 1)
|
||||
tock(t, rt, []int64{})
|
||||
tick(9555, 9556, 1)
|
||||
tock(t, rt, []int64{9555})
|
||||
|
||||
// After a stop, nothing more is sent.
|
||||
rt.Stop()
|
||||
tock(t, rt, []int64{})
|
||||
|
||||
// Another stop panics.
|
||||
assert.Panics(t, func() { rt.Stop() })
|
||||
}
|
||||
|
||||
func TestRepeatTimerReset(t *testing.T) {
|
||||
// check that we are not leaking any go-routines
|
||||
defer leaktest.Check(t)()
|
||||
|
||||
timer := NewRepeatTimer("test", 20*time.Millisecond)
|
||||
defer timer.Stop()
|
||||
|
||||
// test we don't receive tick before duration ms.
|
||||
select {
|
||||
case <-timer.Chan():
|
||||
t.Fatal("did not expect to receive tick")
|
||||
default:
|
||||
}
|
||||
|
||||
timer.Reset()
|
||||
|
||||
// test we receive tick after Reset is called
|
||||
select {
|
||||
case <-timer.Chan():
|
||||
// all good
|
||||
case <-time.After(40 * time.Millisecond):
|
||||
t.Fatal("expected to receive tick after reset")
|
||||
}
|
||||
|
||||
// just random calls
|
||||
for i := 0; i < 100; i++ {
|
||||
time.Sleep(time.Duration(RandIntn(40)) * time.Millisecond)
|
||||
timer.Reset()
|
||||
}
|
||||
}
|
@@ -194,8 +194,7 @@ func (bs *BaseService) Reset() error {

 // OnReset implements Service by panicking.
 func (bs *BaseService) OnReset() error {
-    PanicSanity("The service cannot be reset")
-    return nil
+    panic("The service cannot be reset")
 }

 // IsRunning implements Service by returning true or false depending on the

@@ -209,7 +208,7 @@ func (bs *BaseService) Wait() {
     <-bs.quit
 }

-// String implements Servce by returning a string representation of the service.
+// String implements Service by returning a string representation of the service.
 func (bs *BaseService) String() string {
     return bs.name
 }

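For context, OnReset is one of the hooks a concrete service overrides when it embeds BaseService; the default shown above panics so that only services that opt in can be Reset. A minimal sketch of such an override (the Counter type, its field, and the constructor wiring are hypothetical and not part of this diff):

package myservice

import (
    cmn "github.com/tendermint/tendermint/libs/common"
)

// Counter is a hypothetical service that supports Reset by zeroing its state.
type Counter struct {
    cmn.BaseService
    n int
}

// OnReset overrides the default implementation, which panics, so that
// (*BaseService).Reset can succeed for this service.
func (c *Counter) OnReset() error {
    c.n = 0
    return nil
}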
@@ -6,6 +6,7 @@ import (
    "time"

    // make govet noshadow happy...

    asrt "github.com/stretchr/testify/assert"
)

@@ -9,6 +9,7 @@ import (

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"

    cmn "github.com/tendermint/tendermint/libs/common"
)

libs/db/boltdb.go (new file, 353 lines)
@@ -0,0 +1,353 @@
|
||||
// +build boltdb
|
||||
|
||||
package db
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/etcd-io/bbolt"
|
||||
)
|
||||
|
||||
var bucket = []byte("tm")
|
||||
|
||||
func init() {
|
||||
registerDBCreator(BoltDBBackend, func(name, dir string) (DB, error) {
|
||||
return NewBoltDB(name, dir)
|
||||
}, false)
|
||||
}
|
||||
|
||||
// BoltDB is a wrapper around etcd's fork of bolt
// (https://github.com/etcd-io/bbolt).
//
// NOTE: All operations (including Set, Delete) are synchronous by default. One
// can globally turn this off by using the NoSync config option (not recommended).
//
// A single bucket ([]byte("tm")) is used per database instance. This could
// lead to performance issues when/if there are lots of keys.
type BoltDB struct {
    db *bbolt.DB
}
|
||||
|
||||
// NewBoltDB returns a BoltDB with default options.
|
||||
func NewBoltDB(name, dir string) (DB, error) {
|
||||
return NewBoltDBWithOpts(name, dir, bbolt.DefaultOptions)
|
||||
}
|
||||
|
||||
// NewBoltDBWithOpts allows you to supply *bbolt.Options. ReadOnly: true is not
|
||||
// supported because NewBoltDBWithOpts creates a global bucket.
|
||||
func NewBoltDBWithOpts(name string, dir string, opts *bbolt.Options) (DB, error) {
|
||||
if opts.ReadOnly {
|
||||
return nil, errors.New("ReadOnly: true is not supported")
|
||||
}
|
||||
|
||||
dbPath := filepath.Join(dir, name+".db")
|
||||
db, err := bbolt.Open(dbPath, os.ModePerm, opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// create a global bucket
|
||||
err = db.Update(func(tx *bbolt.Tx) error {
|
||||
_, err := tx.CreateBucketIfNotExists(bucket)
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &BoltDB{db: db}, nil
|
||||
}
|
||||
|
||||
func (bdb *BoltDB) Get(key []byte) (value []byte) {
|
||||
key = nonEmptyKey(nonNilBytes(key))
|
||||
err := bdb.db.View(func(tx *bbolt.Tx) error {
|
||||
b := tx.Bucket(bucket)
|
||||
value = b.Get(key)
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (bdb *BoltDB) Has(key []byte) bool {
|
||||
return bdb.Get(key) != nil
|
||||
}
|
||||
|
||||
func (bdb *BoltDB) Set(key, value []byte) {
|
||||
key = nonEmptyKey(nonNilBytes(key))
|
||||
value = nonNilBytes(value)
|
||||
err := bdb.db.Update(func(tx *bbolt.Tx) error {
|
||||
b := tx.Bucket(bucket)
|
||||
return b.Put(key, value)
|
||||
})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
func (bdb *BoltDB) SetSync(key, value []byte) {
|
||||
bdb.Set(key, value)
|
||||
}
|
||||
|
||||
func (bdb *BoltDB) Delete(key []byte) {
|
||||
key = nonEmptyKey(nonNilBytes(key))
|
||||
err := bdb.db.Update(func(tx *bbolt.Tx) error {
|
||||
return tx.Bucket(bucket).Delete(key)
|
||||
})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
func (bdb *BoltDB) DeleteSync(key []byte) {
|
||||
bdb.Delete(key)
|
||||
}
|
||||
|
||||
func (bdb *BoltDB) Close() {
|
||||
bdb.db.Close()
|
||||
}
|
||||
|
||||
func (bdb *BoltDB) Print() {
|
||||
stats := bdb.db.Stats()
|
||||
fmt.Printf("%v\n", stats)
|
||||
|
||||
err := bdb.db.View(func(tx *bbolt.Tx) error {
|
||||
tx.Bucket(bucket).ForEach(func(k, v []byte) error {
|
||||
fmt.Printf("[%X]:\t[%X]\n", k, v)
|
||||
return nil
|
||||
})
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
func (bdb *BoltDB) Stats() map[string]string {
|
||||
stats := bdb.db.Stats()
|
||||
m := make(map[string]string)
|
||||
|
||||
// Freelist stats
|
||||
m["FreePageN"] = fmt.Sprintf("%v", stats.FreePageN)
|
||||
m["PendingPageN"] = fmt.Sprintf("%v", stats.PendingPageN)
|
||||
m["FreeAlloc"] = fmt.Sprintf("%v", stats.FreeAlloc)
|
||||
m["FreelistInuse"] = fmt.Sprintf("%v", stats.FreelistInuse)
|
||||
|
||||
// Transaction stats
|
||||
m["TxN"] = fmt.Sprintf("%v", stats.TxN)
|
||||
m["OpenTxN"] = fmt.Sprintf("%v", stats.OpenTxN)
|
||||
|
||||
return m
|
||||
}
|
||||
|
||||
// boltDBBatch buffers key/value operations in a slice and dumps them to the
// underlying DB in a single transaction upon a Write call.
type boltDBBatch struct {
    buffer []struct {
        k []byte
        v []byte
    }
    db *BoltDB
}
|
||||
|
||||
// NewBatch returns a new batch.
|
||||
func (bdb *BoltDB) NewBatch() Batch {
|
||||
return &boltDBBatch{
|
||||
buffer: make([]struct {
|
||||
k []byte
|
||||
v []byte
|
||||
}, 0),
|
||||
db: bdb,
|
||||
}
|
||||
}
|
||||
|
||||
// It is safe to modify the contents of the argument after Set returns but not
|
||||
// before.
|
||||
func (bdb *boltDBBatch) Set(key, value []byte) {
|
||||
bdb.buffer = append(bdb.buffer, struct {
|
||||
k []byte
|
||||
v []byte
|
||||
}{
|
||||
key, value,
|
||||
})
|
||||
}
|
||||
|
||||
// It is safe to modify the contents of the argument after Delete returns but
|
||||
// not before.
|
||||
func (bdb *boltDBBatch) Delete(key []byte) {
|
||||
for i, elem := range bdb.buffer {
|
||||
if bytes.Equal(elem.k, key) {
|
||||
// delete without preserving order
|
||||
bdb.buffer[i] = bdb.buffer[len(bdb.buffer)-1]
|
||||
bdb.buffer = bdb.buffer[:len(bdb.buffer)-1]
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// NOTE: the operation is synchronous (see BoltDB for reasons)
|
||||
func (bdb *boltDBBatch) Write() {
|
||||
err := bdb.db.db.Batch(func(tx *bbolt.Tx) error {
|
||||
b := tx.Bucket(bucket)
|
||||
for _, elem := range bdb.buffer {
|
||||
if putErr := b.Put(elem.k, elem.v); putErr != nil {
|
||||
return putErr
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
func (bdb *boltDBBatch) WriteSync() {
|
||||
bdb.Write()
|
||||
}
|
||||
|
||||
func (bdb *boltDBBatch) Close() {}
|
||||
|
||||
// WARNING: Any concurrent writes or reads will block until the iterator is
|
||||
// closed.
|
||||
func (bdb *BoltDB) Iterator(start, end []byte) Iterator {
|
||||
tx, err := bdb.db.Begin(false)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return newBoltDBIterator(tx, start, end, false)
|
||||
}
|
||||
|
||||
// WARNING: Any concurrent writes or reads will block until the iterator is
|
||||
// closed.
|
||||
func (bdb *BoltDB) ReverseIterator(start, end []byte) Iterator {
|
||||
tx, err := bdb.db.Begin(false)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return newBoltDBIterator(tx, start, end, true)
|
||||
}
|
||||
|
||||
// boltDBIterator allows you to iterate on range of keys/values given some
|
||||
// start / end keys (nil & nil will result in doing full scan).
|
||||
type boltDBIterator struct {
|
||||
tx *bbolt.Tx
|
||||
|
||||
itr *bbolt.Cursor
|
||||
start []byte
|
||||
end []byte
|
||||
|
||||
currentKey []byte
|
||||
currentValue []byte
|
||||
|
||||
isInvalid bool
|
||||
isReverse bool
|
||||
}
|
||||
|
||||
func newBoltDBIterator(tx *bbolt.Tx, start, end []byte, isReverse bool) *boltDBIterator {
|
||||
itr := tx.Bucket(bucket).Cursor()
|
||||
|
||||
var ck, cv []byte
|
||||
if isReverse {
|
||||
if end == nil {
|
||||
ck, cv = itr.Last()
|
||||
} else {
|
||||
_, _ = itr.Seek(end) // after key
|
||||
ck, cv = itr.Prev() // return to end key
|
||||
}
|
||||
} else {
|
||||
if start == nil {
|
||||
ck, cv = itr.First()
|
||||
} else {
|
||||
ck, cv = itr.Seek(start)
|
||||
}
|
||||
}
|
||||
|
||||
return &boltDBIterator{
|
||||
tx: tx,
|
||||
itr: itr,
|
||||
start: start,
|
||||
end: end,
|
||||
currentKey: ck,
|
||||
currentValue: cv,
|
||||
isReverse: isReverse,
|
||||
isInvalid: false,
|
||||
}
|
||||
}
|
||||
|
||||
func (itr *boltDBIterator) Domain() ([]byte, []byte) {
|
||||
return itr.start, itr.end
|
||||
}
|
||||
|
||||
func (itr *boltDBIterator) Valid() bool {
|
||||
if itr.isInvalid {
|
||||
return false
|
||||
}
|
||||
|
||||
// iterated to the end of the cursor
|
||||
if len(itr.currentKey) == 0 {
|
||||
itr.isInvalid = true
|
||||
return false
|
||||
}
|
||||
|
||||
if itr.isReverse {
|
||||
if itr.start != nil && bytes.Compare(itr.currentKey, itr.start) < 0 {
|
||||
itr.isInvalid = true
|
||||
return false
|
||||
}
|
||||
} else {
|
||||
if itr.end != nil && bytes.Compare(itr.end, itr.currentKey) <= 0 {
|
||||
itr.isInvalid = true
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// Valid
|
||||
return true
|
||||
}
|
||||
|
||||
func (itr *boltDBIterator) Next() {
|
||||
itr.assertIsValid()
|
||||
if itr.isReverse {
|
||||
itr.currentKey, itr.currentValue = itr.itr.Prev()
|
||||
} else {
|
||||
itr.currentKey, itr.currentValue = itr.itr.Next()
|
||||
}
|
||||
}
|
||||
|
||||
func (itr *boltDBIterator) Key() []byte {
|
||||
itr.assertIsValid()
|
||||
return itr.currentKey
|
||||
}
|
||||
|
||||
func (itr *boltDBIterator) Value() []byte {
|
||||
itr.assertIsValid()
|
||||
return itr.currentValue
|
||||
}
|
||||
|
||||
func (itr *boltDBIterator) Close() {
|
||||
err := itr.tx.Rollback()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
func (itr *boltDBIterator) assertIsValid() {
|
||||
if !itr.Valid() {
|
||||
panic("Boltdb-iterator is invalid")
|
||||
}
|
||||
}
|
||||
|
||||
// nonEmptyKey returns []byte("nil") if key is empty.
// WARNING: this may collide with a "nil" user key!
func nonEmptyKey(key []byte) []byte {
    if len(key) == 0 {
        return []byte("nil")
    }
    return key
}
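A small usage sketch against the BoltDB API defined above (the database name, directory, and keys are illustrative; remember the file is behind the boltdb build tag, so this only compiles with -tags boltdb):

package main

import (
    "fmt"

    db "github.com/tendermint/tendermint/libs/db"
)

func main() {
    // NewBoltDB creates (or opens) /tmp/example.db with the single "tm" bucket.
    bdb, err := db.NewBoltDB("example", "/tmp")
    if err != nil {
        panic(err)
    }
    defer bdb.Close()

    // All writes are synchronous by default, as noted in the wrapper's doc comment.
    bdb.Set([]byte("height"), []byte("42"))
    fmt.Printf("%s\n", bdb.Get([]byte("height")))

    // Full scan (nil, nil); reads and writes block until the iterator is closed.
    itr := bdb.Iterator(nil, nil)
    defer itr.Close()
    for ; itr.Valid(); itr.Next() {
        fmt.Printf("%s = %s\n", itr.Key(), itr.Value())
    }
}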
libs/db/boltdb_test.go (new file, 37 lines)
@@ -0,0 +1,37 @@
|
||||
// +build boltdb
|
||||
|
||||
package db
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
)
|
||||
|
||||
func TestBoltDBNewBoltDB(t *testing.T) {
|
||||
name := fmt.Sprintf("test_%x", cmn.RandStr(12))
|
||||
dir := os.TempDir()
|
||||
defer cleanupDBDir(dir, name)
|
||||
|
||||
db, err := NewBoltDB(name, dir)
|
||||
require.NoError(t, err)
|
||||
db.Close()
|
||||
}
|
||||
|
||||
func BenchmarkBoltDBRandomReadsWrites(b *testing.B) {
|
||||
name := fmt.Sprintf("test_%x", cmn.RandStr(12))
|
||||
db, err := NewBoltDB(name, "")
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
defer func() {
|
||||
db.Close()
|
||||
cleanupDBDir("", name)
|
||||
}()
|
||||
|
||||
benchmarkRandomReadsWrites(b, db)
|
||||
}
|
@@ -1,4 +1,4 @@
-// +build gcc
+// +build cleveldb

 package db

@@ -1,4 +1,4 @@
-// +build gcc
+// +build cleveldb

 package db

@@ -93,7 +93,7 @@ func TestCLevelDBBackend(t *testing.T) {
     // Can't use "" (current directory) or "./" here because levigo.Open returns:
     // "Error initializing DB: IO error: test_XXX.db: Invalid argument"
     dir := os.TempDir()
-    db := NewDB(name, LevelDBBackend, dir)
+    db := NewDB(name, CLevelDBBackend, dir)
     defer cleanupDBDir(dir, name)

     _, ok := db.(*CLevelDB)
@@ -103,7 +103,7 @@ func TestCLevelDBBackend(t *testing.T) {
 func TestCLevelDBStats(t *testing.T) {
     name := fmt.Sprintf("test_%x", cmn.RandStr(12))
     dir := os.TempDir()
-    db := NewDB(name, LevelDBBackend, dir)
+    db := NewDB(name, CLevelDBBackend, dir)
     defer cleanupDBDir(dir, name)

     assert.NotEmpty(t, db.Stats())
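With the tag rename above, the cleveldb-backed files participate in a build only when the matching tag is passed (go test -tags cleveldb, and likewise -tags boltdb for bolt). A minimal sketch of a tag-guarded file, following the same convention:

// +build cleveldb

// Everything in this file is compiled only when the cleveldb build tag is set,
// e.g. `go build -tags cleveldb` or `go test -tags cleveldb`.
package db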
@@ -1,6 +1,8 @@
|
||||
package db
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"sync"
|
||||
@@ -8,6 +10,7 @@ import (
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
)
|
||||
|
||||
//----------------------------------------
|
||||
@@ -188,3 +191,66 @@ func (mockIterator) Value() []byte {
|
||||
|
||||
func (mockIterator) Close() {
|
||||
}
|
||||
|
||||
func benchmarkRandomReadsWrites(b *testing.B, db DB) {
|
||||
b.StopTimer()
|
||||
|
||||
// create dummy data
|
||||
const numItems = int64(1000000)
|
||||
internal := map[int64]int64{}
|
||||
for i := 0; i < int(numItems); i++ {
|
||||
internal[int64(i)] = int64(0)
|
||||
}
|
||||
|
||||
// fmt.Println("ok, starting")
|
||||
b.StartTimer()
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
// Write something
|
||||
{
|
||||
idx := int64(cmn.RandInt()) % numItems
|
||||
internal[idx]++
|
||||
val := internal[idx]
|
||||
idxBytes := int642Bytes(int64(idx))
|
||||
valBytes := int642Bytes(int64(val))
|
||||
//fmt.Printf("Set %X -> %X\n", idxBytes, valBytes)
|
||||
db.Set(idxBytes, valBytes)
|
||||
}
|
||||
|
||||
// Read something
|
||||
{
|
||||
idx := int64(cmn.RandInt()) % numItems
|
||||
valExp := internal[idx]
|
||||
idxBytes := int642Bytes(int64(idx))
|
||||
valBytes := db.Get(idxBytes)
|
||||
//fmt.Printf("Get %X -> %X\n", idxBytes, valBytes)
|
||||
if valExp == 0 {
|
||||
if !bytes.Equal(valBytes, nil) {
|
||||
b.Errorf("Expected %v for %v, got %X", nil, idx, valBytes)
|
||||
break
|
||||
}
|
||||
} else {
|
||||
if len(valBytes) != 8 {
|
||||
b.Errorf("Expected length 8 for %v, got %X", idx, valBytes)
|
||||
break
|
||||
}
|
||||
valGot := bytes2Int64(valBytes)
|
||||
if valExp != valGot {
|
||||
b.Errorf("Expected %v for %v, got %v", valExp, idx, valGot)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
func int642Bytes(i int64) []byte {
|
||||
buf := make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(buf, uint64(i))
|
||||
return buf
|
||||
}
|
||||
|
||||
func bytes2Int64(buf []byte) int64 {
|
||||
return int64(binary.BigEndian.Uint64(buf))
|
||||
}
|
||||
|
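The benchmarkRandomReadsWrites helper above makes it easy to benchmark any backend that implements the DB interface; as a sketch, a memdb variant in the same test package would only need the wrapper below (the benchmark name is hypothetical, not part of this diff):

func BenchmarkMemDBRandomReadsWrites(b *testing.B) {
    // NewMemDB needs no files, so there is nothing to clean up afterwards.
    db := NewMemDB()
    defer db.Close()

    benchmarkRandomReadsWrites(b, db)
}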
@@ -5,17 +5,37 @@ import (
    "strings"
)

//----------------------------------------
// Main entry

type DBBackendType string

// These are valid backend types.
const (
    LevelDBBackend DBBackendType = "leveldb" // legacy, defaults to goleveldb unless +gcc
    CLevelDBBackend DBBackendType = "cleveldb"
    // LevelDBBackend is a legacy type. Defaults to goleveldb unless the cleveldb
    // build tag was used, in which case it becomes cleveldb.
    // Deprecated: Use concrete types (goleveldb, cleveldb, etc.)
    LevelDBBackend DBBackendType = "leveldb"
    // GoLevelDBBackend represents goleveldb (github.com/syndtr/goleveldb - most
    // popular implementation)
    // - pure go
    // - stable
    GoLevelDBBackend DBBackendType = "goleveldb"
    MemDBBackend DBBackendType = "memdb"
    FSDBBackend DBBackendType = "fsdb" // using the filesystem naively
    // CLevelDBBackend represents cleveldb (uses levigo wrapper)
    // - fast
    // - requires gcc
    // - use cleveldb build tag (go build -tags cleveldb)
    CLevelDBBackend DBBackendType = "cleveldb"
    // MemDBBackend represents an in-memory key-value store, which is mostly used
    // for testing.
    MemDBBackend DBBackendType = "memdb"
    // FSDBBackend represents filesystem database
    // - EXPERIMENTAL
    // - slow
    FSDBBackend DBBackendType = "fsdb"
    // BoltDBBackend represents bolt (uses etcd's fork of bolt -
    // github.com/etcd-io/bbolt)
    // - EXPERIMENTAL
    // - may be faster in some use-cases (random reads - indexer)
    // - use boltdb build tag (go build -tags boltdb)
    BoltDBBackend DBBackendType = "boltdb"
)

type dbCreator func(name string, dir string) (DB, error)
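A small sketch of picking one of these backends through NewDB, the constructor the tests above use; the database name, key, and directory are illustrative:

package main

import (
    "fmt"

    dbm "github.com/tendermint/tendermint/libs/db"
)

func main() {
    // goleveldb is the pure-Go default; cleveldb and boltdb additionally
    // require building with -tags cleveldb / -tags boltdb, as noted above.
    db := dbm.NewDB("example", dbm.GoLevelDBBackend, "/tmp")
    defer db.Close()

    db.Set([]byte("k"), []byte("v"))
    fmt.Printf("%s\n", db.Get([]byte("k")))
}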
@@ -20,7 +20,7 @@ const (
 )

 func init() {
-    registerDBCreator(FSDBBackend, func(name string, dir string) (DB, error) {
+    registerDBCreator(FSDBBackend, func(name, dir string) (DB, error) {
         dbPath := filepath.Join(dir, name+".db")
         return NewFSDB(dbPath), nil
     }, false)

@@ -9,8 +9,6 @@ import (
     "github.com/syndtr/goleveldb/leveldb/errors"
     "github.com/syndtr/goleveldb/leveldb/iterator"
     "github.com/syndtr/goleveldb/leveldb/opt"
-
-    cmn "github.com/tendermint/tendermint/libs/common"
 )

 func init() {
@@ -67,7 +65,7 @@ func (db *GoLevelDB) Set(key []byte, value []byte) {
     value = nonNilBytes(value)
     err := db.db.Put(key, value, nil)
     if err != nil {
-        cmn.PanicCrisis(err)
+        panic(err)
     }
 }

@@ -77,7 +75,7 @@ func (db *GoLevelDB) SetSync(key []byte, value []byte) {
     value = nonNilBytes(value)
     err := db.db.Put(key, value, &opt.WriteOptions{Sync: true})
     if err != nil {
-        cmn.PanicCrisis(err)
+        panic(err)
     }
 }

@@ -86,7 +84,7 @@ func (db *GoLevelDB) Delete(key []byte) {
     key = nonNilBytes(key)
     err := db.db.Delete(key, nil)
     if err != nil {
-        cmn.PanicCrisis(err)
+        panic(err)
     }
 }

@@ -95,7 +93,7 @@ func (db *GoLevelDB) DeleteSync(key []byte) {
     key = nonNilBytes(key)
     err := db.db.Delete(key, &opt.WriteOptions{Sync: true})
     if err != nil {
-        cmn.PanicCrisis(err)
+        panic(err)
     }
 }

@@ -1,29 +1,27 @@
|
||||
package db
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/syndtr/goleveldb/leveldb/opt"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
)
|
||||
|
||||
func TestNewGoLevelDB(t *testing.T) {
|
||||
func TestGoLevelDBNewGoLevelDB(t *testing.T) {
|
||||
name := fmt.Sprintf("test_%x", cmn.RandStr(12))
|
||||
// Test write locks
|
||||
db, err := NewGoLevelDB(name, "")
|
||||
defer cleanupDBDir("", name)
|
||||
|
||||
// Test we can't open the db twice for writing
|
||||
wr1, err := NewGoLevelDB(name, "")
|
||||
require.Nil(t, err)
|
||||
defer os.RemoveAll("./" + name + ".db")
|
||||
_, err = NewGoLevelDB(name, "")
|
||||
require.NotNil(t, err)
|
||||
db.Close() // Close the db to release the lock
|
||||
wr1.Close() // Close the db to release the lock
|
||||
|
||||
// Open the db twice in a row to test read-only locks
|
||||
// Test we can open the db twice for reading only
|
||||
ro1, err := NewGoLevelDBWithOpts(name, "", &opt.Options{ReadOnly: true})
|
||||
defer ro1.Close()
|
||||
require.Nil(t, err)
|
||||
@@ -32,75 +30,16 @@ func TestNewGoLevelDB(t *testing.T) {
|
||||
require.Nil(t, err)
|
||||
}
|
||||
|
||||
func BenchmarkRandomReadsWrites(b *testing.B) {
|
||||
b.StopTimer()
|
||||
|
||||
numItems := int64(1000000)
|
||||
internal := map[int64]int64{}
|
||||
for i := 0; i < int(numItems); i++ {
|
||||
internal[int64(i)] = int64(0)
|
||||
}
|
||||
db, err := NewGoLevelDB(fmt.Sprintf("test_%x", cmn.RandStr(12)), "")
|
||||
func BenchmarkGoLevelDBRandomReadsWrites(b *testing.B) {
|
||||
name := fmt.Sprintf("test_%x", cmn.RandStr(12))
|
||||
db, err := NewGoLevelDB(name, "")
|
||||
if err != nil {
|
||||
b.Fatal(err.Error())
|
||||
return
|
||||
b.Fatal(err)
|
||||
}
|
||||
defer func() {
|
||||
db.Close()
|
||||
cleanupDBDir("", name)
|
||||
}()
|
||||
|
||||
fmt.Println("ok, starting")
|
||||
b.StartTimer()
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
// Write something
|
||||
{
|
||||
idx := (int64(cmn.RandInt()) % numItems)
|
||||
internal[idx]++
|
||||
val := internal[idx]
|
||||
idxBytes := int642Bytes(int64(idx))
|
||||
valBytes := int642Bytes(int64(val))
|
||||
//fmt.Printf("Set %X -> %X\n", idxBytes, valBytes)
|
||||
db.Set(
|
||||
idxBytes,
|
||||
valBytes,
|
||||
)
|
||||
}
|
||||
// Read something
|
||||
{
|
||||
idx := (int64(cmn.RandInt()) % numItems)
|
||||
val := internal[idx]
|
||||
idxBytes := int642Bytes(int64(idx))
|
||||
valBytes := db.Get(idxBytes)
|
||||
//fmt.Printf("Get %X -> %X\n", idxBytes, valBytes)
|
||||
if val == 0 {
|
||||
if !bytes.Equal(valBytes, nil) {
|
||||
b.Errorf("Expected %v for %v, got %X",
|
||||
nil, idx, valBytes)
|
||||
break
|
||||
}
|
||||
} else {
|
||||
if len(valBytes) != 8 {
|
||||
b.Errorf("Expected length 8 for %v, got %X",
|
||||
idx, valBytes)
|
||||
break
|
||||
}
|
||||
valGot := bytes2Int64(valBytes)
|
||||
if val != valGot {
|
||||
b.Errorf("Expected %v for %v, got %v",
|
||||
val, idx, valGot)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
db.Close()
|
||||
}
|
||||
|
||||
func int642Bytes(i int64) []byte {
|
||||
buf := make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(buf, uint64(i))
|
||||
return buf
|
||||
}
|
||||
|
||||
func bytes2Int64(buf []byte) int64 {
|
||||
return int64(binary.BigEndian.Uint64(buf))
|
||||
benchmarkRandomReadsWrites(b, db)
|
||||
}
|
||||
|
@@ -1,8 +1,6 @@
 package db

-import (
-    "sync"
-)
+import "sync"

 type atomicSetDeleter interface {
     Mutex() *sync.Mutex

@@ -7,7 +7,7 @@ import (
 )

 func init() {
-    registerDBCreator(MemDBBackend, func(name string, dir string) (DB, error) {
+    registerDBCreator(MemDBBackend, func(name, dir string) (DB, error) {
         return NewMemDB(), nil
     }, false)
 }

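The init functions above show the registration hook each backend uses; as a sketch, a custom backend would register itself the same way from its own file (the "mydb" name and the NewMyDB constructor are hypothetical, not part of this diff):

func init() {
    // "mydb" is a hypothetical backend type; NewMyDB does not exist in this repo.
    registerDBCreator(DBBackendType("mydb"), func(name, dir string) (DB, error) {
        return NewMyDB(name, dir)
    }, false)
}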
@@ -28,7 +28,7 @@ func TestRemoteDB(t *testing.T) {
     client, err := remotedb.NewRemoteDB(ln.Addr().String(), cert)
     require.Nil(t, err, "expecting a successful client creation")
     dbName := "test-remote-db"
-    require.Nil(t, client.InitRemote(&remotedb.Init{Name: dbName, Type: "leveldb"}))
+    require.Nil(t, client.InitRemote(&remotedb.Init{Name: dbName, Type: "goleveldb"}))
     defer func() {
         err := os.RemoveAll(dbName + ".db")
         if err != nil {

@@ -22,6 +22,11 @@ func TestPrefixIteratorNoMatchNil(t *testing.T) {
 // Empty iterator for db populated after iterator created.
 func TestPrefixIteratorNoMatch1(t *testing.T) {
     for backend := range backends {
+        if backend == BoltDBBackend {
+            t.Log("bolt does not support concurrent writes while iterating")
+            continue
+        }
+
         t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) {
             db, dir := newTempDB(t, backend)
             defer os.RemoveAll(dir)

@@ -46,13 +46,16 @@ func TestSubscribe(t *testing.T) {

         err = s.Publish(ctx, "Asylum")
         assert.NoError(t, err)
+
+        err = s.Publish(ctx, "Ivan")
+        assert.NoError(t, err)
     }()

     select {
     case <-published:
         assertReceive(t, "Quicksilver", subscription.Out())
         assertCancelled(t, subscription, pubsub.ErrOutOfCapacity)
-    case <-time.After(100 * time.Millisecond):
+    case <-time.After(3 * time.Second):
         t.Fatal("Expected Publish(Asylum) not to block")
     }
 }
@@ -101,7 +104,7 @@ func TestSubscribeUnbuffered(t *testing.T) {
     select {
     case <-published:
         t.Fatal("Expected Publish(Darkhawk) to block")
-    case <-time.After(100 * time.Millisecond):
+    case <-time.After(3 * time.Second):
         assertReceive(t, "Ultron", subscription.Out())
         assertReceive(t, "Darkhawk", subscription.Out())
     }

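The 100ms -> 3s change above simply widens the timeout in the receive-or-fail select so the tests stop flaking on slow CI machines. A sketch of the same pattern packaged as a helper in the spirit of assertReceive (the helper name and channel type are assumptions, not the actual pubsub API):

package pubsub_test

import (
    "testing"
    "time"
)

// receiveWithTimeout fails the test if nothing arrives within a generous bound,
// mirroring the widened timeouts in the tests above.
func receiveWithTimeout(t *testing.T, ch <-chan interface{}) interface{} {
    t.Helper()
    select {
    case msg := <-ch:
        return msg
    case <-time.After(3 * time.Second):
        t.Fatal("timed out waiting for message")
        return nil
    }
}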
Some files were not shown because too many files have changed in this diff.