mirror of https://github.com/fluencelabs/tendermint
synced 2025-07-16 04:41:59 +00:00

Compare commits: 204 commits, v0.26.4 ... release/v0
@@ -3,10 +3,17 @@ version: 2
defaults: &defaults
working_directory: /go/src/github.com/tendermint/tendermint
docker:
- image: circleci/golang:1.10.3
- image: circleci/golang:1.11.4
environment:
GOBIN: /tmp/workspace/bin

docs_update_config: &docs_update_config
working_directory: ~/repo
docker:
- image: tendermint/docs_deployment
environment:
AWS_REGION: us-east-1

jobs:
setup_dependencies:
<<: *defaults

@@ -41,10 +48,10 @@ jobs:
key: v3-pkg-cache
paths:
- /go/pkg
# - save_cache:
# key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
# paths:
# - /go/src/github.com/tendermint/tendermint
- save_cache:
key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
paths:
- /go/src/github.com/tendermint/tendermint

build_slate:
<<: *defaults

@@ -53,23 +60,8 @@ jobs:
at: /tmp/workspace
- restore_cache:
key: v3-pkg-cache
# https://discuss.circleci.com/t/saving-cache-stopped-working-warning-skipping-this-step-disabled-in-configuration/24423/2
# - restore_cache:
# key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- checkout
- run:
name: tools
command: |
export PATH="$GOBIN:$PATH"
make get_tools
- run:
name: dependencies
command: |
export PATH="$GOBIN:$PATH"
make get_vendor_deps
- run: mkdir -p $GOPATH/src/github.com/tendermint
- run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint

- restore_cache:
key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- run:
name: slate docs
command: |

@@ -84,29 +76,14 @@ jobs:
at: /tmp/workspace
- restore_cache:
key: v3-pkg-cache
# - restore_cache:
# key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- checkout
- run:
name: tools
command: |
export PATH="$GOBIN:$PATH"
make get_tools
make get_dev_tools
- run:
name: dependencies
command: |
export PATH="$GOBIN:$PATH"
make get_vendor_deps
- run: mkdir -p $GOPATH/src/github.com/tendermint
- run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint

- restore_cache:
key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- run:
name: metalinter
command: |
set -ex
export PATH="$GOBIN:$PATH"
make metalinter
make lint
- run:
name: check_dep
command: |

@@ -121,22 +98,8 @@ jobs:
at: /tmp/workspace
- restore_cache:
key: v3-pkg-cache
# - restore_cache:
# key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- checkout
- run:
name: tools
command: |
export PATH="$GOBIN:$PATH"
make get_tools
- run:
name: dependencies
command: |
export PATH="$GOBIN:$PATH"
make get_vendor_deps
- run: mkdir -p $GOPATH/src/github.com/tendermint
- run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint

- restore_cache:
key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- run:
name: Run abci apps tests
command: |

@@ -152,22 +115,8 @@ jobs:
at: /tmp/workspace
- restore_cache:
key: v3-pkg-cache
# - restore_cache:
# key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- checkout
- run:
name: tools
command: |
export PATH="$GOBIN:$PATH"
make get_tools
- run:
name: dependencies
command: |
export PATH="$GOBIN:$PATH"
make get_vendor_deps
- run: mkdir -p $GOPATH/src/github.com/tendermint
- run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint

- restore_cache:
key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- run:
name: Run abci-cli tests
command: |

@@ -181,22 +130,8 @@ jobs:
at: /tmp/workspace
- restore_cache:
key: v3-pkg-cache
# - restore_cache:
# key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- checkout
- run:
name: tools
command: |
export PATH="$GOBIN:$PATH"
make get_tools
- run:
name: dependencies
command: |
export PATH="$GOBIN:$PATH"
make get_vendor_deps
- run: mkdir -p $GOPATH/src/github.com/tendermint
- run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint

- restore_cache:
key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- run: sudo apt-get update && sudo apt-get install -y --no-install-recommends bsdmainutils
- run:
name: Run tests

@@ -210,22 +145,8 @@ jobs:
at: /tmp/workspace
- restore_cache:
key: v3-pkg-cache
# - restore_cache:
# key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- checkout
- run:
name: tools
command: |
export PATH="$GOBIN:$PATH"
make get_tools
- run:
name: dependencies
command: |
export PATH="$GOBIN:$PATH"
make get_vendor_deps
- run: mkdir -p $GOPATH/src/github.com/tendermint
- run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint

- restore_cache:
key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- run: mkdir -p /tmp/logs
- run:
name: Run tests

@@ -233,7 +154,7 @@ jobs:
for pkg in $(go list github.com/tendermint/tendermint/... | circleci tests split --split-by=timings); do
id=$(basename "$pkg")

GOCACHE=off go test -timeout 5m -race -coverprofile=/tmp/workspace/profiles/$id.out -covermode=atomic "$pkg" | tee "/tmp/logs/$id-$RANDOM.log"
GOCACHE=off go test -v -timeout 5m -race -coverprofile=/tmp/workspace/profiles/$id.out -covermode=atomic "$pkg" | tee "/tmp/logs/$id-$RANDOM.log"
done
- persist_to_workspace:
root: /tmp/workspace

@@ -249,22 +170,8 @@ jobs:
at: /tmp/workspace
- restore_cache:
key: v3-pkg-cache
# - restore_cache:
# key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- checkout
- run:
name: tools
command: |
export PATH="$GOBIN:$PATH"
make get_tools
- run:
name: dependencies
command: |
export PATH="$GOBIN:$PATH"
make get_vendor_deps
- run: mkdir -p $GOPATH/src/github.com/tendermint
- run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint

- restore_cache:
key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- run:
name: Run tests
command: bash test/persist/test_failure_indices.sh

@@ -285,9 +192,7 @@ jobs:
name: run localnet and exit on failure
command: |
set -x
make get_tools
make get_vendor_deps
make build-linux
docker run --rm -v "$PWD":/go/src/github.com/tendermint/tendermint -w /go/src/github.com/tendermint/tendermint golang:1.11.4 make build-linux
make localnet-start &
./scripts/localnet-blocks-test.sh 40 5 10 localhost

@@ -310,22 +215,10 @@ jobs:
steps:
- attach_workspace:
at: /tmp/workspace
# - restore_cache:
# key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- checkout
- run:
name: tools
command: |
export PATH="$GOBIN:$PATH"
make get_tools
- run:
name: dependencies
command: |
export PATH="$GOBIN:$PATH"
make get_vendor_deps
- run: mkdir -p $GOPATH/src/github.com/tendermint
- run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint

- restore_cache:
key: v3-pkg-cache
- restore_cache:
key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- run:
name: gather
command: |

@@ -339,10 +232,25 @@ jobs:
name: upload
command: bash .circleci/codecov.sh -f coverage.txt

deploy_docs:
<<: *docs_update_config
steps:
- checkout
- run:
name: Trigger website build
command: |
chamber exec tendermint -- start_website_build

workflows:
version: 2
test-suite:
jobs:
- deploy_docs:
filters:
branches:
only:
- master
- develop
- setup_dependencies
- lint:
requires:
4 .github/CODEOWNERS vendored

@@ -4,4 +4,6 @@
* @ebuchman @melekes @xla

# Precious documentation
/docs/ @zramsay
/docs/README.md @zramsay
/docs/DOCS_README.md @zramsay
/docs/.vuepress/ @zramsay
59 .golangci.yml Normal file

@@ -0,0 +1,59 @@
run:
deadline: 1m

linters:
enable-all: true
disable:
- gocyclo
- golint
- maligned
- errcheck
- staticcheck
- dupl
- ineffassign
- interfacer
- unconvert
- goconst
- unparam
- nakedret
- lll
- gochecknoglobals
- gocritic
- gochecknoinits
- scopelint
- stylecheck

# linters-settings:
# govet:
# check-shadowing: true
# golint:
# min-confidence: 0
# gocyclo:
# min-complexity: 10
# maligned:
# suggest-new: true
# dupl:
# threshold: 100
# goconst:
# min-len: 2
# min-occurrences: 2
# depguard:
# list-type: blacklist
# packages:
# # logging is allowed only by logutils.Log, logrus
# # is allowed to use only in logutils package
# - github.com/sirupsen/logrus
# misspell:
# locale: US
# lll:
# line-length: 140
# goimports:
# local-prefixes: github.com/golangci/golangci-lint
# gocritic:
# enabled-tags:
# - performance
# - style
# - experimental
# disabled-checks:
# - wrapperFunc
# - commentFormatting # https://github.com/go-critic/go-critic/issues/755
431 CHANGELOG.md

@@ -1,13 +1,438 @@
# Changelog

## v0.30.2

*March 10th, 2019*

This release fixes a CLevelDB memory leak. It was happening because we were not
closing the WriteBatch object after use. See [levigo's
godoc](https://godoc.org/github.com/jmhodges/levigo#WriteBatch.Close) for the
Close method. Special thanks goes to @Stumble who both reported an issue in
[cosmos-sdk](https://github.com/cosmos/cosmos-sdk/issues/3842) and provided a
fix here.

### BREAKING CHANGES:

* Go API
  - [libs/db] [\#3842](https://github.com/cosmos/cosmos-sdk/issues/3842) Add Close() method to Batch interface (@Stumble)

### BUG FIXES:
- [libs/db] [\#3842](https://github.com/cosmos/cosmos-sdk/issues/3842) Fix CLevelDB memory leak (@Stumble)
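To make the fix above concrete, here is a minimal, hypothetical sketch of how a levigo `WriteBatch` should be released after use; the function, variable names, and surrounding setup are illustrative, not the actual Tendermint `libs/db` code:

```go
package db

import "github.com/jmhodges/levigo"

// writeKV batches a single write and then releases the batch's C-allocated
// memory. A missing Close (or defer of it) is exactly the kind of omission
// that leaked memory in the CLevelDB backend.
func writeKV(db *levigo.DB, key, value []byte) error {
	wb := levigo.NewWriteBatch()
	defer wb.Close() // release the underlying C object

	wb.Put(key, value)

	wo := levigo.NewWriteOptions()
	defer wo.Close()
	return db.Write(wo, wb)
}
```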
## v0.30.1

*February 20th, 2019*

This release fixes a consensus halt and a DataCorruptionError after restart
discovered in `game_of_stakes_6`. It also fixes a security issue in the p2p
handshake by authenticating the NetAddress.ID of the peer we're dialing.

### IMPROVEMENTS:

* [config] [\#3291](https://github.com/tendermint/tendermint/issues/3291) Make
  config.ResetTestRootWithChainID() create concurrency-safe test directories.

### BUG FIXES:

* [consensus] [\#3295](https://github.com/tendermint/tendermint/issues/3295)
  Flush WAL on stop to prevent data corruption during graceful shutdown.
* [consensus] [\#3302](https://github.com/tendermint/tendermint/issues/3302)
  Fix possible halt by resetting TriggeredTimeoutPrecommit before starting next height.
* [rpc] [\#3251](https://github.com/tendermint/tendermint/issues/3251) Fix
  `/net_info#peers#remote_ip` format. New format spec:
  * dotted decimal ("192.0.2.1"), if ip is an IPv4 or IP4-mapped IPv6 address
  * IPv6 ("2001:db8::1"), if ip is a valid IPv6 address
* [cmd] [\#3314](https://github.com/tendermint/tendermint/issues/3314) Return
  an error on `show_validator` when the private validator file does not exist.
* [p2p] [\#3010](https://github.com/tendermint/tendermint/issues/3010#issuecomment-464287627)
  Authenticate a peer against its NetAddress.ID when dialing.

## v0.30.0

*February 8th, 2019*

This release fixes yet another issue with the proposer selection algorithm.
We hope it's the last one, but we won't be surprised if it's not.
We plan to one day expose the selection algorithm more directly to
the application ([\#3285](https://github.com/tendermint/tendermint/issues/3285)), and even to support randomness ([\#763](https://github.com/tendermint/tendermint/issues/763)).
For more, see issues marked
[proposer-selection](https://github.com/tendermint/tendermint/labels/proposer-selection).

This release also includes a fix to prevent Tendermint from including the same
piece of evidence in more than one block. This issue was reported by @chengwenxi in our
[bug bounty program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

* Apps
  - [state] [\#3222](https://github.com/tendermint/tendermint/issues/3222)
    Duplicate updates for the same validator are forbidden. Apps must ensure
    that a given `ResponseEndBlock.ValidatorUpdates` contains only one entry per pubkey.

* Go API
  - [types] [\#3222](https://github.com/tendermint/tendermint/issues/3222)
    Remove `Add` and `Update` methods from `ValidatorSet` in favor of new
    `UpdateWithChangeSet`. This allows updates to be applied as a set, instead of
    one at a time.

* Block Protocol
  - [state] [\#3286](https://github.com/tendermint/tendermint/issues/3286) Blocks that include already committed evidence are invalid.

* P2P Protocol
  - [consensus] [\#3222](https://github.com/tendermint/tendermint/issues/3222)
    Validator updates are applied as a set, instead of one at a time, thus
    impacting the proposer priority calculation. This ensures that the proposer
    selection algorithm does not depend on the order of updates in
    `ResponseEndBlock.ValidatorUpdates`.

### IMPROVEMENTS:
- [crypto] [\#3279](https://github.com/tendermint/tendermint/issues/3279) Use `btcec.S256().N` directly instead of hard coding a copy.

### BUG FIXES:
- [state] [\#3222](https://github.com/tendermint/tendermint/issues/3222) Fix validator set updates so they are applied as a set, rather
  than one at a time. This makes the proposer selection algorithm independent of
  the order of updates in `ResponseEndBlock.ValidatorUpdates`.
- [evidence] [\#3286](https://github.com/tendermint/tendermint/issues/3286) Don't add committed evidence to evidence pool.
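A hedged sketch of what the new set-style update looks like from the Go-API side; the wrapper function and variable names are illustrative, only `ValidatorSet.UpdateWithChangeSet` itself comes from the release notes above:

```go
package example

import "github.com/tendermint/tendermint/types"

// applyUpdates illustrates the new API: every validator change coming out of a
// single ResponseEndBlock is applied in one call, so the result cannot depend
// on the order of the individual updates (and duplicates per pubkey are rejected).
func applyUpdates(valSet *types.ValidatorSet, changes []*types.Validator) error {
	return valSet.UpdateWithChangeSet(changes)
}
```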
## v0.29.2

*February 7th, 2019*

Special thanks to external contributors on this release:
@ackratos, @rickyyangz

**Note**: This release contains security sensitive patches in the `p2p` and
`crypto` packages:
- p2p:
  - Partial fix for MITM attacks on the p2p connection. MITM conditions may
    still exist. See [\#3010](https://github.com/tendermint/tendermint/issues/3010).
- crypto:
  - Eliminate our fork of `btcd` and use the `btcd/btcec` library directly for
    native secp256k1 signing. Note we still modify the signature encoding to
    prevent malleability.
  - Support the libsecp256k1 library via CGo through the `go-ethereum/crypto/secp256k1` package.
  - Eliminate MixEntropy functions

### BREAKING CHANGES:

* Go API
  - [crypto] [\#3278](https://github.com/tendermint/tendermint/issues/3278) Remove
    MixEntropy functions
  - [types] [\#3245](https://github.com/tendermint/tendermint/issues/3245) Commit uses `type CommitSig Vote` instead of `Vote` directly.
    In preparation for removing redundant fields from the commit [\#1648](https://github.com/tendermint/tendermint/issues/1648)

### IMPROVEMENTS:
- [consensus] [\#3246](https://github.com/tendermint/tendermint/issues/3246) Better logging and notes on recovery for corrupted WAL file
- [crypto] [\#3163](https://github.com/tendermint/tendermint/issues/3163) Use ethereum's libsecp256k1 go-wrapper for signatures when cgo is available
- [crypto] [\#3162](https://github.com/tendermint/tendermint/issues/3162) Wrap btcd instead of forking it to keep up with fixes (used if cgo is not available)
- [makefile] [\#3233](https://github.com/tendermint/tendermint/issues/3233) Use golangci-lint instead of go-metalinter
- [tools] [\#3218](https://github.com/tendermint/tendermint/issues/3218) Add go-deadlock tool to help detect deadlocks
- [tools] [\#3106](https://github.com/tendermint/tendermint/issues/3106) Add tm-signer-harness test harness for remote signers
- [tests] [\#3258](https://github.com/tendermint/tendermint/issues/3258) Fixed a bunch of non-deterministic test failures

### BUG FIXES:
- [node] [\#3186](https://github.com/tendermint/tendermint/issues/3186) EventBus and indexerService should be started before first block (for replay last block on handshake) execution (@ackratos)
- [p2p] [\#3232](https://github.com/tendermint/tendermint/issues/3232) Fix infinite loop leading to addrbook deadlock for seed nodes
- [p2p] [\#3247](https://github.com/tendermint/tendermint/issues/3247) Fix panic in SeedMode when calling FlushStop and OnStop
  concurrently
- [p2p] [\#3040](https://github.com/tendermint/tendermint/issues/3040) Fix MITM on secret connection by checking low-order points
- [privval] [\#3258](https://github.com/tendermint/tendermint/issues/3258) Fix race between sign requests and ping requests in socket that was causing messages to be corrupted

## v0.29.1

*January 24, 2019*

Special thanks to external contributors on this release:
@infinytum, @gauthamzz

This release contains two important fixes: one for p2p layer where we sometimes
were not closing connections and one for consensus layer where consensus with
no empty blocks (`create_empty_blocks = false`) could halt.

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### IMPROVEMENTS:
- [pex] [\#3037](https://github.com/tendermint/tendermint/issues/3037) Only log "Reached max attempts to dial" once
- [rpc] [\#3159](https://github.com/tendermint/tendermint/issues/3159) Expose
  `triggered_timeout_commit` in the `/dump_consensus_state`

### BUG FIXES:
- [consensus] [\#3199](https://github.com/tendermint/tendermint/issues/3199) Fix consensus halt with no empty blocks from not resetting triggeredTimeoutCommit
- [p2p] [\#2967](https://github.com/tendermint/tendermint/issues/2967) Fix file descriptor leak

## v0.29.0

*January 21, 2019*

Special thanks to external contributors on this release:
@bradyjoestar, @kunaldhariwal, @gauthamzz, @hrharder

This release is primarily about making some breaking changes to
the Block protocol version before Cosmos launch, and to fixing more issues
in the proposer selection algorithm discovered on Cosmos testnets.

The Block protocol changes include using a standard Merkle tree format (RFC 6962),
fixing some inconsistencies between field orders in Vote and Proposal structs,
and constraining the hash of the ConsensusParams to include only a few fields.

The proposer selection algorithm saw significant progress,
including a [formal proof by @cwgoes for the base-case in Idris](https://github.com/cwgoes/tm-proposer-idris)
and a [much more detailed specification (still in progress) by
@ancazamfir](https://github.com/tendermint/tendermint/pull/3140).

Fixes to the proposer selection algorithm include normalizing the proposer
priorities to mitigate the effects of large changes to the validator set.
That said, we just discovered [another bug](https://github.com/tendermint/tendermint/issues/3181),
which will be fixed in the next breaking release.

While we are trying to stabilize the Block protocol to preserve compatibility
with old chains, there may be some final changes yet to come before Cosmos
launch as we continue to audit and test the software.

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

* CLI/RPC/Config

* Apps
  - [state] [\#3049](https://github.com/tendermint/tendermint/issues/3049) Total voting power of the validator set is upper bounded by
    `MaxInt64 / 8`. Apps must ensure they do not return changes to the validator
    set that cause this maximum to be exceeded.

* Go API
  - [node] [\#3082](https://github.com/tendermint/tendermint/issues/3082) MetricsProvider now requires you to pass a chain ID
  - [types] [\#2713](https://github.com/tendermint/tendermint/issues/2713) Rename `TxProof.LeafHash` to `TxProof.Leaf`
  - [crypto/merkle] [\#2713](https://github.com/tendermint/tendermint/issues/2713) `SimpleProof.Verify` takes a `leaf` instead of a
    `leafHash` and performs the hashing itself

* Blockchain Protocol
  * [crypto/merkle] [\#2713](https://github.com/tendermint/tendermint/issues/2713) Merkle trees now match the RFC 6962 specification
  * [types] [\#3078](https://github.com/tendermint/tendermint/issues/3078) Re-order Timestamp and BlockID in CanonicalVote so it's
    consistent with CanonicalProposal (BlockID comes first)
  * [types] [\#3165](https://github.com/tendermint/tendermint/issues/3165) Hash of ConsensusParams only includes BlockSize.MaxBytes and
    BlockSize.MaxGas

* P2P Protocol
  - [consensus] [\#3049](https://github.com/tendermint/tendermint/issues/3049) Normalize priorities to not exceed `2*TotalVotingPower` to mitigate unfair proposer selection
    heavily preferring earlier joined validators in the case of an early bonded large validator unbonding

### FEATURES:

### IMPROVEMENTS:
- [rpc] [\#3065](https://github.com/tendermint/tendermint/issues/3065) Return maxPerPage (100), not defaultPerPage (30) if `per_page` is greater than the max 100.
- [instrumentation] [\#3082](https://github.com/tendermint/tendermint/issues/3082) Add `chain_id` label for all metrics

### BUG FIXES:
- [crypto] [\#3164](https://github.com/tendermint/tendermint/issues/3164) Update `btcd` fork for rare signRFC6979 bug
- [lite] [\#3171](https://github.com/tendermint/tendermint/issues/3171) Fix verifying large validator set changes
- [log] [\#3125](https://github.com/tendermint/tendermint/issues/3125) Fix year format
- [mempool] [\#3168](https://github.com/tendermint/tendermint/issues/3168) Limit tx size to fit in the max reactor msg size
- [scripts] [\#3147](https://github.com/tendermint/tendermint/issues/3147) Fix json2wal for large block parts (@bradyjoestar)
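For reference, the RFC 6962-style domain separation mentioned above looks roughly like the following. This is an illustrative sketch using the `tmhash` helper, not a verbatim copy of the `crypto/merkle` package:

```go
package merkleexample

import "github.com/tendermint/tendermint/crypto/tmhash"

// RFC 6962 hashes leaves and inner nodes with distinct one-byte prefixes,
// so a leaf can never be confused with an interior node of the tree.
func leafHash(leaf []byte) []byte {
	return tmhash.Sum(append([]byte{0x00}, leaf...))
}

func innerHash(left, right []byte) []byte {
	data := make([]byte, 0, 1+len(left)+len(right))
	data = append(data, 0x01)
	data = append(data, left...)
	data = append(data, right...)
	return tmhash.Sum(data)
}
```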
## v0.28.1

*January 18th, 2019*

Special thanks to external contributors on this release:
@HaoyangLiu

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### BUG FIXES:
- [consensus] Fix consensus halt from proposing blocks with too much evidence

## v0.28.0

*January 16th, 2019*

Special thanks to external contributors on this release:
@fmauricios, @gianfelipe93, @husio, @needkane, @srmo, @yutianwu

This release is primarily about upgrades to the `privval` system -
separating the `priv_validator.json` into distinct config and data files, and
refactoring the socket validator to support reconnections.

**Note:** Please backup your existing `priv_validator.json` before using this
version.

See [UPGRADING.md](UPGRADING.md) for more details.

### BREAKING CHANGES:

* CLI/RPC/Config
  - [cli] Removed `--proxy_app=dummy` option. Use `kvstore` (`persistent_kvstore`) instead.
  - [cli] Renamed `--proxy_app=nilapp` to `--proxy_app=noop`.
  - [config] [\#2992](https://github.com/tendermint/tendermint/issues/2992) `allow_duplicate_ip` is now set to false
  - [privval] [\#1181](https://github.com/tendermint/tendermint/issues/1181) Split `priv_validator.json` into immutable (`config/priv_validator_key.json`) and mutable (`data/priv_validator_state.json`) parts (@yutianwu)
  - [privval] [\#2926](https://github.com/tendermint/tendermint/issues/2926) Split up `PubKeyMsg` into `PubKeyRequest` and `PubKeyResponse` to be consistent with other message types
  - [privval] [\#2923](https://github.com/tendermint/tendermint/issues/2923) Listen for unix socket connections instead of dialing them

* Apps

* Go API
  - [types] [\#2981](https://github.com/tendermint/tendermint/issues/2981) Remove `PrivValidator.GetAddress()`

* Blockchain Protocol

* P2P Protocol

### FEATURES:
- [rpc] [\#3052](https://github.com/tendermint/tendermint/issues/3052) Include peer's remote IP in `/net_info`

### IMPROVEMENTS:
- [consensus] [\#3086](https://github.com/tendermint/tendermint/issues/3086) Log peerID on ignored votes (@srmo)
- [docs] [\#3061](https://github.com/tendermint/tendermint/issues/3061) Added specification for signing consensus msgs at
  ./docs/spec/consensus/signing.md
- [privval] [\#2948](https://github.com/tendermint/tendermint/issues/2948) Memoize pubkey so it's only requested once on startup
- [privval] [\#2923](https://github.com/tendermint/tendermint/issues/2923) Retry RemoteSigner connections on error

### BUG FIXES:

- [build] [\#3085](https://github.com/tendermint/tendermint/issues/3085) Fix `Version` field in build scripts (@husio)
- [crypto/multisig] [\#3102](https://github.com/tendermint/tendermint/issues/3102) Fix multisig keys address length
- [crypto/encoding] [\#3101](https://github.com/tendermint/tendermint/issues/3101) Fix `PubKeyMultisigThreshold` unmarshalling into `crypto.PubKey` interface
- [p2p/conn] [\#3111](https://github.com/tendermint/tendermint/issues/3111) Make SecretConnection thread safe
- [rpc] [\#3053](https://github.com/tendermint/tendermint/issues/3053) Fix internal error in `/tx_search` when results are empty
  (@gianfelipe93)
- [types] [\#2926](https://github.com/tendermint/tendermint/issues/2926) Do not panic if retrieving the privval's public key fails

## v0.27.4

*December 21st, 2018*

### BUG FIXES:

- [mempool] [\#3036](https://github.com/tendermint/tendermint/issues/3036) Fix
  LRU cache by popping the least recently used item when the cache is full,
  not the most recently used one!
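The LRU fix above boils down to evicting from the correct end of the list. A generic illustration using `container/list` (not the actual mempool cache code), where new items are pushed to the back, so the front is the least recently used:

```go
package lruexample

import "container/list"

// evictIfFull keeps the cache bounded by dropping the least recently used
// entry; the bug fixed in the release above was evicting from the wrong end,
// i.e. removing the most recently used item instead.
func evictIfFull(order *list.List, index map[string]*list.Element, max int) {
	if order.Len() < max {
		return
	}
	oldest := order.Front() // least recently used, assuming PushBack on insert
	if oldest != nil {
		order.Remove(oldest)
		delete(index, oldest.Value.(string))
	}
}
```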
## v0.27.3

*December 16th, 2018*

### BREAKING CHANGES:

* Go API
  - [dep] [\#3027](https://github.com/tendermint/tendermint/issues/3027) Revert to mainline Go crypto library, eliminating the modified
    `bcrypt.GenerateFromPassword`

## v0.27.2

*December 16th, 2018*

### IMPROVEMENTS:

- [node] [\#3025](https://github.com/tendermint/tendermint/issues/3025) Validate NodeInfo addresses on startup.

### BUG FIXES:

- [p2p] [\#3025](https://github.com/tendermint/tendermint/pull/3025) Revert to using defers in addrbook. Fixes deadlocks in pex and consensus upon invalid ExternalAddr/ListenAddr configuration.

## v0.27.1

*December 15th, 2018*

Special thanks to external contributors on this release:
@danil-lashin, @hleb-albau, @james-ray, @leo-xinwang

### FEATURES:
- [rpc] [\#2964](https://github.com/tendermint/tendermint/issues/2964) Add `UnconfirmedTxs(limit)` and `NumUnconfirmedTxs()` methods to HTTP/Local clients (@danil-lashin)
- [docs] [\#3004](https://github.com/tendermint/tendermint/issues/3004) Enable full-text search on docs pages

### IMPROVEMENTS:
- [consensus] [\#2971](https://github.com/tendermint/tendermint/issues/2971) Return error if ValidatorSet is empty after InitChain
  (@leo-xinwang)
- [ci/cd] [\#3005](https://github.com/tendermint/tendermint/issues/3005) Updated CircleCI job to trigger website build when docs are updated
- [docs] Various updates

### BUG FIXES:
- [cmd] [\#2983](https://github.com/tendermint/tendermint/issues/2983) `testnet` command always sets `addr_book_strict = false`
- [config] [\#2980](https://github.com/tendermint/tendermint/issues/2980) Fix CORS options formatting
- [kv indexer] [\#2912](https://github.com/tendermint/tendermint/issues/2912) Don't ignore key when executing CONTAINS
- [mempool] [\#2961](https://github.com/tendermint/tendermint/issues/2961) Call `notifyTxsAvailable` if there're txs left after committing a block, but recheck=false
- [mempool] [\#2994](https://github.com/tendermint/tendermint/issues/2994) Reject txs with negative GasWanted
- [p2p] [\#2990](https://github.com/tendermint/tendermint/issues/2990) Fix a bug where seeds don't disconnect from a peer after 3h
- [consensus] [\#3006](https://github.com/tendermint/tendermint/issues/3006) Save state after InitChain only when stateHeight is also 0 (@james-ray)
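A hedged usage sketch of the new `UnconfirmedTxs(limit)` client method; the endpoint address, function name, and error handling are illustrative, and the exact constructor signature of the `rpc/client` package may differ slightly between releases of that era:

```go
package rpcexample

import (
	"fmt"

	"github.com/tendermint/tendermint/rpc/client"
)

// dumpMempool prints how many transactions are currently waiting in the
// node's mempool, using the HTTP client methods added in v0.27.1.
func dumpMempool() error {
	c := client.NewHTTP("tcp://localhost:26657", "/websocket")

	res, err := c.UnconfirmedTxs(10) // ask for at most 10 txs back
	if err != nil {
		return err
	}
	fmt.Printf("%d unconfirmed txs returned\n", len(res.Txs))
	return nil
}
```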
## v0.27.0

*December 5th, 2018*

Special thanks to external contributors on this release:
@danil-lashin, @srmo

Special thanks to @dlguddus for discovering a [major
issue](https://github.com/tendermint/tendermint/issues/2718#issuecomment-440888677)
in the proposer selection algorithm.

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

This release is primarily about fixes to the proposer selection algorithm
in preparation for the [Cosmos Game of
Stakes](https://blog.cosmos.network/the-game-of-stakes-is-open-for-registration-83a404746ee6).
It also makes use of the `ConsensusParams.Validator.PubKeyTypes` to restrict the
key types that can be used by validators, and removes the `Heartbeat` consensus
message.

### BREAKING CHANGES:

* CLI/RPC/Config
  - [rpc] [\#2932](https://github.com/tendermint/tendermint/issues/2932) Rename `accum` to `proposer_priority`

* Go API
  - [db] [\#2913](https://github.com/tendermint/tendermint/pull/2913)
    ReverseIterator API change: start < end, and end is exclusive.
  - [types] [\#2932](https://github.com/tendermint/tendermint/issues/2932) Rename `Validator.Accum` to `Validator.ProposerPriority`

* Blockchain Protocol
  - [state] [\#2714](https://github.com/tendermint/tendermint/issues/2714) Validators can now only use pubkeys allowed within
    ConsensusParams.Validator.PubKeyTypes

* P2P Protocol
  - [consensus] [\#2871](https://github.com/tendermint/tendermint/issues/2871)
    Remove *ProposalHeartbeat* message as it serves no real purpose (@srmo)
  - [state] Fixes for proposer selection:
    - [\#2785](https://github.com/tendermint/tendermint/issues/2785) Accum for new validators is `-1.125*totalVotingPower` instead of 0
    - [\#2941](https://github.com/tendermint/tendermint/issues/2941) val.Accum is preserved during ValidatorSet.Update to avoid being
      reset to 0

### IMPROVEMENTS:

- [state] [\#2929](https://github.com/tendermint/tendermint/issues/2929) Minor refactor of updateState logic (@danil-lashin)
- [node] [\#2959](https://github.com/tendermint/tendermint/issues/2959) Allow node to start even if software's BlockProtocol is
  different from state's BlockProtocol
- [pex] [\#2959](https://github.com/tendermint/tendermint/issues/2959) Pex reactor logger uses `module=pex`

### BUG FIXES:

- [p2p] [\#2968](https://github.com/tendermint/tendermint/issues/2968) Panic on transport error rather than continuing to run but not
  accept new connections
- [p2p] [\#2969](https://github.com/tendermint/tendermint/issues/2969) Fix mismatch in peer count between `/net_info` and the prometheus
  metrics
- [rpc] [\#2408](https://github.com/tendermint/tendermint/issues/2408) `/broadcast_tx_commit`: Fix "interface conversion: interface {} in nil, not EventDataTx" panic (could happen if somebody sent a tx using `/broadcast_tx_commit` while Tendermint was being stopped)
- [state] [\#2785](https://github.com/tendermint/tendermint/issues/2785) Fix accum for new validators to be `-1.125*totalVotingPower`
  instead of 0, forcing them to wait before becoming the proposer. Also:
  - do not batch clip
  - keep accums averaged near 0
- [txindex/kv] [\#2925](https://github.com/tendermint/tendermint/issues/2925) Don't return false positives when range searching for a prefix of a tag value
- [types] [\#2938](https://github.com/tendermint/tendermint/issues/2938) Fix regression in v0.26.4 where we panic on empty
  genDoc.Validators
- [types] [\#2941](https://github.com/tendermint/tendermint/issues/2941) Preserve val.Accum during ValidatorSet.Update to avoid it being
  reset to 0 every time a validator is updated
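As a rough illustration of the `-1.125*totalVotingPower` rule for newly added validators: the factor can be expressed without floating point as `-(P + P/8)`. The integer formulation below is an assumption about how such a value could be computed, not a quote of the actual `state` package code:

```go
package proposerexample

// initialProposerPriority places a freshly added validator "behind" the
// existing set: -1.125 * totalVotingPower == -(P + P/8) in integer math,
// so the newcomer has to wait before it gets to propose.
func initialProposerPriority(totalVotingPower int64) int64 {
	return -(totalVotingPower + totalVotingPower/8)
}
```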
## v0.26.4

*November 27th, 2018*

Special thanks to external contributors on this release:
ackratos, goolAdapter, james-ray, joe-bowman, kostko,
nagarajmanjunath, tomtau

@ackratos, @goolAdapter, @james-ray, @joe-bowman, @kostko,
@nagarajmanjunath, @tomtau

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).
@@ -1,14 +1,9 @@
# Pending
## v0.31.0

## v0.27.0

*TBD*
**

Special thanks to external contributors on this release:

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

* CLI/RPC/Config
@@ -3,7 +3,7 @@ set -e

# Get the tag from the version, or try to figure it out.
if [ -z "$TAG" ]; then
TAG=$(awk -F\" '/Version =/ { print $2; exit }' < ../version/version.go)
TAG=$(awk -F\" '/TMCoreSemVer =/ { print $2; exit }' < ../version/version.go)
fi
if [ -z "$TAG" ]; then
echo "Please specify a tag."

@@ -3,7 +3,7 @@ set -e

# Get the tag from the version, or try to figure it out.
if [ -z "$TAG" ]; then
TAG=$(awk -F\" '/Version =/ { print $2; exit }' < ../version/version.go)
TAG=$(awk -F\" '/TMCoreSemVer =/ { print $2; exit }' < ../version/version.go)
fi
if [ -z "$TAG" ]; then
echo "Please specify a tag."
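The awk change above works because the release scripts now look for `TMCoreSemVer` rather than `Version` in `version/version.go`. Roughly, that file declares something like the following (a sketch only; the surrounding constants are abbreviated and the version number shown is illustrative):

```go
package version

var (
	// TMCoreSemVer is the semantic version of Tendermint Core; the release
	// scripts above extract the quoted value with awk to derive the git tag.
	TMCoreSemVer = "0.30.2"
)
```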
28 Gopkg.lock generated

@@ -10,12 +10,11 @@
revision = "3a771d992973f24aa725d07868b467d1ddfceafb"

[[projects]]
branch = "master"
digest = "1:c0decf632843204d2b8781de7b26e7038584e2dcccc7e2f401e88ae85b1df2b7"
digest = "1:093bf93a65962e8191e3e8cd8fc6c363f83d43caca9739c906531ba7210a9904"
name = "github.com/btcsuite/btcd"
packages = ["btcec"]
pruneopts = "UT"
revision = "67e573d211ace594f1366b4ce9d39726c4b19bd0"
revision = "ed77733ec07dfc8a513741138419b8d9d3de9d2d"

[[projects]]
digest = "1:1d8e1cb71c33a9470bbbae09bfec09db43c6bf358dfcae13cd8807c4e2a9a2bf"

@@ -35,6 +34,14 @@
revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73"
version = "v1.1.1"

[[projects]]
digest = "1:b42be5a3601f833e0b9f2d6625d887ec1309764bfcac3d518f3db425dcd4ec5c"
name = "github.com/ethereum/go-ethereum"
packages = ["crypto/secp256k1"]
pruneopts = "T"
revision = "9dc5d1a915ac0e0bd8429d6ac41df50eec91de5f"
version = "v1.8.21"

[[projects]]
digest = "1:544229a3ca0fb2dd5ebc2896d3d2ff7ce096d9751635301e44e37e761349ee70"
name = "github.com/fortytw2/leaktest"

@@ -360,13 +367,6 @@
pruneopts = "UT"
revision = "6b91fda63f2e36186f1c9d0e48578defb69c5d43"

[[projects]]
digest = "1:605b6546f3f43745695298ec2d342d3e952b6d91cdf9f349bea9315f677d759f"
name = "github.com/tendermint/btcd"
packages = ["btcec"]
pruneopts = "UT"
revision = "e5840949ff4fff0c56f9b6a541e22b63581ea9df"

[[projects]]
digest = "1:ad9c4c1a4e7875330b1f62906f2830f043a23edb5db997e3a5ac5d3e6eadf80a"
name = "github.com/tendermint/go-amino"

@@ -376,7 +376,7 @@
version = "v0.14.1"

[[projects]]
digest = "1:72b71e3a29775e5752ed7a8012052a3dee165e27ec18cedddae5288058f09acf"
digest = "1:00d2b3e64cdc3fa69aa250dfbe4cc38c4837d4f37e62279be2ae52107ffbbb44"
name = "golang.org/x/crypto"
packages = [
"bcrypt",

@@ -397,8 +397,7 @@
"salsa20/salsa",
]
pruneopts = "UT"
revision = "3764759f34a542a3aef74d6b02e35be7ab893bba"
source = "github.com/tendermint/crypto"
revision = "505ab145d0a99da450461ae2c1a9f6cd10d1f447"

[[projects]]
digest = "1:d36f55a999540d29b6ea3c2ea29d71c76b1d9853fdcd3e5c5cb4836f2ba118f1"

@@ -504,8 +503,10 @@
analyzer-name = "dep"
analyzer-version = 1
input-imports = [
"github.com/btcsuite/btcd/btcec",
"github.com/btcsuite/btcutil/base58",
"github.com/btcsuite/btcutil/bech32",
"github.com/ethereum/go-ethereum/crypto/secp256k1",
"github.com/fortytw2/leaktest",
"github.com/go-kit/kit/log",
"github.com/go-kit/kit/log/level",

@@ -535,7 +536,6 @@
"github.com/syndtr/goleveldb/leveldb/errors",
"github.com/syndtr/goleveldb/leveldb/iterator",
"github.com/syndtr/goleveldb/leveldb/opt",
"github.com/tendermint/btcd/btcec",
"github.com/tendermint/go-amino",
"golang.org/x/crypto/bcrypt",
"golang.org/x/crypto/chacha20poly1305",
22 Gopkg.toml

@@ -75,14 +75,29 @@
name = "github.com/prometheus/client_golang"
version = "^0.9.1"

# we use the secp256k1 implementation:
[[constraint]]
name = "github.com/ethereum/go-ethereum"
version = "^v1.8.21"

# Prevent dep from pruning build scripts and codegen templates
# note: this leaves the whole go-ethereum package in vendor
# can be removed when https://github.com/golang/dep/issues/1847 is resolved
[[prune.project]]
name = "github.com/ethereum/go-ethereum"
unused-packages = false

###################################
## Some repos dont have releases.
## Pin to revision

[[constraint]]
name = "github.com/btcsuite/btcd"
revision = "ed77733ec07dfc8a513741138419b8d9d3de9d2d"

[[constraint]]
name = "golang.org/x/crypto"
source = "github.com/tendermint/crypto"
revision = "3764759f34a542a3aef74d6b02e35be7ab893bba"
revision = "505ab145d0a99da450461ae2c1a9f6cd10d1f447"

[[override]]
name = "github.com/jmhodges/levigo"

@@ -93,9 +108,6 @@
name = "github.com/btcsuite/btcutil"
revision = "d4cc87b860166d00d6b5b9e0d3b3d71d6088d4d4"

[[constraint]]
name = "github.com/tendermint/btcd"
revision = "e5840949ff4fff0c56f9b6a541e22b63581ea9df"

[[constraint]]
name = "github.com/rcrowley/go-metrics"
67 Makefile

@@ -1,7 +1,7 @@
GOTOOLS = \
github.com/mitchellh/gox \
github.com/golang/dep/cmd/dep \
github.com/alecthomas/gometalinter \
github.com/golangci/golangci-lint/cmd/golangci-lint \
github.com/gogo/protobuf/protoc-gen-gogo \
github.com/square/certstrap
GOBIN?=${GOPATH}/bin

@@ -11,8 +11,6 @@ INCLUDE = -I=. -I=${GOPATH}/src -I=${GOPATH}/src/github.com/gogo/protobuf/protob
BUILD_TAGS?='tendermint'
BUILD_FLAGS = -ldflags "-X github.com/tendermint/tendermint/version.GitCommit=`git rev-parse --short=8 HEAD`"

LINT_FLAGS = --exclude '.*\.pb\.go' --exclude 'vendor/*' --vendor --deadline=600s

all: check build test install

check: check_tools get_vendor_deps

@@ -32,6 +30,9 @@ build_race:
install:
CGO_ENABLED=0 go install $(BUILD_FLAGS) -tags $(BUILD_TAGS) ./cmd/tendermint

install_c:
CGO_ENABLED=1 go install $(BUILD_FLAGS) -tags "$(BUILD_TAGS) gcc" ./cmd/tendermint

########################################
### Protobuf

@@ -79,10 +80,6 @@ get_tools:
@echo "--> Installing tools"
./scripts/get_tools.sh

get_dev_tools:
@echo "--> Downloading linters (this may take awhile)"
$(GOPATH)/src/github.com/alecthomas/gometalinter/scripts/install.sh -b $(GOBIN)

update_tools:
@echo "--> Updating tools"
./scripts/get_tools.sh

@@ -223,6 +220,22 @@ test_race:
@echo "--> Running go test --race"
@GOCACHE=off go test -p 1 -v -race $(PACKAGES)

# uses https://github.com/sasha-s/go-deadlock/ to detect potential deadlocks
test_with_deadlock:
make set_with_deadlock
make test
make cleanup_after_test_with_deadlock

set_with_deadlock:
find . -name "*.go" | grep -v "vendor/" | xargs -n 1 sed -i.bak 's/sync.RWMutex/deadlock.RWMutex/'
find . -name "*.go" | grep -v "vendor/" | xargs -n 1 sed -i.bak 's/sync.Mutex/deadlock.Mutex/'
find . -name "*.go" | grep -v "vendor/" | xargs -n 1 goimports -w

# cleanes up after you ran test_with_deadlock
cleanup_after_test_with_deadlock:
find . -name "*.go" | grep -v "vendor/" | xargs -n 1 sed -i.bak 's/deadlock.RWMutex/sync.RWMutex/'
find . -name "*.go" | grep -v "vendor/" | xargs -n 1 sed -i.bak 's/deadlock.Mutex/sync.Mutex/'
find . -name "*.go" | grep -v "vendor/" | xargs -n 1 goimports -w
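For context on why a plain `sed` swap is enough for the `test_with_deadlock` targets: `go-deadlock` exposes mutex types that are drop-in replacements for `sync`'s, so after the substitution a typical guarded struct effectively compiles against code like the sketch below (illustrative only, not a file from this repository):

```go
package deadlockexample

import "github.com/sasha-s/go-deadlock"

// counter looks exactly as it would with sync.Mutex; only the import and the
// type name change, which is why the Makefile can rewrite it with sed and
// then swap it back after the test run.
type counter struct {
	mtx deadlock.Mutex // was sync.Mutex before set_with_deadlock
	n   int
}

func (c *counter) Inc() {
	c.mtx.Lock()
	defer c.mtx.Unlock()
	c.n++
}
```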
########################################
### Formatting, linting, and vetting

@@ -230,38 +243,9 @@ test_race:
fmt:
@go fmt ./...

metalinter:
lint:
@echo "--> Running linter"
@gometalinter $(LINT_FLAGS) --disable-all \
--enable=deadcode \
--enable=gosimple \
--enable=misspell \
--enable=safesql \
./...
#--enable=gas \
#--enable=maligned \
#--enable=dupl \
#--enable=errcheck \
#--enable=goconst \
#--enable=gocyclo \
#--enable=goimports \
#--enable=golint \ <== comments on anything exported
#--enable=gotype \
#--enable=ineffassign \
#--enable=interfacer \
#--enable=megacheck \
#--enable=staticcheck \
#--enable=structcheck \
#--enable=unconvert \
#--enable=unparam \
#--enable=unused \
#--enable=varcheck \
#--enable=vet \
#--enable=vetshadow \

metalinter_all:
@echo "--> Running linter (all)"
gometalinter $(LINT_FLAGS) --enable-all --disable=lll ./...
@golangci-lint run

DESTINATION = ./index.html.md

@@ -285,12 +269,11 @@ build-docker:
### Local testnet using docker

# Build linux binary on other platforms
build-linux:
build-linux: get_tools get_vendor_deps
GOOS=linux GOARCH=amd64 $(MAKE) build

build-docker-localnode:
cd networks/local
make
@cd networks/local && make

# Run a 4-node testnet locally
localnet-start: localnet-stop

@@ -328,4 +311,4 @@ build-slate:
# To avoid unintended conflicts with file names, always add to .PHONY
# unless there is a reason not to.
# https://www.gnu.org/software/make/manual/html_node/Phony-Targets.html
.PHONY: check build build_race build_abci dist install install_abci check_dep check_tools get_tools get_dev_tools update_tools get_vendor_deps draw_deps get_protoc protoc_abci protoc_libs gen_certs clean_certs grpc_dbserver test_cover test_apps test_persistence test_p2p test test_race test_integrations test_release test100 vagrant_test fmt rpc-docs build-linux localnet-start localnet-stop build-docker build-docker-localnode sentry-start sentry-config sentry-stop build-slate protoc_grpc protoc_all
.PHONY: check build build_race build_abci dist install install_abci check_dep check_tools get_tools update_tools get_vendor_deps draw_deps get_protoc protoc_abci protoc_libs gen_certs clean_certs grpc_dbserver test_cover test_apps test_persistence test_p2p test test_race test_integrations test_release test100 vagrant_test fmt rpc-docs build-linux localnet-start localnet-stop build-docker build-docker-localnode sentry-start sentry-config sentry-stop build-slate protoc_grpc protoc_all build_c install_c test_with_deadlock cleanup_after_test_with_deadlock lint
158 PHILOSOPHY.md Normal file

@@ -0,0 +1,158 @@
## Design goals

The design goals for Tendermint (and the SDK and related libraries) are:

* Simplicity and Legibility
* Parallel performance, namely ability to utilize multicore architecture
* Ability to evolve the codebase bug-free
* Debuggability
* Complete correctness that considers all edge cases, esp in concurrency
* Future-proof modular architecture, message protocol, APIs, and encapsulation


### Justification

Legibility is key to maintaining bug-free software as it evolves toward more
optimizations, more ease of debugging, and additional features.

It is too easy to introduce bugs over time by replacing lines of code with
those that may panic, which means ideally locks are unlocked by defer
statements.

For example,

```go
func (obj *MyObj) something() {
	mtx.Lock()
	obj.something = other
	mtx.Unlock()
}
```

It is too easy to refactor the codebase in the future to replace `other` with
`other.String()` for example, and this may introduce a bug that causes a
deadlock. So as much as reasonably possible, we need to be using defer
statements, even though it introduces additional overhead.

If it is necessary to optimize the unlocking of mutex locks, the solution is
more modularity via smaller functions, so that defer'd unlocks are scoped
within a smaller function.
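Concretely, the defer-based version of the example above (a sketch in the same spirit as the snippet in this document, using the same `mtx` and `other` names) stays correct even if the body later grows a call that can panic:

```go
func (obj *MyObj) something() {
	mtx.Lock()
	defer mtx.Unlock() // runs even if the body below panics

	obj.something = other // safe to later evolve into a call that may panic
}
```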
Similarly, idiomatic for-loops should always be preferred over those that use
custom counters, because it is too easy to evolve the body of a for-loop to
become more complicated over time, and it becomes more and more difficult to
assess the correctness of such a for-loop by visual inspection.


### On performance

It doesn't matter whether there are alternative implementations that are 2x or
3x more performant, when the software doesn't work, deadlocks, or if bugs
cannot be debugged. By taking advantage of multicore concurrency, the
Tendermint implementation will at least be an order of magnitude within the
range of what is theoretically possible. The design philosophy of Tendermint,
and the choice of Go as the implementation language, is meant to make the Tendermint
implementation the standard specification for concurrent BFT software.

By focusing on the message protocols (e.g. ABCI, p2p messages) and on
encapsulation (e.g. the IAVL module, (relatively) independent reactors), we are both
implementing a standard implementation to be used as the specification for
future implementations in more optimizable languages like Rust, Java, and C++,
as well as creating sufficiently performant software. Tendermint Core will
never be as fast as future implementations of the Tendermint Spec, because Go
isn't designed to be as fast as possible. The advantage of using Go is that we
can develop the whole stack of modular components **faster** than in other
languages.

Furthermore, the real bottleneck is in the application layer, and it isn't
necessary to support more than a sufficiently decentralized set of validators
(e.g. 100 ~ 300 validators is sufficient, with delegated bonded PoS).

Instead of optimizing Tendermint performance down to the metal, let's focus on
optimizing other matters, namely the ability to push feature-complete software
that works well enough, can be debugged and maintained, and can serve as a spec
for future implementations.


### On encapsulation

In order to create maintainable, forward-optimizable software, it is critical
to develop well-encapsulated objects that have well understood properties, and
to re-use these easy-to-use-correctly components as building blocks for further
encapsulated meta-objects.

For example, mutexes are cheap enough for Tendermint's design goals when there
isn't goroutine contention, so it is encouraged to create concurrency safe
structures with struct-level mutexes. If they are used in the context of
non-concurrent logic, then the performance is good enough. If they are used in
the context of concurrent logic, then it will still perform correctly.

Examples of this design principle can be seen in the types.ValidatorSet struct
and the cmn.Rand struct. Each is a single struct declaration that can be used
in both concurrent and non-concurrent logic, and because it is well encapsulated,
it's easy to get the usage of the mutex right.

#### example: cmn.Rand:

`The default Source is safe for concurrent use by multiple goroutines, but
Sources created by NewSource are not`. The reason why the default
package-level source is safe for concurrent use is because it is protected (see
`lockedSource` in https://golang.org/src/math/rand/rand.go).

But we shouldn't rely on the global source; we should be creating our own
Rand/Source instances and using them, especially for determinism in testing.
So it is reasonable to have cmn.Rand be protected by a mutex. Whether we want
our own implementation of Rand is another question, but the answer there is
also in the affirmative. Sometimes you want to know where Rand is being used
in your code, so it becomes a simple matter of dropping in a log statement to
inject inspectability into Rand usage. Also, it is nice to be able to extend
the functionality of Rand with custom methods. For these reasons, and for the
reasons which are outlined in this design philosophy document, we should
continue to use the cmn.Rand object, with mutex protection.
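A minimal sketch of the struct-level-mutex pattern described above, in the spirit of cmn.Rand (illustrative only; the real cmn.Rand has more methods and different internals):

```go
package cmnexample

import (
	"math/rand"
	"sync"
)

// Rand wraps a non-thread-safe rand.Source behind a struct-level mutex, so the
// same value is safe to use from both concurrent and non-concurrent code.
type Rand struct {
	mtx sync.Mutex
	rnd *rand.Rand
}

func NewRand(seed int64) *Rand {
	return &Rand{rnd: rand.New(rand.NewSource(seed))}
}

func (r *Rand) Int63() int64 {
	r.mtx.Lock()
	defer r.mtx.Unlock()
	return r.rnd.Int63()
}
```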
Another key aspect of good encapsulation is the choice of exposed vs unexposed
|
||||
methods. It should be clear to the reader of the code, which methods are
|
||||
intended to be used in what context, and what safe usage is. Part of this is
|
||||
solved by hiding methods via unexported methods. Another part of this is
|
||||
naming conventions on the methods (e.g. underscores) with good documentation,
|
||||
and code organization. If there are too many exposed methods and it isn't
|
||||
clear what methods have what side effects, then there is something wrong about
|
||||
the design of abstractions that should be revisited.
|
||||
|
||||
|
||||
### On concurrency

In order for Tendermint to remain relevant in the years to come, it is vital
for Tendermint to take advantage of multicore architectures. Due to the nature
of the problem, namely consensus across a concurrent p2p gossip network, and
the need to handle RPC requests for a large number of consuming subscribers,
Tendermint development unavoidably requires expertise in concurrency design,
especially for the reactor design and for RPC request handling.

## Guidelines

Here are some guidelines for designing for (sufficient) performance and concurrency:

* Mutex locks are cheap enough when there isn't contention.
* Do not optimize code without analytical or observed proof that it is in a hot path.
* Don't over-use channels when mutex locks with encapsulation are sufficient.
* The need to drain channels is often a hint of unconsidered edge cases.
* The creation of O(N) one-off goroutines is generally technical debt that
  needs to get addressed sooner rather than later. Avoid creating too many
  goroutines as a patch around incomplete concurrency design, or at least be
  aware of the debt and do not invest in the debt. On the other hand, Tendermint
  is designed to have a limited number of peers (e.g. 10 or 20), so the creation
  of O(C) goroutines per O(P) peers is still O(C\*P=constant).
* Use defer statements to unlock as much as possible. If you want to unlock sooner,
  try to create more modular functions that do make use of defer statements, as in
  the sketch after this list.
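
A rough sketch of that last point (hypothetical code, not taken from the
codebase): the small `keys` helper lets `defer` release the lock as soon as a
snapshot is taken, so the slow work happens outside the critical section:

```go
package store

import "sync"

// Store guards its map with a struct-level mutex.
type Store struct {
	mtx  sync.Mutex
	data map[string]string
}

// processAll copies the keys under the lock, then does the slow work
// without holding the mutex for the whole loop.
func (s *Store) processAll(slowWork func(key string)) {
	for _, k := range s.keys() {
		slowWork(k)
	}
}

// keys is a small helper whose defer releases the lock as soon as the
// snapshot of keys has been taken.
func (s *Store) keys() []string {
	s.mtx.Lock()
	defer s.mtx.Unlock()
	ks := make([]string, 0, len(s.data))
	for k := range s.data {
		ks = append(ks, k)
	}
	return ks
}
```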

## Mantras

* Premature optimization kills.
* Readability is paramount.
* Beautiful is better than fast.
* In the face of ambiguity, refuse the temptation to guess.
* In the face of bugs, refuse the temptation to cover the bug.
* There should be one-- and preferably only one --obvious way to do it.

README.md (96 changes)
@@ -1,8 +1,8 @@
|
||||
# Tendermint
|
||||
|
||||
[Byzantine-Fault Tolerant](https://en.wikipedia.org/wiki/Byzantine_fault_tolerance)
|
||||
[State Machine Replication](https://en.wikipedia.org/wiki/State_machine_replication).
|
||||
Or [Blockchain](https://en.wikipedia.org/wiki/Blockchain_(database)) for short.
|
||||
[State Machines](https://en.wikipedia.org/wiki/State_machine_replication).
|
||||
Or [Blockchain](https://en.wikipedia.org/wiki/Blockchain_(database)), for short.
|
||||
|
||||
[](https://github.com/tendermint/tendermint/releases/latest)
|
||||
[
|
||||
- [Remote cluster using terraform and ansible](/docs/networks/terraform-and-ansible.md)
|
||||
- [Join the Cosmos testnet](https://cosmos.network/testnet)
|
||||
|
||||
## Resources
|
||||
|
||||
### Tendermint Core
|
||||
|
||||
For details about the blockchain data structures and the p2p protocols, see
the [Tendermint specification](/docs/spec).
|
||||
|
||||
For details on using the software, see the [documentation](/docs/) which is also
|
||||
hosted at: https://tendermint.com/docs/
|
||||
|
||||
### Tools
|
||||
|
||||
Benchmarking and monitoring are provided by `tm-bench` and `tm-monitor`, respectively.
Their code is found [here](/tools) and these binaries need to be built separately.
Additional documentation is found [here](/docs/tools).
|
||||
|
||||
### Sub-projects
|
||||
|
||||
* [Amino](http://github.com/tendermint/go-amino), a reflection-based improvement on proto3
|
||||
* [IAVL](http://github.com/tendermint/iavl), Merkleized IAVL+ Tree implementation
|
||||
|
||||
### Applications
|
||||
|
||||
* [Cosmos SDK](http://github.com/cosmos/cosmos-sdk); a cryptocurrency application framework
|
||||
* [Ethermint](http://github.com/cosmos/ethermint); Ethereum on Tendermint
|
||||
* [Many more](https://tendermint.com/ecosystem)
|
||||
|
||||
### Research
|
||||
|
||||
* [The latest gossip on BFT consensus](https://arxiv.org/abs/1807.04938)
|
||||
* [Master's Thesis on Tendermint](https://atrium.lib.uoguelph.ca/xmlui/handle/10214/9769)
|
||||
* [Original Whitepaper](https://tendermint.com/static/docs/tendermint.pdf)
|
||||
* [Blog](https://blog.cosmos.network/tendermint/home)
|
||||
|
||||
## Contributing
|
||||
|
||||
Yay open source! Please see our [contributing guidelines](CONTRIBUTING.md).
|
||||
Please abide by the [Code of Conduct](CODE_OF_CONDUCT.md) in all interactions,
|
||||
and the [contributing guidelines](CONTRIBUTING.md) when submitting code.
|
||||
|
||||
Join the larger community on the [forum](https://forum.cosmos.network/) and the [chat](https://riot.im/app/#/room/#tendermint:matrix.org).
|
||||
|
||||
To learn more about the structure of the software, watch the [Developer
|
||||
Sessions](https://www.youtube.com/playlist?list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv)
|
||||
and read some [Architectural
|
||||
Decision Records](https://github.com/tendermint/tendermint/tree/master/docs/architecture).
|
||||
|
||||
Learn more by reading the code and comparing it to the
|
||||
[specification](https://github.com/tendermint/tendermint/tree/develop/docs/spec).
|
||||
|
||||
## Versioning
|
||||
|
||||
### SemVer
|
||||
### Semantic Versioning
|
||||
|
||||
Tendermint uses [SemVer](http://semver.org/) to determine when and how the version changes.
|
||||
Tendermint uses [Semantic Versioning](http://semver.org/) to determine when and how the version changes.
|
||||
According to SemVer, anything in the public API can change at any time before version 1.0.0
|
||||
|
||||
To provide some stability to Tendermint users in these 0.X.X days, the MINOR version is used
|
||||
@@ -119,6 +96,7 @@ include the in-process Go APIs.
|
||||
That said, breaking changes in the following packages will be documented in the
|
||||
CHANGELOG even if they don't lead to MINOR version bumps:
|
||||
|
||||
- crypto
|
||||
- types
|
||||
- rpc/client
|
||||
- config
|
||||
@@ -145,8 +123,40 @@ data into the new chain.
|
||||
However, any bump in the PATCH version should be compatible with existing histories
|
||||
(if not please open an [issue](https://github.com/tendermint/tendermint/issues)).
|
||||
|
||||
For more information on upgrading, see [here](./UPGRADING.md)
|
||||
For more information on upgrading, see [UPGRADING.md](./UPGRADING.md)
|
||||
|
||||
## Code of Conduct
|
||||
## Resources
|
||||
|
||||
### Tendermint Core
|
||||
|
||||
For details about the blockchain data structures and the p2p protocols, see the
|
||||
[Tendermint specification](/docs/spec).
|
||||
|
||||
For details on using the software, see the [documentation](/docs/) which is also
|
||||
hosted at: https://tendermint.com/docs/
|
||||
|
||||
### Tools
|
||||
|
||||
Benchmarking and monitoring are provided by `tm-bench` and `tm-monitor`, respectively.
Their code is found [here](/tools) and these binaries need to be built separately.
Additional documentation is found [here](/docs/tools).
|
||||
|
||||
### Sub-projects
|
||||
|
||||
* [Amino](http://github.com/tendermint/go-amino), reflection-based proto3, with
|
||||
interfaces
|
||||
* [IAVL](http://github.com/tendermint/iavl), Merkleized IAVL+ Tree implementation
|
||||
|
||||
### Applications
|
||||
|
||||
* [Cosmos SDK](http://github.com/cosmos/cosmos-sdk); a cryptocurrency application framework
|
||||
* [Ethermint](http://github.com/cosmos/ethermint); Ethereum on Tendermint
|
||||
* [Many more](https://tendermint.com/ecosystem)
|
||||
|
||||
### Research
|
||||
|
||||
* [The latest gossip on BFT consensus](https://arxiv.org/abs/1807.04938)
|
||||
* [Master's Thesis on Tendermint](https://atrium.lib.uoguelph.ca/xmlui/handle/10214/9769)
|
||||
* [Original Whitepaper](https://tendermint.com/static/docs/tendermint.pdf)
|
||||
* [Blog](https://blog.cosmos.network/tendermint/home)
|
||||
|
||||
Please read, understand and adhere to our [code of conduct](CODE_OF_CONDUCT.md).
|
||||
|
UPGRADING.md (139 changes)
@@ -3,9 +3,144 @@
|
||||
This guide provides steps to be followed when you upgrade your applications to
|
||||
a newer version of Tendermint Core.
|
||||
|
||||
## v0.30.0
|
||||
|
||||
This release contains a breaking change to both the block and p2p protocols;
however, it may be compatible with blockchains created with v0.29.0, depending
on the chain history. If your blockchain has not included any pieces of
evidence, or no piece of evidence has been included in more than one block,
and if your application has never returned multiple updates for the same
validator in a single block, then v0.30.0 will work fine with blockchains
created with v0.29.0.
|
||||
|
||||
The p2p protocol change is to fix the proposer selection algorithm again.
|
||||
Note that proposer selection is purely a p2p concern right
|
||||
now since the algorithm is only relevant during real time consensus.
|
||||
This change is thus compatible with v0.29.0, but
|
||||
all nodes must be upgraded to avoid disagreements on the proposer.
|
||||
|
||||
### Applications

Applications must ensure they do not return duplicates in
`ResponseEndBlock.ValidatorUpdates`. A pubkey must only appear once per set of
updates. Duplicates will cause irrecoverable failure. If you have a very good
reason why we shouldn't do this, please open an issue.
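
As an illustration only, an application might guard against this before
returning its updates; the helper below is hypothetical and assumes the ABCI
`types.ValidatorUpdate`/`types.PubKey` fields, so treat it as a sketch rather
than part of the Tendermint API:

```go
package app

import (
	"fmt"

	"github.com/tendermint/tendermint/abci/types"
)

// checkNoDuplicateUpdates returns an error if the same pubkey appears
// more than once in a set of validator updates.
func checkNoDuplicateUpdates(updates []types.ValidatorUpdate) error {
	seen := make(map[string]struct{}, len(updates))
	for _, u := range updates {
		key := fmt.Sprintf("%s:%X", u.PubKey.Type, u.PubKey.Data)
		if _, ok := seen[key]; ok {
			return fmt.Errorf("duplicate validator update for pubkey %s", key)
		}
		seen[key] = struct{}{}
	}
	return nil
}
```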
|
||||
|
||||
## v0.29.0
|
||||
|
||||
This release contains some breaking changes to the block and p2p protocols,
|
||||
and will not be compatible with any previous versions of the software, primarily
|
||||
due to changes in how various data structures are hashed.
|
||||
|
||||
Any implementations of Tendermint blockchain verification, including lite clients,
|
||||
will need to be updated. For specific details:
|
||||
- [Merkle tree](./docs/spec/blockchain/encoding.md#merkle-trees)
|
||||
- [ConsensusParams](./docs/spec/blockchain/state.md#consensusparams)
|
||||
|
||||
There was also a small change to field ordering in the vote struct. Any
|
||||
implementations of an out-of-process validator (like a Key-Management Server)
|
||||
will need to be updated. For specific details:
|
||||
- [Vote](https://github.com/tendermint/tendermint/blob/develop/docs/spec/consensus/signing.md#votes)
|
||||
|
||||
Finally, the proposer selection algorithm continues to evolve. See the
|
||||
[work-in-progress
|
||||
specification](https://github.com/tendermint/tendermint/pull/3140).
|
||||
|
||||
For everything else, please see the [CHANGELOG](./CHANGELOG.md#v0.29.0).
|
||||
|
||||
## v0.28.0
|
||||
|
||||
This release breaks the format for the `priv_validator.json` file
|
||||
and the protocol used for the external validator process.
|
||||
It is compatible with v0.27.0 blockchains (neither the BlockProtocol nor the
P2PProtocol has changed).
|
||||
|
||||
Please read carefully for details about upgrading.
|
||||
|
||||
**Note:** Backup your `config/priv_validator.json`
|
||||
before proceeding.
|
||||
|
||||
### `priv_validator.json`
|
||||
|
||||
The `config/priv_validator.json` is now two files:
`config/priv_validator_key.json` and `data/priv_validator_state.json`.
The former contains the key material; the latter contains the details of the
last message signed.
|
||||
|
||||
When running v0.28.0 for the first time, it will back up any pre-existing
|
||||
`priv_validator.json` file and proceed to split it into the two new files.
|
||||
Upgrading should happen automatically without problem.
|
||||
|
||||
To upgrade manually, use the provided `privValUpgrade.go` script, with exact paths for the old
`priv_validator.json` and the locations for the two new files. It's recommended
to use the default paths of `config/priv_validator_key.json` and
`data/priv_validator_state.json`, respectively:
|
||||
|
||||
```
|
||||
go run scripts/privValUpgrade.go <old-path> <new-key-path> <new-state-path>
|
||||
```
|
||||
|
||||
### External validator signers
|
||||
|
||||
The Unix and TCP implementations of the remote signing validator
|
||||
have been consolidated into a single implementation.
|
||||
Thus in both cases, the external process is expected to dial
|
||||
Tendermint. This is different from how Unix sockets used to work, where
|
||||
Tendermint dialed the external process.
|
||||
|
||||
The `PubKeyMsg` was also split into separate `Request` and `Response` types
|
||||
for consistency with other messages.
|
||||
|
||||
Note that the TCP sockets don't yet use a persistent key,
|
||||
so while they're encrypted, they can't yet be properly authenticated.
|
||||
See [#3105](https://github.com/tendermint/tendermint/issues/3105).
|
||||
Note the Unix socket has neither encryption nor authentication, but will
|
||||
add a shared-secret in [#3099](https://github.com/tendermint/tendermint/issues/3099).
|
||||
|
||||
## v0.27.0
|
||||
|
||||
This release contains some breaking changes to the block and p2p protocols,
|
||||
but does not change any core data structures, so it should be compatible with
|
||||
existing blockchains from the v0.26 series that only used Ed25519 validator keys.
|
||||
Blockchains using Secp256k1 for validators will not be compatible. This is due
|
||||
to the fact that we now enforce which key types validators can use as a
|
||||
consensus param. The default is Ed25519, and Secp256k1 must be activated
|
||||
explicitly.
|
||||
|
||||
It is recommended to upgrade all nodes at once to avoid incompatibilities at the
|
||||
peer layer - namely, the heartbeat consensus message has been removed (only
|
||||
relevant if `create_empty_blocks=false` or `create_empty_blocks_interval > 0`),
|
||||
and the proposer selection algorithm has changed. Since proposer information is
|
||||
never included in the blockchain, this change only affects the peer layer.
|
||||
|
||||
### Go API Changes
|
||||
|
||||
#### libs/db

The ReverseIterator API has changed the meaning of `start` and `end`.
Before, iteration was from `start` to `end`, where `start > end`.
Now, iteration is from `end` to `start`, where `start < end`.
The iterator also excludes `end`. This change allows a simplified and more
intuitive logic, aligning the semantic meaning of `start` and `end` in the
`Iterator` and `ReverseIterator`.
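
To illustrate the new semantics only (a toy reverse scan over a sorted key
slice, not the actual libs/db API):

```go
package main

import "fmt"

// reverseRange visits keys from high to low within [start, end),
// i.e. end is excluded, mirroring the new ReverseIterator semantics
// where start < end.
func reverseRange(sortedKeys []string, start, end string) []string {
	var out []string
	for i := len(sortedKeys) - 1; i >= 0; i-- {
		k := sortedKeys[i]
		if k >= end || k < start {
			continue
		}
		out = append(out, k)
	}
	return out
}

func main() {
	keys := []string{"a", "b", "c", "d"}
	fmt.Println(reverseRange(keys, "a", "c")) // prints [b a]
}
```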
|
||||
|
||||
### Applications

This release enforces a new consensus parameter,
ValidatorParams.PubKeyTypes. Applications must ensure that they only return
validator updates with the allowed PubKeyTypes. If a validator update includes
a pubkey type that is not included in ConsensusParams.Validator.PubKeyTypes,
block execution will fail and consensus will halt.

By default, only Ed25519 pubkeys may be used for validators. Enabling
Secp256k1 requires explicit modification of the ConsensusParams.
Please update your application accordingly (i.e. restrict validators to only
be able to use Ed25519 keys, or explicitly add additional key types to the
genesis file).
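
As a sketch of the second option, the consensus params could be adjusted when
building the genesis doc; the helper name, the exact key-type strings, and the
return type of `DefaultConsensusParams` are assumptions to verify against your
version of the `types` package:

```go
package genesis

import "github.com/tendermint/tendermint/types"

// allowBothKeyTypes returns consensus params that permit both key types.
// NOTE: the string values "ed25519" and "secp256k1" are assumed; check
// types/params.go for the constants used by your version.
func allowBothKeyTypes() *types.ConsensusParams {
	params := types.DefaultConsensusParams()
	params.Validator.PubKeyTypes = []string{"ed25519", "secp256k1"}
	return params
}
```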
|
||||
|
||||
## v0.26.0
|
||||
|
||||
New 0.26.0 release contains a lot of changes to core data types and protocols. It is not
|
||||
This release contains a lot of changes to core data types and protocols. It is not
|
||||
compatible with older versions, and there is no straightforward way to update
old data to be compatible with the new version.
|
||||
|
||||
@@ -67,7 +202,7 @@ For more information, see:
|
||||
|
||||
### Go API Changes
|
||||
|
||||
#### crypto.merkle
|
||||
#### crypto/merkle
|
||||
|
||||
The `merkle.Hasher` interface was removed. Functions which used to take `Hasher`
|
||||
now simply take `[]byte`. This means that any objects being Merklized should be
|
||||
|
Vagrantfile (vendored, 2 changes)
@@ -53,6 +53,6 @@ Vagrant.configure("2") do |config|
|
||||
|
||||
# get all deps and tools, ready to install/test
|
||||
su - vagrant -c 'source /home/vagrant/.bash_profile'
|
||||
su - vagrant -c 'cd /home/vagrant/go/src/github.com/tendermint/tendermint && make get_tools && make get_dev_tools && make get_vendor_deps'
|
||||
su - vagrant -c 'cd /home/vagrant/go/src/github.com/tendermint/tendermint && make get_tools && make get_vendor_deps'
|
||||
SHELL
|
||||
end
|
||||
|
@@ -129,7 +129,7 @@ func (cli *grpcClient) EchoAsync(msg string) *ReqRes {
|
||||
if err != nil {
|
||||
cli.StopForError(err)
|
||||
}
|
||||
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Echo{res}})
|
||||
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Echo{Echo: res}})
|
||||
}
|
||||
|
||||
func (cli *grpcClient) FlushAsync() *ReqRes {
|
||||
@@ -138,7 +138,7 @@ func (cli *grpcClient) FlushAsync() *ReqRes {
|
||||
if err != nil {
|
||||
cli.StopForError(err)
|
||||
}
|
||||
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Flush{res}})
|
||||
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Flush{Flush: res}})
|
||||
}
|
||||
|
||||
func (cli *grpcClient) InfoAsync(params types.RequestInfo) *ReqRes {
|
||||
@@ -147,7 +147,7 @@ func (cli *grpcClient) InfoAsync(params types.RequestInfo) *ReqRes {
|
||||
if err != nil {
|
||||
cli.StopForError(err)
|
||||
}
|
||||
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Info{res}})
|
||||
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Info{Info: res}})
|
||||
}
|
||||
|
||||
func (cli *grpcClient) SetOptionAsync(params types.RequestSetOption) *ReqRes {
|
||||
@@ -156,7 +156,7 @@ func (cli *grpcClient) SetOptionAsync(params types.RequestSetOption) *ReqRes {
|
||||
if err != nil {
|
||||
cli.StopForError(err)
|
||||
}
|
||||
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_SetOption{res}})
|
||||
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_SetOption{SetOption: res}})
|
||||
}
|
||||
|
||||
func (cli *grpcClient) DeliverTxAsync(tx []byte) *ReqRes {
|
||||
@@ -165,7 +165,7 @@ func (cli *grpcClient) DeliverTxAsync(tx []byte) *ReqRes {
|
||||
if err != nil {
|
||||
cli.StopForError(err)
|
||||
}
|
||||
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_DeliverTx{res}})
|
||||
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_DeliverTx{DeliverTx: res}})
|
||||
}
|
||||
|
||||
func (cli *grpcClient) CheckTxAsync(tx []byte) *ReqRes {
|
||||
@@ -174,7 +174,7 @@ func (cli *grpcClient) CheckTxAsync(tx []byte) *ReqRes {
|
||||
if err != nil {
|
||||
cli.StopForError(err)
|
||||
}
|
||||
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_CheckTx{res}})
|
||||
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_CheckTx{CheckTx: res}})
|
||||
}
|
||||
|
||||
func (cli *grpcClient) QueryAsync(params types.RequestQuery) *ReqRes {
|
||||
@@ -183,7 +183,7 @@ func (cli *grpcClient) QueryAsync(params types.RequestQuery) *ReqRes {
|
||||
if err != nil {
|
||||
cli.StopForError(err)
|
||||
}
|
||||
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Query{res}})
|
||||
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Query{Query: res}})
|
||||
}
|
||||
|
||||
func (cli *grpcClient) CommitAsync() *ReqRes {
|
||||
@@ -192,7 +192,7 @@ func (cli *grpcClient) CommitAsync() *ReqRes {
|
||||
if err != nil {
|
||||
cli.StopForError(err)
|
||||
}
|
||||
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Commit{res}})
|
||||
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Commit{Commit: res}})
|
||||
}
|
||||
|
||||
func (cli *grpcClient) InitChainAsync(params types.RequestInitChain) *ReqRes {
|
||||
@@ -201,7 +201,7 @@ func (cli *grpcClient) InitChainAsync(params types.RequestInitChain) *ReqRes {
|
||||
if err != nil {
|
||||
cli.StopForError(err)
|
||||
}
|
||||
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_InitChain{res}})
|
||||
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_InitChain{InitChain: res}})
|
||||
}
|
||||
|
||||
func (cli *grpcClient) BeginBlockAsync(params types.RequestBeginBlock) *ReqRes {
|
||||
@@ -210,7 +210,7 @@ func (cli *grpcClient) BeginBlockAsync(params types.RequestBeginBlock) *ReqRes {
|
||||
if err != nil {
|
||||
cli.StopForError(err)
|
||||
}
|
||||
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_BeginBlock{res}})
|
||||
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_BeginBlock{BeginBlock: res}})
|
||||
}
|
||||
|
||||
func (cli *grpcClient) EndBlockAsync(params types.RequestEndBlock) *ReqRes {
|
||||
@@ -219,7 +219,7 @@ func (cli *grpcClient) EndBlockAsync(params types.RequestEndBlock) *ReqRes {
|
||||
if err != nil {
|
||||
cli.StopForError(err)
|
||||
}
|
||||
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_EndBlock{res}})
|
||||
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_EndBlock{EndBlock: res}})
|
||||
}
|
||||
|
||||
func (cli *grpcClient) finishAsyncCall(req *types.Request, res *types.Response) *ReqRes {
|
||||
|
@@ -58,7 +58,7 @@ var RootCmd = &cobra.Command{
|
||||
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
|
||||
|
||||
switch cmd.Use {
|
||||
case "counter", "kvstore", "dummy": // for the examples apps, don't pre-run
|
||||
case "counter", "kvstore": // for the examples apps, don't pre-run
|
||||
return nil
|
||||
case "version": // skip running for version command
|
||||
return nil
|
||||
@@ -127,10 +127,6 @@ func addCounterFlags() {
|
||||
counterCmd.PersistentFlags().BoolVarP(&flagSerial, "serial", "", false, "enforce incrementing (serial) transactions")
|
||||
}
|
||||
|
||||
func addDummyFlags() {
|
||||
dummyCmd.PersistentFlags().StringVarP(&flagPersist, "persist", "", "", "directory to use for a database")
|
||||
}
|
||||
|
||||
func addKVStoreFlags() {
|
||||
kvstoreCmd.PersistentFlags().StringVarP(&flagPersist, "persist", "", "", "directory to use for a database")
|
||||
}
|
||||
@@ -152,10 +148,6 @@ func addCommands() {
|
||||
// examples
|
||||
addCounterFlags()
|
||||
RootCmd.AddCommand(counterCmd)
|
||||
// deprecated, left for backwards compatibility
|
||||
addDummyFlags()
|
||||
RootCmd.AddCommand(dummyCmd)
|
||||
// replaces dummy, see issue #196
|
||||
addKVStoreFlags()
|
||||
RootCmd.AddCommand(kvstoreCmd)
|
||||
}
|
||||
@@ -291,18 +283,6 @@ var counterCmd = &cobra.Command{
|
||||
},
|
||||
}
|
||||
|
||||
// deprecated, left for backwards compatibility
|
||||
var dummyCmd = &cobra.Command{
|
||||
Use: "dummy",
|
||||
Deprecated: "use: [abci-cli kvstore] instead",
|
||||
Short: "ABCI demo example",
|
||||
Long: "ABCI demo example",
|
||||
Args: cobra.ExactArgs(0),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return cmdKVStore(cmd, args)
|
||||
},
|
||||
}
|
||||
|
||||
var kvstoreCmd = &cobra.Command{
|
||||
Use: "kvstore",
|
||||
Short: "ABCI demo example",
|
||||
@@ -414,7 +394,6 @@ func cmdConsole(cmd *cobra.Command, args []string) error {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func muxOnCommands(cmd *cobra.Command, pArgs []string) error {
|
||||
|
@@ -83,7 +83,7 @@ func TestWriteReadMessage2(t *testing.T) {
|
||||
Log: phrase,
|
||||
GasWanted: 10,
|
||||
Tags: []cmn.KVPair{
|
||||
cmn.KVPair{Key: []byte("abc"), Value: []byte("def")},
|
||||
{Key: []byte("abc"), Value: []byte("def")},
|
||||
},
|
||||
},
|
||||
// TODO: add the rest
|
||||
|
@@ -4,7 +4,7 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/tendermint/go-amino"
|
||||
amino "github.com/tendermint/go-amino"
|
||||
|
||||
proto "github.com/tendermint/tendermint/benchmarks/proto"
|
||||
"github.com/tendermint/tendermint/crypto/ed25519"
|
||||
|
@@ -363,7 +363,8 @@ func (pool *BlockPool) sendError(err error, peerID p2p.ID) {
|
||||
pool.errorsCh <- peerError{err, peerID}
|
||||
}
|
||||
|
||||
// unused by tendermint; left for debugging purposes
|
||||
// for debugging purposes
|
||||
//nolint:unused
|
||||
func (pool *BlockPool) debug() string {
|
||||
pool.mtx.Lock()
|
||||
defer pool.mtx.Unlock()
|
||||
|
@@ -8,7 +8,6 @@ import (
|
||||
|
||||
amino "github.com/tendermint/go-amino"
|
||||
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
@@ -302,7 +301,7 @@ FOR_LOOP:
|
||||
|
||||
firstParts := first.MakePartSet(types.BlockPartSizeBytes)
|
||||
firstPartsHeader := firstParts.Header()
|
||||
firstID := types.BlockID{first.Hash(), firstPartsHeader}
|
||||
firstID := types.BlockID{Hash: first.Hash(), PartsHeader: firstPartsHeader}
|
||||
// Finally, verify the first block using the second's commit
|
||||
// NOTE: we can probably make this more efficient, but note that calling
|
||||
// first.Hash() doesn't verify the tx contents, so MakePartSet() is
|
||||
@@ -338,8 +337,7 @@ FOR_LOOP:
|
||||
state, err = bcR.blockExec.ApplyBlock(state, firstID, first)
|
||||
if err != nil {
|
||||
// TODO This is bad, are we zombie?
|
||||
cmn.PanicQ(fmt.Sprintf("Failed to process committed block (%d:%X): %v",
|
||||
first.Height, first.Hash(), err))
|
||||
panic(fmt.Sprintf("Failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
|
||||
}
|
||||
blocksSynced++
|
||||
|
||||
@@ -432,11 +430,7 @@ type bcBlockResponseMessage struct {
|
||||
|
||||
// ValidateBasic performs basic validation.
|
||||
func (m *bcBlockResponseMessage) ValidateBasic() error {
|
||||
if err := m.Block.ValidateBasic(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
return m.Block.ValidateBasic()
|
||||
}
|
||||
|
||||
func (m *bcBlockResponseMessage) String() string {
|
||||
|
@@ -1,6 +1,7 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"os"
|
||||
"sort"
|
||||
"testing"
|
||||
"time"
|
||||
@@ -42,7 +43,7 @@ func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.G
|
||||
}
|
||||
|
||||
func makeVote(header *types.Header, blockID types.BlockID, valset *types.ValidatorSet, privVal types.PrivValidator) *types.Vote {
|
||||
addr := privVal.GetAddress()
|
||||
addr := privVal.GetPubKey().Address()
|
||||
idx, _ := valset.GetByAddress(addr)
|
||||
vote := &types.Vote{
|
||||
ValidatorAddress: addr,
|
||||
@@ -95,13 +96,13 @@ func newBlockchainReactor(logger log.Logger, genDoc *types.GenesisDoc, privVals
|
||||
|
||||
// let's add some blocks in
|
||||
for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ {
|
||||
lastCommit := &types.Commit{}
|
||||
lastCommit := types.NewCommit(types.BlockID{}, nil)
|
||||
if blockHeight > 1 {
|
||||
lastBlockMeta := blockStore.LoadBlockMeta(blockHeight - 1)
|
||||
lastBlock := blockStore.LoadBlock(blockHeight - 1)
|
||||
|
||||
vote := makeVote(&lastBlock.Header, lastBlockMeta.BlockID, state.Validators, privVals[0])
|
||||
lastCommit = &types.Commit{Precommits: []*types.Vote{vote}, BlockID: lastBlockMeta.BlockID}
|
||||
vote := makeVote(&lastBlock.Header, lastBlockMeta.BlockID, state.Validators, privVals[0]).CommitSig()
|
||||
lastCommit = types.NewCommit(lastBlockMeta.BlockID, []*types.CommitSig{vote})
|
||||
}
|
||||
|
||||
thisBlock := makeBlock(blockHeight, state, lastCommit)
|
||||
@@ -125,6 +126,7 @@ func newBlockchainReactor(logger log.Logger, genDoc *types.GenesisDoc, privVals
|
||||
|
||||
func TestNoBlockResponse(t *testing.T) {
|
||||
config = cfg.ResetTestRoot("blockchain_reactor_test")
|
||||
defer os.RemoveAll(config.RootDir)
|
||||
genDoc, privVals := randGenesisDoc(1, false, 30)
|
||||
|
||||
maxBlockHeight := int64(65)
|
||||
@@ -184,6 +186,7 @@ func TestNoBlockResponse(t *testing.T) {
|
||||
// that seems extreme.
|
||||
func TestBadBlockStopsPeer(t *testing.T) {
|
||||
config = cfg.ResetTestRoot("blockchain_reactor_test")
|
||||
defer os.RemoveAll(config.RootDir)
|
||||
genDoc, privVals := randGenesisDoc(1, false, 30)
|
||||
|
||||
maxBlockHeight := int64(148)
|
||||
|
@@ -3,9 +3,11 @@ package blockchain
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime/debug"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
@@ -20,7 +22,17 @@ import (
|
||||
tmtime "github.com/tendermint/tendermint/types/time"
|
||||
)
|
||||
|
||||
func makeStateAndBlockStore(logger log.Logger) (sm.State, *BlockStore) {
|
||||
// A cleanupFunc cleans up any config / test files created for a particular
|
||||
// test.
|
||||
type cleanupFunc func()
|
||||
|
||||
// make a Commit with a single vote containing just the height and a timestamp
|
||||
func makeTestCommit(height int64, timestamp time.Time) *types.Commit {
|
||||
commitSigs := []*types.CommitSig{{Height: height, Timestamp: timestamp}}
|
||||
return types.NewCommit(types.BlockID{}, commitSigs)
|
||||
}
|
||||
|
||||
func makeStateAndBlockStore(logger log.Logger) (sm.State, *BlockStore, cleanupFunc) {
|
||||
config := cfg.ResetTestRoot("blockchain_reactor_test")
|
||||
// blockDB := dbm.NewDebugDB("blockDB", dbm.NewMemDB())
|
||||
// stateDB := dbm.NewDebugDB("stateDB", dbm.NewMemDB())
|
||||
@@ -30,7 +42,7 @@ func makeStateAndBlockStore(logger log.Logger) (sm.State, *BlockStore) {
|
||||
if err != nil {
|
||||
panic(cmn.ErrorWrap(err, "error constructing state from genesis file"))
|
||||
}
|
||||
return state, NewBlockStore(blockDB)
|
||||
return state, NewBlockStore(blockDB), func() { os.RemoveAll(config.RootDir) }
|
||||
}
|
||||
|
||||
func TestLoadBlockStoreStateJSON(t *testing.T) {
|
||||
@@ -80,20 +92,32 @@ func freshBlockStore() (*BlockStore, db.DB) {
|
||||
}
|
||||
|
||||
var (
|
||||
state, _ = makeStateAndBlockStore(log.NewTMLogger(new(bytes.Buffer)))
|
||||
|
||||
block = makeBlock(1, state, new(types.Commit))
|
||||
partSet = block.MakePartSet(2)
|
||||
part1 = partSet.GetPart(0)
|
||||
part2 = partSet.GetPart(1)
|
||||
seenCommit1 = &types.Commit{Precommits: []*types.Vote{{Height: 10,
|
||||
Timestamp: tmtime.Now()}}}
|
||||
state sm.State
|
||||
block *types.Block
|
||||
partSet *types.PartSet
|
||||
part1 *types.Part
|
||||
part2 *types.Part
|
||||
seenCommit1 *types.Commit
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
var cleanup cleanupFunc
|
||||
state, _, cleanup = makeStateAndBlockStore(log.NewTMLogger(new(bytes.Buffer)))
|
||||
block = makeBlock(1, state, new(types.Commit))
|
||||
partSet = block.MakePartSet(2)
|
||||
part1 = partSet.GetPart(0)
|
||||
part2 = partSet.GetPart(1)
|
||||
seenCommit1 = makeTestCommit(10, tmtime.Now())
|
||||
code := m.Run()
|
||||
cleanup()
|
||||
os.Exit(code)
|
||||
}
|
||||
|
||||
// TODO: This test should be simplified ...
|
||||
|
||||
func TestBlockStoreSaveLoadBlock(t *testing.T) {
|
||||
state, bs := makeStateAndBlockStore(log.NewTMLogger(new(bytes.Buffer)))
|
||||
state, bs, cleanup := makeStateAndBlockStore(log.NewTMLogger(new(bytes.Buffer)))
|
||||
defer cleanup()
|
||||
require.Equal(t, bs.Height(), int64(0), "initially the height should be zero")
|
||||
|
||||
// check there are no blocks at various heights
|
||||
@@ -107,8 +131,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
|
||||
// save a block
|
||||
block := makeBlock(bs.Height()+1, state, new(types.Commit))
|
||||
validPartSet := block.MakePartSet(2)
|
||||
seenCommit := &types.Commit{Precommits: []*types.Vote{{Height: 10,
|
||||
Timestamp: tmtime.Now()}}}
|
||||
seenCommit := makeTestCommit(10, tmtime.Now())
|
||||
bs.SaveBlock(block, partSet, seenCommit)
|
||||
require.Equal(t, bs.Height(), block.Header.Height, "expecting the new height to be changed")
|
||||
|
||||
@@ -127,8 +150,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
|
||||
|
||||
// End of setup, test data
|
||||
|
||||
commitAtH10 := &types.Commit{Precommits: []*types.Vote{{Height: 10,
|
||||
Timestamp: tmtime.Now()}}}
|
||||
commitAtH10 := makeTestCommit(10, tmtime.Now())
|
||||
tuples := []struct {
|
||||
block *types.Block
|
||||
parts *types.PartSet
|
||||
@@ -311,7 +333,7 @@ func TestLoadBlockPart(t *testing.T) {
|
||||
gotPart, _, panicErr := doFn(loadPart)
|
||||
require.Nil(t, panicErr, "an existent and proper block should not panic")
|
||||
require.Nil(t, res, "a properly saved block should return a proper block")
|
||||
require.Equal(t, gotPart.(*types.Part).Hash(), part1.Hash(),
|
||||
require.Equal(t, gotPart.(*types.Part), part1,
|
||||
"expecting successful retrieval of previously saved block")
|
||||
}
|
||||
|
||||
@@ -346,14 +368,13 @@ func TestLoadBlockMeta(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestBlockFetchAtHeight(t *testing.T) {
|
||||
state, bs := makeStateAndBlockStore(log.NewTMLogger(new(bytes.Buffer)))
|
||||
state, bs, cleanup := makeStateAndBlockStore(log.NewTMLogger(new(bytes.Buffer)))
|
||||
defer cleanup()
|
||||
require.Equal(t, bs.Height(), int64(0), "initially the height should be zero")
|
||||
block := makeBlock(bs.Height()+1, state, new(types.Commit))
|
||||
|
||||
partSet := block.MakePartSet(2)
|
||||
seenCommit := &types.Commit{Precommits: []*types.Vote{{Height: 10,
|
||||
Timestamp: tmtime.Now()}}}
|
||||
|
||||
seenCommit := makeTestCommit(10, tmtime.Now())
|
||||
bs.SaveBlock(block, partSet, seenCommit)
|
||||
require.Equal(t, bs.Height(), block.Header.Height, "expecting the new height to be changed")
|
||||
|
||||
|
@@ -1,7 +1,7 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"github.com/tendermint/go-amino"
|
||||
amino "github.com/tendermint/go-amino"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
|
@@ -3,6 +3,7 @@ package main
|
||||
import (
|
||||
"flag"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/tendermint/tendermint/crypto/ed25519"
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
@@ -13,9 +14,10 @@ import (
|
||||
|
||||
func main() {
|
||||
var (
|
||||
addr = flag.String("addr", ":26659", "Address of client to connect to")
|
||||
chainID = flag.String("chain-id", "mychain", "chain id")
|
||||
privValPath = flag.String("priv", "", "priv val file path")
|
||||
addr = flag.String("addr", ":26659", "Address of client to connect to")
|
||||
chainID = flag.String("chain-id", "mychain", "chain id")
|
||||
privValKeyPath = flag.String("priv-key", "", "priv val key file path")
|
||||
privValStatePath = flag.String("priv-state", "", "priv val state file path")
|
||||
|
||||
logger = log.NewTMLogger(
|
||||
log.NewSyncWriter(os.Stdout),
|
||||
@@ -27,18 +29,26 @@ func main() {
|
||||
"Starting private validator",
|
||||
"addr", *addr,
|
||||
"chainID", *chainID,
|
||||
"privPath", *privValPath,
|
||||
"privKeyPath", *privValKeyPath,
|
||||
"privStatePath", *privValStatePath,
|
||||
)
|
||||
|
||||
pv := privval.LoadFilePV(*privValPath)
|
||||
pv := privval.LoadFilePV(*privValKeyPath, *privValStatePath)
|
||||
|
||||
rs := privval.NewRemoteSigner(
|
||||
logger,
|
||||
*chainID,
|
||||
*addr,
|
||||
pv,
|
||||
ed25519.GenPrivKey(),
|
||||
)
|
||||
var dialer privval.Dialer
|
||||
protocol, address := cmn.ProtocolAndAddress(*addr)
|
||||
switch protocol {
|
||||
case "unix":
|
||||
dialer = privval.DialUnixFn(address)
|
||||
case "tcp":
|
||||
connTimeout := 3 * time.Second // TODO
|
||||
dialer = privval.DialTCPFn(address, connTimeout, ed25519.GenPrivKey())
|
||||
default:
|
||||
logger.Error("Unknown protocol", "protocol", protocol)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
rs := privval.NewRemoteSigner(logger, *chainID, pv, dialer)
|
||||
err := rs.Start()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
|
@@ -17,7 +17,7 @@ var GenValidatorCmd = &cobra.Command{
|
||||
}
|
||||
|
||||
func genValidator(cmd *cobra.Command, args []string) {
|
||||
pv := privval.GenFilePV("")
|
||||
pv := privval.GenFilePV("", "")
|
||||
jsbz, err := cdc.MarshalJSON(pv)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
|
@@ -4,7 +4,6 @@ import (
|
||||
"fmt"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
@@ -26,15 +25,18 @@ func initFiles(cmd *cobra.Command, args []string) error {
|
||||
|
||||
func initFilesWithConfig(config *cfg.Config) error {
|
||||
// private validator
|
||||
privValFile := config.PrivValidatorFile()
|
||||
privValKeyFile := config.PrivValidatorKeyFile()
|
||||
privValStateFile := config.PrivValidatorStateFile()
|
||||
var pv *privval.FilePV
|
||||
if cmn.FileExists(privValFile) {
|
||||
pv = privval.LoadFilePV(privValFile)
|
||||
logger.Info("Found private validator", "path", privValFile)
|
||||
if cmn.FileExists(privValKeyFile) {
|
||||
pv = privval.LoadFilePV(privValKeyFile, privValStateFile)
|
||||
logger.Info("Found private validator", "keyFile", privValKeyFile,
|
||||
"stateFile", privValStateFile)
|
||||
} else {
|
||||
pv = privval.GenFilePV(privValFile)
|
||||
pv = privval.GenFilePV(privValKeyFile, privValStateFile)
|
||||
pv.Save()
|
||||
logger.Info("Generated private validator", "path", privValFile)
|
||||
logger.Info("Generated private validator", "keyFile", privValKeyFile,
|
||||
"stateFile", privValStateFile)
|
||||
}
|
||||
|
||||
nodeKeyFile := config.NodeKeyFile()
|
||||
@@ -57,9 +59,10 @@ func initFilesWithConfig(config *cfg.Config) error {
|
||||
GenesisTime: tmtime.Now(),
|
||||
ConsensusParams: types.DefaultConsensusParams(),
|
||||
}
|
||||
key := pv.GetPubKey()
|
||||
genDoc.Validators = []types.GenesisValidator{{
|
||||
Address: pv.GetPubKey().Address(),
|
||||
PubKey: pv.GetPubKey(),
|
||||
Address: key.Address(),
|
||||
PubKey: key,
|
||||
Power: 10,
|
||||
}}
|
||||
|
||||
|
@@ -5,6 +5,7 @@ import (
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/privval"
|
||||
)
|
||||
@@ -27,36 +28,41 @@ var ResetPrivValidatorCmd = &cobra.Command{
|
||||
// XXX: this is totally unsafe.
|
||||
// it's only suitable for testnets.
|
||||
func resetAll(cmd *cobra.Command, args []string) {
|
||||
ResetAll(config.DBDir(), config.P2P.AddrBookFile(), config.PrivValidatorFile(), logger)
|
||||
ResetAll(config.DBDir(), config.P2P.AddrBookFile(), config.PrivValidatorKeyFile(),
|
||||
config.PrivValidatorStateFile(), logger)
|
||||
}
|
||||
|
||||
// XXX: this is totally unsafe.
|
||||
// it's only suitable for testnets.
|
||||
func resetPrivValidator(cmd *cobra.Command, args []string) {
|
||||
resetFilePV(config.PrivValidatorFile(), logger)
|
||||
resetFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile(), logger)
|
||||
}
|
||||
|
||||
// ResetAll removes the privValidator and address book files plus all data.
|
||||
// ResetAll removes address book files plus all data, and resets the privValidator data.
|
||||
// Exported so other CLI tools can use it.
|
||||
func ResetAll(dbDir, addrBookFile, privValFile string, logger log.Logger) {
|
||||
resetFilePV(privValFile, logger)
|
||||
func ResetAll(dbDir, addrBookFile, privValKeyFile, privValStateFile string, logger log.Logger) {
|
||||
removeAddrBook(addrBookFile, logger)
|
||||
if err := os.RemoveAll(dbDir); err == nil {
|
||||
logger.Info("Removed all blockchain history", "dir", dbDir)
|
||||
} else {
|
||||
logger.Error("Error removing all blockchain history", "dir", dbDir, "err", err)
|
||||
}
|
||||
// recreate the dbDir since the privVal state needs to live there
|
||||
cmn.EnsureDir(dbDir, 0700)
|
||||
resetFilePV(privValKeyFile, privValStateFile, logger)
|
||||
}
|
||||
|
||||
func resetFilePV(privValFile string, logger log.Logger) {
|
||||
if _, err := os.Stat(privValFile); err == nil {
|
||||
pv := privval.LoadFilePV(privValFile)
|
||||
func resetFilePV(privValKeyFile, privValStateFile string, logger log.Logger) {
|
||||
if _, err := os.Stat(privValKeyFile); err == nil {
|
||||
pv := privval.LoadFilePVEmptyState(privValKeyFile, privValStateFile)
|
||||
pv.Reset()
|
||||
logger.Info("Reset private validator file to genesis state", "file", privValFile)
|
||||
logger.Info("Reset private validator file to genesis state", "keyFile", privValKeyFile,
|
||||
"stateFile", privValStateFile)
|
||||
} else {
|
||||
pv := privval.GenFilePV(privValFile)
|
||||
pv := privval.GenFilePV(privValKeyFile, privValStateFile)
|
||||
pv.Save()
|
||||
logger.Info("Generated private validator file", "file", privValFile)
|
||||
logger.Info("Generated private validator file", "keyFile", privValKeyFile,
|
||||
"stateFile", privValStateFile)
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -22,10 +22,6 @@ var (
|
||||
defaultRoot = os.ExpandEnv("$HOME/.some/test/dir")
|
||||
)
|
||||
|
||||
const (
|
||||
rootName = "root"
|
||||
)
|
||||
|
||||
// clearConfig clears env vars, the given root dir, and resets viper.
|
||||
func clearConfig(dir string) {
|
||||
if err := os.Unsetenv("TMHOME"); err != nil {
|
||||
|
@@ -24,7 +24,7 @@ func AddNodeFlags(cmd *cobra.Command) {
|
||||
cmd.Flags().Bool("fast_sync", config.FastSync, "Fast blockchain syncing")
|
||||
|
||||
// abci flags
|
||||
cmd.Flags().String("proxy_app", config.ProxyApp, "Proxy app address, or 'nilapp' or 'kvstore' for local testing.")
|
||||
cmd.Flags().String("proxy_app", config.ProxyApp, "Proxy app address, or one of: 'kvstore', 'persistent_kvstore', 'counter', 'counter_serial' or 'noop' for local testing.")
|
||||
cmd.Flags().String("abci", config.ABCI, "Specify abci transport (socket | grpc)")
|
||||
|
||||
// rpc flags
|
||||
@@ -77,8 +77,6 @@ func NewRunNodeCmd(nodeProvider nm.NodeProvider) *cobra.Command {
|
||||
|
||||
// Run forever
|
||||
select {}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
|
@@ -16,12 +16,11 @@ var ShowNodeIDCmd = &cobra.Command{
|
||||
}
|
||||
|
||||
func showNodeID(cmd *cobra.Command, args []string) error {
|
||||
|
||||
nodeKey, err := p2p.LoadNodeKey(config.NodeKeyFile())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Println(nodeKey.ID())
|
||||
|
||||
fmt.Println(nodeKey.ID())
|
||||
return nil
|
||||
}
|
||||
|
@@ -3,8 +3,10 @@ package commands
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
"github.com/tendermint/tendermint/privval"
|
||||
)
|
||||
|
||||
@@ -12,11 +14,21 @@ import (
|
||||
var ShowValidatorCmd = &cobra.Command{
|
||||
Use: "show_validator",
|
||||
Short: "Show this node's validator info",
|
||||
Run: showValidator,
|
||||
RunE: showValidator,
|
||||
}
|
||||
|
||||
func showValidator(cmd *cobra.Command, args []string) {
|
||||
privValidator := privval.LoadOrGenFilePV(config.PrivValidatorFile())
|
||||
pubKeyJSONBytes, _ := cdc.MarshalJSON(privValidator.GetPubKey())
|
||||
fmt.Println(string(pubKeyJSONBytes))
|
||||
func showValidator(cmd *cobra.Command, args []string) error {
|
||||
keyFilePath := config.PrivValidatorKeyFile()
|
||||
if !cmn.FileExists(keyFilePath) {
|
||||
return fmt.Errorf("private validator file %s does not exist", keyFilePath)
|
||||
}
|
||||
|
||||
pv := privval.LoadFilePV(keyFilePath, config.PrivValidatorStateFile())
|
||||
bz, err := cdc.MarshalJSON(pv.GetPubKey())
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to marshal private validator pubkey")
|
||||
}
|
||||
|
||||
fmt.Println(string(bz))
|
||||
return nil
|
||||
}
|
||||
|
@@ -85,11 +85,18 @@ func testnetFiles(cmd *cobra.Command, args []string) error {
|
||||
_ = os.RemoveAll(outputDir)
|
||||
return err
|
||||
}
|
||||
err = os.MkdirAll(filepath.Join(nodeDir, "data"), nodeDirPerm)
|
||||
if err != nil {
|
||||
_ = os.RemoveAll(outputDir)
|
||||
return err
|
||||
}
|
||||
|
||||
initFilesWithConfig(config)
|
||||
|
||||
pvFile := filepath.Join(nodeDir, config.BaseConfig.PrivValidator)
|
||||
pv := privval.LoadFilePV(pvFile)
|
||||
pvKeyFile := filepath.Join(nodeDir, config.BaseConfig.PrivValidatorKey)
|
||||
pvStateFile := filepath.Join(nodeDir, config.BaseConfig.PrivValidatorState)
|
||||
|
||||
pv := privval.LoadFilePV(pvKeyFile, pvStateFile)
|
||||
genVals[i] = types.GenesisValidator{
|
||||
Address: pv.GetPubKey().Address(),
|
||||
PubKey: pv.GetPubKey(),
|
||||
@@ -127,14 +134,32 @@ func testnetFiles(cmd *cobra.Command, args []string) error {
|
||||
}
|
||||
}
|
||||
|
||||
// Gather persistent peer addresses.
|
||||
var (
|
||||
persistentPeers string
|
||||
err error
|
||||
)
|
||||
if populatePersistentPeers {
|
||||
err := populatePersistentPeersInConfigAndWriteIt(config)
|
||||
persistentPeers, err = persistentPeersString(config)
|
||||
if err != nil {
|
||||
_ = os.RemoveAll(outputDir)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Overwrite default config.
|
||||
for i := 0; i < nValidators+nNonValidators; i++ {
|
||||
nodeDir := filepath.Join(outputDir, fmt.Sprintf("%s%d", nodeDirPrefix, i))
|
||||
config.SetRoot(nodeDir)
|
||||
config.P2P.AddrBookStrict = false
|
||||
config.P2P.AllowDuplicateIP = true
|
||||
if populatePersistentPeers {
|
||||
config.P2P.PersistentPeers = persistentPeers
|
||||
}
|
||||
|
||||
cfg.WriteConfigFile(filepath.Join(nodeDir, "config", "config.toml"), config)
|
||||
}
|
||||
|
||||
fmt.Printf("Successfully initialized %v node directories\n", nValidators+nNonValidators)
|
||||
return nil
|
||||
}
|
||||
@@ -157,28 +182,16 @@ func hostnameOrIP(i int) string {
|
||||
return fmt.Sprintf("%s%d", hostnamePrefix, i)
|
||||
}
|
||||
|
||||
func populatePersistentPeersInConfigAndWriteIt(config *cfg.Config) error {
|
||||
func persistentPeersString(config *cfg.Config) (string, error) {
|
||||
persistentPeers := make([]string, nValidators+nNonValidators)
|
||||
for i := 0; i < nValidators+nNonValidators; i++ {
|
||||
nodeDir := filepath.Join(outputDir, fmt.Sprintf("%s%d", nodeDirPrefix, i))
|
||||
config.SetRoot(nodeDir)
|
||||
nodeKey, err := p2p.LoadNodeKey(config.NodeKeyFile())
|
||||
if err != nil {
|
||||
return err
|
||||
return "", err
|
||||
}
|
||||
persistentPeers[i] = p2p.IDAddressString(nodeKey.ID(), fmt.Sprintf("%s:%d", hostnameOrIP(i), p2pPort))
|
||||
}
|
||||
persistentPeersList := strings.Join(persistentPeers, ",")
|
||||
|
||||
for i := 0; i < nValidators+nNonValidators; i++ {
|
||||
nodeDir := filepath.Join(outputDir, fmt.Sprintf("%s%d", nodeDirPrefix, i))
|
||||
config.SetRoot(nodeDir)
|
||||
config.P2P.PersistentPeers = persistentPeersList
|
||||
config.P2P.AddrBookStrict = false
|
||||
|
||||
// overwrite default config
|
||||
cfg.WriteConfigFile(filepath.Join(nodeDir, "config", "config.toml"), config)
|
||||
}
|
||||
|
||||
return nil
|
||||
return strings.Join(persistentPeers, ","), nil
|
||||
}
|
||||
|
@@ -1,7 +1,7 @@
|
||||
package commands
|
||||
|
||||
import (
|
||||
"github.com/tendermint/go-amino"
|
||||
amino "github.com/tendermint/go-amino"
|
||||
cryptoAmino "github.com/tendermint/tendermint/crypto/encoding/amino"
|
||||
)
|
||||
|
||||
|
@@ -35,15 +35,24 @@ var (
|
||||
defaultConfigFileName = "config.toml"
|
||||
defaultGenesisJSONName = "genesis.json"
|
||||
|
||||
defaultPrivValName = "priv_validator.json"
|
||||
defaultPrivValKeyName = "priv_validator_key.json"
|
||||
defaultPrivValStateName = "priv_validator_state.json"
|
||||
|
||||
defaultNodeKeyName = "node_key.json"
|
||||
defaultAddrBookName = "addrbook.json"
|
||||
|
||||
defaultConfigFilePath = filepath.Join(defaultConfigDir, defaultConfigFileName)
|
||||
defaultGenesisJSONPath = filepath.Join(defaultConfigDir, defaultGenesisJSONName)
|
||||
defaultPrivValPath = filepath.Join(defaultConfigDir, defaultPrivValName)
|
||||
defaultNodeKeyPath = filepath.Join(defaultConfigDir, defaultNodeKeyName)
|
||||
defaultAddrBookPath = filepath.Join(defaultConfigDir, defaultAddrBookName)
|
||||
defaultConfigFilePath = filepath.Join(defaultConfigDir, defaultConfigFileName)
|
||||
defaultGenesisJSONPath = filepath.Join(defaultConfigDir, defaultGenesisJSONName)
|
||||
defaultPrivValKeyPath = filepath.Join(defaultConfigDir, defaultPrivValKeyName)
|
||||
defaultPrivValStatePath = filepath.Join(defaultDataDir, defaultPrivValStateName)
|
||||
|
||||
defaultNodeKeyPath = filepath.Join(defaultConfigDir, defaultNodeKeyName)
|
||||
defaultAddrBookPath = filepath.Join(defaultConfigDir, defaultAddrBookName)
|
||||
)
|
||||
|
||||
var (
|
||||
oldPrivVal = "priv_validator.json"
|
||||
oldPrivValPath = filepath.Join(defaultConfigDir, oldPrivVal)
|
||||
)
|
||||
|
||||
// Config defines the top level configuration for a Tendermint node
|
||||
@@ -160,7 +169,10 @@ type BaseConfig struct {
|
||||
Genesis string `mapstructure:"genesis_file"`
|
||||
|
||||
// Path to the JSON file containing the private key to use as a validator in the consensus protocol
|
||||
PrivValidator string `mapstructure:"priv_validator_file"`
|
||||
PrivValidatorKey string `mapstructure:"priv_validator_key_file"`
|
||||
|
||||
// Path to the JSON file containing the last sign state of a validator
|
||||
PrivValidatorState string `mapstructure:"priv_validator_state_file"`
|
||||
|
||||
// TCP or UNIX socket address for Tendermint to listen on for
|
||||
// connections from an external PrivValidator process
|
||||
@@ -183,19 +195,20 @@ type BaseConfig struct {
|
||||
// DefaultBaseConfig returns a default base configuration for a Tendermint node
|
||||
func DefaultBaseConfig() BaseConfig {
|
||||
return BaseConfig{
|
||||
Genesis: defaultGenesisJSONPath,
|
||||
PrivValidator: defaultPrivValPath,
|
||||
NodeKey: defaultNodeKeyPath,
|
||||
Moniker: defaultMoniker,
|
||||
ProxyApp: "tcp://127.0.0.1:26658",
|
||||
ABCI: "socket",
|
||||
LogLevel: DefaultPackageLogLevels(),
|
||||
LogFormat: LogFormatPlain,
|
||||
ProfListenAddress: "",
|
||||
FastSync: true,
|
||||
FilterPeers: false,
|
||||
DBBackend: "leveldb",
|
||||
DBPath: "data",
|
||||
Genesis: defaultGenesisJSONPath,
|
||||
PrivValidatorKey: defaultPrivValKeyPath,
|
||||
PrivValidatorState: defaultPrivValStatePath,
|
||||
NodeKey: defaultNodeKeyPath,
|
||||
Moniker: defaultMoniker,
|
||||
ProxyApp: "tcp://127.0.0.1:26658",
|
||||
ABCI: "socket",
|
||||
LogLevel: DefaultPackageLogLevels(),
|
||||
LogFormat: LogFormatPlain,
|
||||
ProfListenAddress: "",
|
||||
FastSync: true,
|
||||
FilterPeers: false,
|
||||
DBBackend: "leveldb",
|
||||
DBPath: "data",
|
||||
}
|
||||
}
|
||||
|
||||
@@ -218,9 +231,20 @@ func (cfg BaseConfig) GenesisFile() string {
|
||||
return rootify(cfg.Genesis, cfg.RootDir)
|
||||
}
|
||||
|
||||
// PrivValidatorFile returns the full path to the priv_validator.json file
|
||||
func (cfg BaseConfig) PrivValidatorFile() string {
|
||||
return rootify(cfg.PrivValidator, cfg.RootDir)
|
||||
// PrivValidatorKeyFile returns the full path to the priv_validator_key.json file
|
||||
func (cfg BaseConfig) PrivValidatorKeyFile() string {
|
||||
return rootify(cfg.PrivValidatorKey, cfg.RootDir)
|
||||
}
|
||||
|
||||
// PrivValidatorStateFile returns the full path to the priv_validator_state.json file
|
||||
func (cfg BaseConfig) PrivValidatorStateFile() string {
|
||||
return rootify(cfg.PrivValidatorState, cfg.RootDir)
|
||||
}
|
||||
|
||||
// OldPrivValidatorFile returns the full path of the priv_validator.json from pre v0.28.0.
|
||||
// TODO: eventually remove.
|
||||
func (cfg BaseConfig) OldPrivValidatorFile() string {
|
||||
return rootify(oldPrivValPath, cfg.RootDir)
|
||||
}
|
||||
|
||||
// NodeKeyFile returns the full path to the node_key.json file
|
||||
@@ -283,7 +307,7 @@ type RPCConfig struct {
|
||||
|
||||
// Maximum number of simultaneous connections.
|
||||
// Does not include RPC (HTTP&WebSocket) connections. See max_open_connections
|
||||
// If you want to accept more significant number than the default, make sure
|
||||
// If you want to accept a larger number than the default, make sure
|
||||
// you increase your OS limits.
|
||||
// 0 - unlimited.
|
||||
GRPCMaxOpenConnections int `mapstructure:"grpc_max_open_connections"`
|
||||
@@ -293,7 +317,7 @@ type RPCConfig struct {
|
||||
|
||||
// Maximum number of simultaneous connections (including WebSocket).
|
||||
// Does not include gRPC connections. See grpc_max_open_connections
|
||||
// If you want to accept more significant number than the default, make sure
|
||||
// If you want to accept a larger number than the default, make sure
|
||||
// you increase your OS limits.
|
||||
// 0 - unlimited.
|
||||
// Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
|
||||
@@ -434,7 +458,7 @@ func DefaultP2PConfig() *P2PConfig {
|
||||
RecvRate: 5120000, // 5 mB/s
|
||||
PexReactor: true,
|
||||
SeedMode: false,
|
||||
AllowDuplicateIP: true, // so non-breaking yet
|
||||
AllowDuplicateIP: false,
|
||||
HandshakeTimeout: 20 * time.Second,
|
||||
DialTimeout: 3 * time.Second,
|
||||
TestDialFail: false,
|
||||
@@ -774,12 +798,12 @@ type InstrumentationConfig struct {
|
||||
PrometheusListenAddr string `mapstructure:"prometheus_listen_addr"`
|
||||
|
||||
// Maximum number of simultaneous connections.
|
||||
// If you want to accept more significant number than the default, make sure
|
||||
// If you want to accept a larger number than the default, make sure
|
||||
// you increase your OS limits.
|
||||
// 0 - unlimited.
|
||||
MaxOpenConnections int `mapstructure:"max_open_connections"`
|
||||
|
||||
// Tendermint instrumentation namespace.
|
||||
// Instrumentation namespace.
|
||||
Namespace string `mapstructure:"namespace"`
|
||||
}
|
||||
|
||||
|
@@ -2,13 +2,17 @@ package config
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"os"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"path/filepath"
|
||||
"text/template"
|
||||
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
)
|
||||
|
||||
// DefaultDirPerm is the default permissions used when creating directories.
|
||||
const DefaultDirPerm = 0700
|
||||
|
||||
var configTemplate *template.Template
|
||||
|
||||
func init() {
|
||||
@@ -23,13 +27,13 @@ func init() {
|
||||
// EnsureRoot creates the root, config, and data directories if they don't exist,
|
||||
// and panics if it fails.
|
||||
func EnsureRoot(rootDir string) {
|
||||
if err := cmn.EnsureDir(rootDir, 0700); err != nil {
|
||||
if err := cmn.EnsureDir(rootDir, DefaultDirPerm); err != nil {
|
||||
cmn.PanicSanity(err.Error())
|
||||
}
|
||||
if err := cmn.EnsureDir(filepath.Join(rootDir, defaultConfigDir), 0700); err != nil {
|
||||
if err := cmn.EnsureDir(filepath.Join(rootDir, defaultConfigDir), DefaultDirPerm); err != nil {
|
||||
cmn.PanicSanity(err.Error())
|
||||
}
|
||||
if err := cmn.EnsureDir(filepath.Join(rootDir, defaultDataDir), 0700); err != nil {
|
||||
if err := cmn.EnsureDir(filepath.Join(rootDir, defaultDataDir), DefaultDirPerm); err != nil {
|
||||
cmn.PanicSanity(err.Error())
|
||||
}
|
||||
|
||||
@@ -95,7 +99,10 @@ log_format = "{{ .BaseConfig.LogFormat }}"
|
||||
genesis_file = "{{ js .BaseConfig.Genesis }}"
|
||||
|
||||
# Path to the JSON file containing the private key to use as a validator in the consensus protocol
|
||||
priv_validator_file = "{{ js .BaseConfig.PrivValidator }}"
|
||||
priv_validator_key_file = "{{ js .BaseConfig.PrivValidatorKey }}"
|
||||
|
||||
# Path to the JSON file containing the last sign state of a validator
|
||||
priv_validator_state_file = "{{ js .BaseConfig.PrivValidatorState }}"
|
||||
|
||||
# TCP or UNIX socket address for Tendermint to listen on for
|
||||
# connections from an external PrivValidator process
|
||||
@@ -125,13 +132,13 @@ laddr = "{{ .RPC.ListenAddress }}"
|
||||
# A list of origins a cross-domain request can be executed from
|
||||
# Default value '[]' disables cors support
|
||||
# Use '["*"]' to allow any origin
|
||||
cors_allowed_origins = "{{ .RPC.CORSAllowedOrigins }}"
|
||||
cors_allowed_origins = [{{ range .RPC.CORSAllowedOrigins }}{{ printf "%q, " . }}{{end}}]
|
||||
|
||||
# A list of methods the client is allowed to use with cross-domain requests
|
||||
cors_allowed_methods = "{{ .RPC.CORSAllowedMethods }}"
|
||||
cors_allowed_methods = [{{ range .RPC.CORSAllowedMethods }}{{ printf "%q, " . }}{{end}}]
|
||||
|
||||
# A list of non simple headers the client is allowed to use with cross-domain requests
|
||||
cors_allowed_headers = "{{ .RPC.CORSAllowedHeaders }}"
|
||||
cors_allowed_headers = [{{ range .RPC.CORSAllowedHeaders }}{{ printf "%q, " . }}{{end}}]
# TCP or UNIX socket address for the gRPC server to listen on
# NOTE: This server only supports /broadcast_tx_commit
@@ -139,7 +146,7 @@ grpc_laddr = "{{ .RPC.GRPCListenAddress }}"

# Maximum number of simultaneous connections.
# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections
# If you want to accept more significant number than the default, make sure
# If you want to accept a larger number than the default, make sure
# you increase your OS limits.
# 0 - unlimited.
# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
@@ -151,7 +158,7 @@ unsafe = {{ .RPC.Unsafe }}

# Maximum number of simultaneous connections (including WebSocket).
# Does not include gRPC connections. See grpc_max_open_connections
# If you want to accept more significant number than the default, make sure
# If you want to accept a larger number than the default, make sure
# you increase your OS limits.
# 0 - unlimited.
# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
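The "0 - unlimited" convention is normally enforced by only wrapping the listener when a positive cap is configured. The sketch below assumes the x/net/netutil limiter and illustrates the idea rather than quoting Tendermint's RPC server code:

package main

import (
	"net"

	"golang.org/x/net/netutil"
)

// limitListener caps simultaneous connections when maxOpen > 0 and leaves
// the listener untouched for the "0 - unlimited" case described above.
func limitListener(l net.Listener, maxOpen int) net.Listener {
	if maxOpen <= 0 {
		return l
	}
	return netutil.LimitListener(l, maxOpen)
}

func main() {
	l, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	defer l.Close()
	_ = limitListener(l, 900) // e.g. a value derived from `ulimit -Sn`
}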
@@ -269,8 +276,8 @@ blocktime_iota = "{{ .Consensus.BlockTimeIota }}"

# What indexer to use for transactions
#
# Options:
# 1) "null" (default)
# 2) "kv" - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
# 1) "null"
# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
indexer = "{{ .TxIndex.Indexer }}"

# Comma-separated list of tags to index (by default the only tag is "tx.hash")
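For orientation, a freshly rendered file would now default to the key-value indexer; the snippet below is an assumed rendering (the `[tx_index]` table name and the empty `index_tags` default are inferred, not quoted from this diff):

[tx_index]
indexer = "kv"
index_tags = ""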
@@ -302,7 +309,7 @@ prometheus = {{ .Instrumentation.Prometheus }}
prometheus_listen_addr = "{{ .Instrumentation.PrometheusListenAddr }}"

# Maximum number of simultaneous connections.
# If you want to accept more significant number than the default, make sure
# If you want to accept a larger number than the default, make sure
# you increase your OS limits.
# 0 - unlimited.
max_open_connections = {{ .Instrumentation.MaxOpenConnections }}
@@ -314,53 +321,51 @@ namespace = "{{ .Instrumentation.Namespace }}"

/****** these are for test settings ***********/

func ResetTestRoot(testName string) *Config {
rootDir := os.ExpandEnv("$HOME/.tendermint_test")
rootDir = filepath.Join(rootDir, testName)
// Remove ~/.tendermint_test_bak
if cmn.FileExists(rootDir + "_bak") {
if err := os.RemoveAll(rootDir + "_bak"); err != nil {
cmn.PanicSanity(err.Error())
}
return ResetTestRootWithChainID(testName, "")
}

func ResetTestRootWithChainID(testName string, chainID string) *Config {
// create a unique, concurrency-safe test directory under os.TempDir()
rootDir, err := ioutil.TempDir("", fmt.Sprintf("%s-%s_", chainID, testName))
if err != nil {
panic(err)
}
// Move ~/.tendermint_test to ~/.tendermint_test_bak
if cmn.FileExists(rootDir) {
if err := os.Rename(rootDir, rootDir+"_bak"); err != nil {
cmn.PanicSanity(err.Error())
}
// ensure config and data subdirs are created
if err := cmn.EnsureDir(filepath.Join(rootDir, defaultConfigDir), DefaultDirPerm); err != nil {
panic(err)
}
// Create new dir
if err := cmn.EnsureDir(rootDir, 0700); err != nil {
cmn.PanicSanity(err.Error())
}
if err := cmn.EnsureDir(filepath.Join(rootDir, defaultConfigDir), 0700); err != nil {
cmn.PanicSanity(err.Error())
}
if err := cmn.EnsureDir(filepath.Join(rootDir, defaultDataDir), 0700); err != nil {
cmn.PanicSanity(err.Error())
if err := cmn.EnsureDir(filepath.Join(rootDir, defaultDataDir), DefaultDirPerm); err != nil {
panic(err)
}

baseConfig := DefaultBaseConfig()
configFilePath := filepath.Join(rootDir, defaultConfigFilePath)
genesisFilePath := filepath.Join(rootDir, baseConfig.Genesis)
privFilePath := filepath.Join(rootDir, baseConfig.PrivValidator)
privKeyFilePath := filepath.Join(rootDir, baseConfig.PrivValidatorKey)
privStateFilePath := filepath.Join(rootDir, baseConfig.PrivValidatorState)

// Write default config file if missing.
if !cmn.FileExists(configFilePath) {
writeDefaultConfigFile(configFilePath)
}
if !cmn.FileExists(genesisFilePath) {
if chainID == "" {
chainID = "tendermint_test"
}
testGenesis := fmt.Sprintf(testGenesisFmt, chainID)
cmn.MustWriteFile(genesisFilePath, []byte(testGenesis), 0644)
}
// we always overwrite the priv val
cmn.MustWriteFile(privFilePath, []byte(testPrivValidator), 0644)
cmn.MustWriteFile(privKeyFilePath, []byte(testPrivValidatorKey), 0644)
cmn.MustWriteFile(privStateFilePath, []byte(testPrivValidatorState), 0644)

config := TestConfig().SetRoot(rootDir)
return config
}
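Because the test root now lives under os.TempDir() rather than a shared ~/.tendermint_test, each caller removes its own directory when done; a usage sketch with illustrative test and chain names:

package config

import (
	"os"
	"testing"
)

func TestResetTestRootUsage(t *testing.T) {
	cfg := ResetTestRootWithChainID("reset_test_root_usage", "test-chain")
	defer os.RemoveAll(cfg.RootDir) // callers clean up their own TempDir-backed root

	t.Log("genesis:", cfg.GenesisFile())
	t.Log("validator key:", cfg.PrivValidatorKeyFile())
	t.Log("validator state:", cfg.PrivValidatorStateFile())
}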
var testGenesis = `{
var testGenesisFmt = `{
"genesis_time": "2018-10-10T08:20:13.695936996Z",
"chain_id": "tendermint_test",
"chain_id": "%s",
"validators": [
{
"pub_key": {
@@ -374,7 +379,7 @@ var testGenesis = `{
"app_hash": ""
}`

var testPrivValidator = `{
var testPrivValidatorKey = `{
"address": "A3258DCBF45DCA0DF052981870F2D1441A36D145",
"pub_key": {
"type": "tendermint/PubKeyEd25519",
@@ -383,8 +388,11 @@ var testPrivValidator = `{
"priv_key": {
"type": "tendermint/PrivKeyEd25519",
"value": "EVkqJO/jIXp3rkASXfh9YnyToYXRXhBr6g9cQVxPFnQBP/5povV4HTjvsy530kybxKHwEi85iU8YL0qQhSYVoQ=="
},
"last_height": "0",
"last_round": "0",
"last_step": 0
}
}`

var testPrivValidatorState = `{
"height": "0",
"round": "0",
"step": 0
}`
@@ -48,6 +48,7 @@ func TestEnsureTestRoot(t *testing.T) {

// create root dir
cfg := ResetTestRoot(testName)
defer os.RemoveAll(cfg.RootDir)
rootDir := cfg.RootDir

// make sure config is set properly
@@ -60,7 +61,7 @@ func TestEnsureTestRoot(t *testing.T) {

// TODO: make sure the cfg returned and testconfig are the same!
baseConfig := DefaultBaseConfig()
ensureFiles(t, rootDir, defaultDataDir, baseConfig.Genesis, baseConfig.PrivValidator)
ensureFiles(t, rootDir, defaultDataDir, baseConfig.Genesis, baseConfig.PrivValidatorKey, baseConfig.PrivValidatorState)
}

func checkConfig(configFile string) bool {
@@ -13,10 +13,6 @@ import (
"github.com/tendermint/tendermint/types"
)

func init() {
config = ResetConfig("consensus_byzantine_test")
}

//----------------------------------------------
// byzantine failures

@@ -29,7 +25,8 @@ func init() {
func TestByzantine(t *testing.T) {
N := 4
logger := consensusLogger().With("test", "byzantine")
css := randConsensusNet(N, "consensus_byzantine_test", newMockTickerFunc(false), newCounter)
css, cleanup := randConsensusNet(N, "consensus_byzantine_test", newMockTickerFunc(false), newCounter)
defer cleanup()

// give the byzantine validator a normal ticker
ticker := NewTimeoutTicker()
@@ -76,8 +73,7 @@ func TestByzantine(t *testing.T) {
conR.SetLogger(logger.With("validator", i))
conR.SetEventBus(eventBus)

var conRI p2p.Reactor // nolint: gotype, gosimple
conRI = conR
var conRI p2p.Reactor = conR

// make first val byzantine
if i == 0 {
@@ -6,14 +6,18 @@ import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"sort"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/go-kit/kit/log/term"
|
||||
|
||||
abcicli "github.com/tendermint/tendermint/abci/client"
|
||||
"github.com/tendermint/tendermint/abci/example/counter"
|
||||
"github.com/tendermint/tendermint/abci/example/kvstore"
|
||||
abci "github.com/tendermint/tendermint/abci/types"
|
||||
bc "github.com/tendermint/tendermint/blockchain"
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
@@ -27,19 +31,19 @@ import (
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
tmtime "github.com/tendermint/tendermint/types/time"
|
||||
|
||||
"github.com/tendermint/tendermint/abci/example/counter"
|
||||
"github.com/tendermint/tendermint/abci/example/kvstore"
|
||||
|
||||
"github.com/go-kit/kit/log/term"
|
||||
)
|
||||
|
||||
const (
testSubscriber = "test-client"
)

// A cleanupFunc cleans up any config / test files created for a particular
// test.
type cleanupFunc func()

// genesis, chain_id, priv_val
var config *cfg.Config // NOTE: must be reset for each _test.go file
var consensusReplayConfig *cfg.Config
var ensureTimeout = time.Millisecond * 100

func ensureDir(dir string, mode os.FileMode) {
@@ -72,9 +76,10 @@ func NewValidatorStub(privValidator types.PrivValidator, valIndex int) *validato
}

func (vs *validatorStub) signVote(voteType types.SignedMsgType, hash []byte, header types.PartSetHeader) (*types.Vote, error) {
addr := vs.PrivValidator.GetPubKey().Address()
vote := &types.Vote{
ValidatorIndex: vs.Index,
ValidatorAddress: vs.PrivValidator.GetAddress(),
ValidatorAddress: addr,
Height: vs.Height,
Round: vs.Round,
Timestamp: tmtime.Now(),
@@ -124,15 +129,21 @@ func startTestRound(cs *ConsensusState, height int64, round int) {

// Create proposal block from cs1 but sign it with vs
func decideProposal(cs1 *ConsensusState, vs *validatorStub, height int64, round int) (proposal *types.Proposal, block *types.Block) {
cs1.mtx.Lock()
block, blockParts := cs1.createProposalBlock()
cs1.mtx.Unlock()
if block == nil { // on error
panic("error creating proposal block")
}

// Make proposal
polRound, propBlockID := cs1.ValidRound, types.BlockID{block.Hash(), blockParts.Header()}
cs1.mtx.RLock()
validRound := cs1.ValidRound
chainID := cs1.state.ChainID
cs1.mtx.RUnlock()
polRound, propBlockID := validRound, types.BlockID{block.Hash(), blockParts.Header()}
proposal = types.NewProposal(height, round, polRound, propBlockID)
if err := vs.SignProposal(cs1.state.ChainID, proposal); err != nil {
if err := vs.SignProposal(chainID, proposal); err != nil {
panic(err)
}
return
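The reworked locking copies the fields it needs out of cs1 under the read lock and releases it before signing, so SignProposal never runs while the consensus mutex is held. The same pattern in isolation, with a hypothetical struct:

package main

import (
	"fmt"
	"sync"
)

type shared struct {
	mtx        sync.RWMutex
	validRound int
	chainID    string
}

// snapshot copies shared fields under the read lock; callers then work on
// the copies without holding the lock, as decideProposal now does.
func (s *shared) snapshot() (int, string) {
	s.mtx.RLock()
	defer s.mtx.RUnlock()
	return s.validRound, s.chainID
}

func main() {
	s := &shared{validRound: 1, chainID: "test-chain"}
	round, id := s.snapshot()
	fmt.Println(round, id) // e.g. sign a proposal using these copies
}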
|
||||
@@ -151,8 +162,9 @@ func signAddVotes(to *ConsensusState, voteType types.SignedMsgType, hash []byte,
|
||||
|
||||
func validatePrevote(t *testing.T, cs *ConsensusState, round int, privVal *validatorStub, blockHash []byte) {
|
||||
prevotes := cs.Votes.Prevotes(round)
|
||||
address := privVal.GetPubKey().Address()
|
||||
var vote *types.Vote
|
||||
if vote = prevotes.GetByAddress(privVal.GetAddress()); vote == nil {
|
||||
if vote = prevotes.GetByAddress(address); vote == nil {
|
||||
panic("Failed to find prevote from validator")
|
||||
}
|
||||
if blockHash == nil {
|
||||
@@ -168,8 +180,9 @@ func validatePrevote(t *testing.T, cs *ConsensusState, round int, privVal *valid
|
||||
|
||||
func validateLastPrecommit(t *testing.T, cs *ConsensusState, privVal *validatorStub, blockHash []byte) {
|
||||
votes := cs.LastCommit
|
||||
address := privVal.GetPubKey().Address()
|
||||
var vote *types.Vote
|
||||
if vote = votes.GetByAddress(privVal.GetAddress()); vote == nil {
|
||||
if vote = votes.GetByAddress(address); vote == nil {
|
||||
panic("Failed to find precommit from validator")
|
||||
}
|
||||
if !bytes.Equal(vote.BlockID.Hash, blockHash) {
|
||||
@@ -179,8 +192,9 @@ func validateLastPrecommit(t *testing.T, cs *ConsensusState, privVal *validatorS
|
||||
|
||||
func validatePrecommit(t *testing.T, cs *ConsensusState, thisRound, lockRound int, privVal *validatorStub, votedBlockHash, lockedBlockHash []byte) {
|
||||
precommits := cs.Votes.Precommits(thisRound)
|
||||
address := privVal.GetPubKey().Address()
|
||||
var vote *types.Vote
|
||||
if vote = precommits.GetByAddress(privVal.GetAddress()); vote == nil {
|
||||
if vote = precommits.GetByAddress(address); vote == nil {
|
||||
panic("Failed to find precommit from validator")
|
||||
}
|
||||
|
||||
@@ -239,6 +253,7 @@ func subscribeToVoter(cs *ConsensusState, addr []byte) chan interface{} {
// consensus states

func newConsensusState(state sm.State, pv types.PrivValidator, app abci.Application) *ConsensusState {
config := cfg.ResetTestRoot("consensus_state_test")
return newConsensusStateWithConfig(config, state, pv, app)
}

@@ -281,9 +296,10 @@ func newConsensusStateWithConfigAndBlockStore(thisConfig *cfg.Config, state sm.S
}

func loadPrivValidator(config *cfg.Config) *privval.FilePV {
privValidatorFile := config.PrivValidatorFile()
ensureDir(path.Dir(privValidatorFile), 0700)
privValidator := privval.LoadOrGenFilePV(privValidatorFile)
privValidatorKeyFile := config.PrivValidatorKeyFile()
ensureDir(filepath.Dir(privValidatorKeyFile), 0700)
privValidatorStateFile := config.PrivValidatorStateFile()
privValidator := privval.LoadOrGenFilePV(privValidatorKeyFile, privValidatorStateFile)
privValidator.Reset()
return privValidator
}
@@ -374,36 +390,6 @@ func ensureNewEvent(
|
||||
}
|
||||
}
|
||||
|
||||
func ensureNewRoundStep(stepCh <-chan interface{}, height int64, round int) {
|
||||
ensureNewEvent(
|
||||
stepCh,
|
||||
height,
|
||||
round,
|
||||
ensureTimeout,
|
||||
"Timeout expired while waiting for NewStep event")
|
||||
}
|
||||
|
||||
func ensureNewVote(voteCh <-chan interface{}, height int64, round int) {
|
||||
select {
|
||||
case <-time.After(ensureTimeout):
|
||||
break
|
||||
case v := <-voteCh:
|
||||
edv, ok := v.(types.EventDataVote)
|
||||
if !ok {
|
||||
panic(fmt.Sprintf("expected a *types.Vote, "+
|
||||
"got %v. wrong subscription channel?",
|
||||
reflect.TypeOf(v)))
|
||||
}
|
||||
vote := edv.Vote
|
||||
if vote.Height != height {
|
||||
panic(fmt.Sprintf("expected height %v, got %v", height, vote.Height))
|
||||
}
|
||||
if vote.Round != round {
|
||||
panic(fmt.Sprintf("expected round %v, got %v", round, vote.Round))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func ensureNewRound(roundCh <-chan interface{}, height int64, round int) {
|
||||
select {
|
||||
case <-time.After(ensureTimeout):
|
||||
@@ -425,22 +411,8 @@ func ensureNewRound(roundCh <-chan interface{}, height int64, round int) {
|
||||
}
|
||||
}
|
||||
|
||||
func ensureProposalHeartbeat(heartbeatCh <-chan interface{}) {
|
||||
select {
|
||||
case <-time.After(ensureTimeout):
|
||||
panic("Timeout expired while waiting for ProposalHeartbeat event")
|
||||
case ev := <-heartbeatCh:
|
||||
heartbeat, ok := ev.(types.EventDataProposalHeartbeat)
|
||||
if !ok {
|
||||
panic(fmt.Sprintf("expected a *types.EventDataProposalHeartbeat, "+
|
||||
"got %v. wrong subscription channel?",
|
||||
reflect.TypeOf(heartbeat)))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func ensureNewTimeout(timeoutCh <-chan interface{}, height int64, round int, timeout int64) {
|
||||
timeoutDuration := time.Duration(timeout*3) * time.Nanosecond
|
||||
timeoutDuration := time.Duration(timeout*5) * time.Nanosecond
|
||||
ensureNewEvent(timeoutCh, height, round, timeoutDuration,
|
||||
"Timeout expired while waiting for NewTimeout event")
|
||||
}
|
||||
@@ -594,18 +566,21 @@ func consensusLogger() log.Logger {
}).With("module", "consensus")
}

func randConsensusNet(nValidators int, testName string, tickerFunc func() TimeoutTicker, appFunc func() abci.Application, configOpts ...func(*cfg.Config)) []*ConsensusState {
func randConsensusNet(nValidators int, testName string, tickerFunc func() TimeoutTicker,
appFunc func() abci.Application, configOpts ...func(*cfg.Config)) ([]*ConsensusState, cleanupFunc) {
genDoc, privVals := randGenesisDoc(nValidators, false, 30)
css := make([]*ConsensusState, nValidators)
logger := consensusLogger()
configRootDirs := make([]string, 0, nValidators)
for i := 0; i < nValidators; i++ {
stateDB := dbm.NewMemDB() // each state needs its own db
state, _ := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc)
thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
configRootDirs = append(configRootDirs, thisConfig.RootDir)
for _, opt := range configOpts {
opt(thisConfig)
}
ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
ensureDir(filepath.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
app := appFunc()
vals := types.TM2PB.ValidatorUpdates(state.Validators)
app.InitChain(abci.RequestInitChain{Validators: vals})
@@ -614,28 +589,41 @@ func randConsensusNet(nValidators int, testName string, tickerFunc func() Timeou
css[i].SetTimeoutTicker(tickerFunc())
css[i].SetLogger(logger.With("validator", i, "module", "consensus"))
}
return css
return css, func() {
for _, dir := range configRootDirs {
os.RemoveAll(dir)
}
}
}
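Call sites now receive the cleanup closure along with the consensus states and defer it, exactly as the updated tests in this diff do; a condensed usage sketch:

package consensus

import "testing"

func TestRandConsensusNetUsage(t *testing.T) {
	css, cleanup := randConsensusNet(4, "consensus_net_usage_test", newMockTickerFunc(true), newCounter)
	defer cleanup() // removes every per-validator config root created above
	if len(css) != 4 {
		t.Fatalf("expected 4 consensus states, got %d", len(css))
	}
}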
|
||||
|
||||
// nPeers = nValidators + nNotValidator
|
||||
func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerFunc func() TimeoutTicker, appFunc func() abci.Application) []*ConsensusState {
|
||||
func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerFunc func() TimeoutTicker,
|
||||
appFunc func() abci.Application) ([]*ConsensusState, cleanupFunc) {
|
||||
|
||||
genDoc, privVals := randGenesisDoc(nValidators, false, testMinPower)
|
||||
css := make([]*ConsensusState, nPeers)
|
||||
logger := consensusLogger()
|
||||
configRootDirs := make([]string, 0, nPeers)
|
||||
for i := 0; i < nPeers; i++ {
|
||||
stateDB := dbm.NewMemDB() // each state needs its own db
|
||||
state, _ := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc)
|
||||
thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
|
||||
ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
|
||||
configRootDirs = append(configRootDirs, thisConfig.RootDir)
|
||||
ensureDir(filepath.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
|
||||
var privVal types.PrivValidator
|
||||
if i < nValidators {
|
||||
privVal = privVals[i]
|
||||
} else {
|
||||
tempFile, err := ioutil.TempFile("", "priv_validator_")
|
||||
tempKeyFile, err := ioutil.TempFile("", "priv_validator_key_")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
privVal = privval.GenFilePV(tempFile.Name())
|
||||
tempStateFile, err := ioutil.TempFile("", "priv_validator_state_")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
privVal = privval.GenFilePV(tempKeyFile.Name(), tempStateFile.Name())
|
||||
}
|
||||
|
||||
app := appFunc()
|
||||
@@ -646,7 +634,11 @@ func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerF
|
||||
css[i].SetTimeoutTicker(tickerFunc())
|
||||
css[i].SetLogger(logger.With("validator", i, "module", "consensus"))
|
||||
}
|
||||
return css
|
||||
return css, func() {
|
||||
for _, dir := range configRootDirs {
|
||||
os.RemoveAll(dir)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func getSwitchIndex(switches []*p2p.Switch, peer p2p.Peer) int {
|
||||
@@ -656,7 +648,6 @@ func getSwitchIndex(switches []*p2p.Switch, peer p2p.Peer) int {
|
||||
}
|
||||
}
|
||||
panic("didnt find peer in switches")
|
||||
return -1
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------------------
|
||||
@@ -734,8 +725,7 @@ func (m *mockTicker) Chan() <-chan timeoutInfo {
|
||||
return m.c
|
||||
}
|
||||
|
||||
func (mockTicker) SetLogger(log.Logger) {
|
||||
}
|
||||
func (*mockTicker) SetLogger(log.Logger) {}
|
||||
|
||||
//------------------------------------
|
||||
|
||||
@@ -744,6 +734,9 @@ func newCounter() abci.Application {
|
||||
}
|
||||
|
||||
func newPersistentKVStore() abci.Application {
|
||||
dir, _ := ioutil.TempDir("/tmp", "persistent-kvstore")
|
||||
dir, err := ioutil.TempDir("", "persistent-kvstore")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return kvstore.NewPersistentKVStoreApplication(dir)
|
||||
}
|
||||
|
@@ -3,6 +3,7 @@ package consensus

import (
"encoding/binary"
"fmt"
"os"
"testing"
"time"

@@ -10,19 +11,22 @@ import (

"github.com/tendermint/tendermint/abci/example/code"
abci "github.com/tendermint/tendermint/abci/types"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
)

func init() {
config = ResetConfig("consensus_mempool_test")
// for testing
func assertMempool(txn txNotifier) sm.Mempool {
return txn.(sm.Mempool)
}
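The assertion works because ConsensusState now holds the mempool behind the narrow txNotifier interface (see the state.go hunk at the end of this diff), while the object the tests inject still implements the full sm.Mempool, so the type assertion simply recovers the wider view. The same pattern in isolation, with hypothetical names:

package main

import "fmt"

// narrow view kept by the consumer
type notifier interface {
	TxsAvailable() <-chan struct{}
}

// wider interface the tests want back
type fullMempool interface {
	notifier
	Size() int
}

type mem struct{ ch chan struct{} }

func (m *mem) TxsAvailable() <-chan struct{} { return m.ch }
func (m *mem) Size() int                     { return 0 }

func main() {
	var n notifier = &mem{ch: make(chan struct{})}
	m := n.(fullMempool) // recover the wider interface, as assertMempool does
	fmt.Println(m.Size())
}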
|
||||
|
||||
func TestMempoolNoProgressUntilTxsAvailable(t *testing.T) {
|
||||
config := ResetConfig("consensus_mempool_txs_available_test")
|
||||
defer os.RemoveAll(config.RootDir)
|
||||
config.Consensus.CreateEmptyBlocks = false
|
||||
state, privVals := randGenesisState(1, false, 10)
|
||||
cs := newConsensusStateWithConfig(config, state, privVals[0], NewCounterApplication())
|
||||
cs.mempool.EnableTxsAvailable()
|
||||
assertMempool(cs.txNotifier).EnableTxsAvailable()
|
||||
height, round := cs.Height, cs.Round
|
||||
newBlockCh := subscribe(cs.eventBus, types.EventQueryNewBlock)
|
||||
startTestRound(cs, height, round)
|
||||
@@ -37,10 +41,11 @@ func TestMempoolNoProgressUntilTxsAvailable(t *testing.T) {
|
||||
|
||||
func TestMempoolProgressAfterCreateEmptyBlocksInterval(t *testing.T) {
|
||||
config := ResetConfig("consensus_mempool_txs_available_test")
|
||||
defer os.RemoveAll(config.RootDir)
|
||||
config.Consensus.CreateEmptyBlocksInterval = ensureTimeout
|
||||
state, privVals := randGenesisState(1, false, 10)
|
||||
cs := newConsensusStateWithConfig(config, state, privVals[0], NewCounterApplication())
|
||||
cs.mempool.EnableTxsAvailable()
|
||||
assertMempool(cs.txNotifier).EnableTxsAvailable()
|
||||
height, round := cs.Height, cs.Round
|
||||
newBlockCh := subscribe(cs.eventBus, types.EventQueryNewBlock)
|
||||
startTestRound(cs, height, round)
|
||||
@@ -52,10 +57,11 @@ func TestMempoolProgressAfterCreateEmptyBlocksInterval(t *testing.T) {
|
||||
|
||||
func TestMempoolProgressInHigherRound(t *testing.T) {
|
||||
config := ResetConfig("consensus_mempool_txs_available_test")
|
||||
defer os.RemoveAll(config.RootDir)
|
||||
config.Consensus.CreateEmptyBlocks = false
|
||||
state, privVals := randGenesisState(1, false, 10)
|
||||
cs := newConsensusStateWithConfig(config, state, privVals[0], NewCounterApplication())
|
||||
cs.mempool.EnableTxsAvailable()
|
||||
assertMempool(cs.txNotifier).EnableTxsAvailable()
|
||||
height, round := cs.Height, cs.Round
|
||||
newBlockCh := subscribe(cs.eventBus, types.EventQueryNewBlock)
|
||||
newRoundCh := subscribe(cs.eventBus, types.EventQueryNewRound)
|
||||
@@ -91,7 +97,7 @@ func deliverTxsRange(cs *ConsensusState, start, end int) {
|
||||
for i := start; i < end; i++ {
|
||||
txBytes := make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(txBytes, uint64(i))
|
||||
err := cs.mempool.CheckTx(txBytes, nil)
|
||||
err := assertMempool(cs.txNotifier).CheckTx(txBytes, nil)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Error after CheckTx: %v", err))
|
||||
}
|
||||
@@ -141,7 +147,7 @@ func TestMempoolRmBadTx(t *testing.T) {
|
||||
// Try to send the tx through the mempool.
|
||||
// CheckTx should not err, but the app should return a bad abci code
|
||||
// and the tx should get removed from the pool
|
||||
err := cs.mempool.CheckTx(txBytes, func(r *abci.Response) {
|
||||
err := assertMempool(cs.txNotifier).CheckTx(txBytes, func(r *abci.Response) {
|
||||
if r.GetCheckTx().Code != code.CodeTypeBadNonce {
|
||||
t.Fatalf("expected checktx to return bad nonce, got %v", r)
|
||||
}
|
||||
@@ -153,7 +159,7 @@ func TestMempoolRmBadTx(t *testing.T) {
|
||||
|
||||
// check for the tx
|
||||
for {
|
||||
txs := cs.mempool.ReapMaxBytesMaxGas(int64(len(txBytes)), -1)
|
||||
txs := assertMempool(cs.txNotifier).ReapMaxBytesMaxGas(int64(len(txBytes)), -1)
|
||||
if len(txs) == 0 {
|
||||
emptyMempoolCh <- struct{}{}
|
||||
return
|
||||
|
@@ -8,7 +8,11 @@ import (
stdprometheus "github.com/prometheus/client_golang/prometheus"
)

const MetricsSubsystem = "consensus"
const (
// MetricsSubsystem is a subsystem shared by all metrics exposed by this
// package.
MetricsSubsystem = "consensus"
)

// Metrics contains metrics exposed by this package.
type Metrics struct {
@@ -50,101 +54,107 @@ type Metrics struct {
}

// PrometheusMetrics returns Metrics build using Prometheus client library.
func PrometheusMetrics(namespace string) *Metrics {
// Optionally, labels can be provided along with their values ("foo",
// "fooValue").
func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {
labels := []string{}
for i := 0; i < len(labelsAndValues); i += 2 {
labels = append(labels, labelsAndValues[i])
}
return &Metrics{
|
||||
Height: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
Name: "height",
|
||||
Help: "Height of the chain.",
|
||||
}, []string{}),
|
||||
}, labels).With(labelsAndValues...),
|
||||
Rounds: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
Name: "rounds",
|
||||
Help: "Number of rounds.",
|
||||
}, []string{}),
|
||||
}, labels).With(labelsAndValues...),
|
||||
|
||||
Validators: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
Name: "validators",
|
||||
Help: "Number of validators.",
|
||||
}, []string{}),
|
||||
}, labels).With(labelsAndValues...),
|
||||
ValidatorsPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
Name: "validators_power",
|
||||
Help: "Total power of all validators.",
|
||||
}, []string{}),
|
||||
}, labels).With(labelsAndValues...),
|
||||
MissingValidators: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
Name: "missing_validators",
|
||||
Help: "Number of validators who did not sign.",
|
||||
}, []string{}),
|
||||
}, labels).With(labelsAndValues...),
|
||||
MissingValidatorsPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
Name: "missing_validators_power",
|
||||
Help: "Total power of the missing validators.",
|
||||
}, []string{}),
|
||||
}, labels).With(labelsAndValues...),
|
||||
ByzantineValidators: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
Name: "byzantine_validators",
|
||||
Help: "Number of validators who tried to double sign.",
|
||||
}, []string{}),
|
||||
}, labels).With(labelsAndValues...),
|
||||
ByzantineValidatorsPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
Name: "byzantine_validators_power",
|
||||
Help: "Total power of the byzantine validators.",
|
||||
}, []string{}),
|
||||
}, labels).With(labelsAndValues...),
|
||||
|
||||
BlockIntervalSeconds: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
Name: "block_interval_seconds",
|
||||
Help: "Time between this and the last block.",
|
||||
}, []string{}),
|
||||
}, labels).With(labelsAndValues...),
|
||||
|
||||
NumTxs: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
Name: "num_txs",
|
||||
Help: "Number of transactions.",
|
||||
}, []string{}),
|
||||
}, labels).With(labelsAndValues...),
|
||||
BlockSizeBytes: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
Name: "block_size_bytes",
|
||||
Help: "Size of the block.",
|
||||
}, []string{}),
|
||||
}, labels).With(labelsAndValues...),
|
||||
TotalTxs: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
Name: "total_txs",
|
||||
Help: "Total number of transactions.",
|
||||
}, []string{}),
|
||||
}, labels).With(labelsAndValues...),
|
||||
CommittedHeight: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
Name: "latest_block_height",
|
||||
Help: "The latest block height.",
|
||||
}, []string{}),
|
||||
}, labels).With(labelsAndValues...),
|
||||
FastSyncing: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
Name: "fast_syncing",
|
||||
Help: "Whether or not a node is fast syncing. 1 if yes, 0 if no.",
|
||||
}, []string{}),
|
||||
}, labels).With(labelsAndValues...),
|
||||
BlockParts: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
Name: "block_parts",
|
||||
Help: "Number of blockparts transmitted by peer.",
|
||||
}, []string{"peer_id"}),
|
||||
}, append(labels, "peer_id")).With(labelsAndValues...),
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -264,11 +264,6 @@ func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
|
||||
BlockID: msg.BlockID,
|
||||
Votes: ourVotes,
|
||||
}))
|
||||
case *ProposalHeartbeatMessage:
|
||||
hb := msg.Heartbeat
|
||||
conR.Logger.Debug("Received proposal heartbeat message",
|
||||
"height", hb.Height, "round", hb.Round, "sequence", hb.Sequence,
|
||||
"valIdx", hb.ValidatorIndex, "valAddr", hb.ValidatorAddress)
|
||||
default:
|
||||
conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
|
||||
}
|
||||
@@ -369,8 +364,8 @@ func (conR *ConsensusReactor) FastSync() bool {
|
||||
|
||||
//--------------------------------------
|
||||
|
||||
// subscribeToBroadcastEvents subscribes for new round steps, votes and
|
||||
// proposal heartbeats using internal pubsub defined on state to broadcast
|
||||
// subscribeToBroadcastEvents subscribes for new round steps and votes
|
||||
// using internal pubsub defined on state to broadcast
|
||||
// them to peers upon receiving.
|
||||
func (conR *ConsensusReactor) subscribeToBroadcastEvents() {
|
||||
const subscriber = "consensus-reactor"
|
||||
@@ -389,10 +384,6 @@ func (conR *ConsensusReactor) subscribeToBroadcastEvents() {
|
||||
conR.broadcastHasVoteMessage(data.(*types.Vote))
|
||||
})
|
||||
|
||||
conR.conS.evsw.AddListenerForEvent(subscriber, types.EventProposalHeartbeat,
|
||||
func(data tmevents.EventData) {
|
||||
conR.broadcastProposalHeartbeatMessage(data.(*types.Heartbeat))
|
||||
})
|
||||
}
|
||||
|
||||
func (conR *ConsensusReactor) unsubscribeFromBroadcastEvents() {
|
||||
@@ -400,13 +391,6 @@ func (conR *ConsensusReactor) unsubscribeFromBroadcastEvents() {
|
||||
conR.conS.evsw.RemoveListener(subscriber)
|
||||
}
|
||||
|
||||
func (conR *ConsensusReactor) broadcastProposalHeartbeatMessage(hb *types.Heartbeat) {
|
||||
conR.Logger.Debug("Broadcasting proposal heartbeat message",
|
||||
"height", hb.Height, "round", hb.Round, "sequence", hb.Sequence, "address", hb.ValidatorAddress)
|
||||
msg := &ProposalHeartbeatMessage{hb}
|
||||
conR.Switch.Broadcast(StateChannel, cdc.MustMarshalBinaryBare(msg))
|
||||
}
|
||||
|
||||
func (conR *ConsensusReactor) broadcastNewRoundStepMessage(rs *cstypes.RoundState) {
|
||||
nrsMsg := makeRoundStepMessage(rs)
|
||||
conR.Switch.Broadcast(StateChannel, cdc.MustMarshalBinaryBare(nrsMsg))
|
||||
@@ -454,9 +438,9 @@ func (conR *ConsensusReactor) broadcastHasVoteMessage(vote *types.Vote) {
|
||||
|
||||
func makeRoundStepMessage(rs *cstypes.RoundState) (nrsMsg *NewRoundStepMessage) {
|
||||
nrsMsg = &NewRoundStepMessage{
|
||||
Height: rs.Height,
|
||||
Round: rs.Round,
|
||||
Step: rs.Step,
|
||||
Height: rs.Height,
|
||||
Round: rs.Round,
|
||||
Step: rs.Step,
|
||||
SecondsSinceStartTime: int(time.Since(rs.StartTime).Seconds()),
|
||||
LastCommitRound: rs.LastCommit.Round(),
|
||||
}
|
||||
@@ -912,7 +896,7 @@ type PeerState struct {
|
||||
peer p2p.Peer
|
||||
logger log.Logger
|
||||
|
||||
mtx sync.Mutex `json:"-"` // NOTE: Modify below using setters, never directly.
|
||||
mtx sync.Mutex // NOTE: Modify below using setters, never directly.
|
||||
PRS cstypes.PeerRoundState `json:"round_state"` // Exposed.
|
||||
Stats *peerStateStats `json:"stats"` // Exposed.
|
||||
}
|
||||
@@ -1387,7 +1371,6 @@ func RegisterConsensusMessages(cdc *amino.Codec) {
|
||||
cdc.RegisterConcrete(&HasVoteMessage{}, "tendermint/HasVote", nil)
|
||||
cdc.RegisterConcrete(&VoteSetMaj23Message{}, "tendermint/VoteSetMaj23", nil)
|
||||
cdc.RegisterConcrete(&VoteSetBitsMessage{}, "tendermint/VoteSetBits", nil)
|
||||
cdc.RegisterConcrete(&ProposalHeartbeatMessage{}, "tendermint/ProposalHeartbeat", nil)
|
||||
}
|
||||
|
||||
func decodeMsg(bz []byte) (msg ConsensusMessage, err error) {
|
||||
@@ -1664,18 +1647,3 @@ func (m *VoteSetBitsMessage) String() string {
|
||||
}
|
||||
|
||||
//-------------------------------------
|
||||
|
||||
// ProposalHeartbeatMessage is sent to signal that a node is alive and waiting for transactions for a proposal.
|
||||
type ProposalHeartbeatMessage struct {
|
||||
Heartbeat *types.Heartbeat
|
||||
}
|
||||
|
||||
// ValidateBasic performs basic validation.
|
||||
func (m *ProposalHeartbeatMessage) ValidateBasic() error {
|
||||
return m.Heartbeat.ValidateBasic()
|
||||
}
|
||||
|
||||
// String returns a string representation.
|
||||
func (m *ProposalHeartbeatMessage) String() string {
|
||||
return fmt.Sprintf("[HEARTBEAT %v]", m.Heartbeat)
|
||||
}
|
||||
|
@@ -14,7 +14,7 @@ import (
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/tendermint/tendermint/abci/client"
|
||||
abcicli "github.com/tendermint/tendermint/abci/client"
|
||||
"github.com/tendermint/tendermint/abci/example/kvstore"
|
||||
abci "github.com/tendermint/tendermint/abci/types"
|
||||
bc "github.com/tendermint/tendermint/blockchain"
|
||||
@@ -27,10 +27,6 @@ import (
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
func init() {
|
||||
config = ResetConfig("consensus_reactor_test")
|
||||
}
|
||||
|
||||
//----------------------------------------------
|
||||
// in-process testnets
|
||||
|
||||
@@ -86,7 +82,8 @@ func stopConsensusNet(logger log.Logger, reactors []*ConsensusReactor, eventBuse
|
||||
// Ensure a testnet makes blocks
|
||||
func TestReactorBasic(t *testing.T) {
|
||||
N := 4
|
||||
css := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
|
||||
css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
|
||||
defer cleanup()
|
||||
reactors, eventChans, eventBuses := startConsensusNet(t, css, N)
|
||||
defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
|
||||
// wait till everyone makes the first new block
|
||||
@@ -116,6 +113,7 @@ func TestReactorWithEvidence(t *testing.T) {
|
||||
stateDB := dbm.NewMemDB() // each state needs its own db
|
||||
state, _ := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc)
|
||||
thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
|
||||
defer os.RemoveAll(thisConfig.RootDir)
|
||||
ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
|
||||
app := appFunc()
|
||||
vals := types.TM2PB.ValidatorUpdates(state.Validators)
|
||||
@@ -143,7 +141,8 @@ func TestReactorWithEvidence(t *testing.T) {
|
||||
// mock the evidence pool
|
||||
// everyone includes evidence of another double signing
|
||||
vIdx := (i + 1) % nValidators
|
||||
evpool := newMockEvidencePool(privVals[vIdx].GetAddress())
|
||||
addr := privVals[vIdx].GetPubKey().Address()
|
||||
evpool := newMockEvidencePool(addr)
|
||||
|
||||
// Make ConsensusState
|
||||
blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyAppConnCon, mempool, evpool)
|
||||
@@ -210,32 +209,23 @@ func (m *mockEvidencePool) Update(block *types.Block, state sm.State) {
|
||||
}
|
||||
m.height++
|
||||
}
|
||||
func (m *mockEvidencePool) IsCommitted(types.Evidence) bool { return false }
|
||||
|
||||
//------------------------------------
|
||||
|
||||
// Ensure a testnet sends proposal heartbeats and makes blocks when there are txs
|
||||
func TestReactorProposalHeartbeats(t *testing.T) {
|
||||
// Ensure a testnet makes blocks when there are txs
|
||||
func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) {
|
||||
N := 4
|
||||
css := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter,
|
||||
css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter,
|
||||
func(c *cfg.Config) {
|
||||
c.Consensus.CreateEmptyBlocks = false
|
||||
})
|
||||
defer cleanup()
|
||||
reactors, eventChans, eventBuses := startConsensusNet(t, css, N)
|
||||
defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
|
||||
heartbeatChans := make([]chan interface{}, N)
|
||||
var err error
|
||||
for i := 0; i < N; i++ {
|
||||
heartbeatChans[i] = make(chan interface{}, 1)
|
||||
err = eventBuses[i].Subscribe(context.Background(), testSubscriber, types.EventQueryProposalHeartbeat, heartbeatChans[i])
|
||||
require.NoError(t, err)
|
||||
}
|
||||
// wait till everyone sends a proposal heartbeat
|
||||
timeoutWaitGroup(t, N, func(j int) {
|
||||
<-heartbeatChans[j]
|
||||
}, css)
|
||||
|
||||
// send a tx
|
||||
if err := css[3].mempool.CheckTx([]byte{1, 2, 3}, nil); err != nil {
|
||||
if err := assertMempool(css[3].txNotifier).CheckTx([]byte{1, 2, 3}, nil); err != nil {
|
||||
//t.Fatal(err)
|
||||
}
|
||||
|
||||
@@ -248,7 +238,8 @@ func TestReactorProposalHeartbeats(t *testing.T) {
|
||||
// Test we record stats about votes and block parts from other peers.
|
||||
func TestReactorRecordsVotesAndBlockParts(t *testing.T) {
|
||||
N := 4
|
||||
css := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
|
||||
css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
|
||||
defer cleanup()
|
||||
reactors, eventChans, eventBuses := startConsensusNet(t, css, N)
|
||||
defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
|
||||
|
||||
@@ -272,14 +263,16 @@ func TestReactorRecordsVotesAndBlockParts(t *testing.T) {
|
||||
func TestReactorVotingPowerChange(t *testing.T) {
|
||||
nVals := 4
|
||||
logger := log.TestingLogger()
|
||||
css := randConsensusNet(nVals, "consensus_voting_power_changes_test", newMockTickerFunc(true), newPersistentKVStore)
|
||||
css, cleanup := randConsensusNet(nVals, "consensus_voting_power_changes_test", newMockTickerFunc(true), newPersistentKVStore)
|
||||
defer cleanup()
|
||||
reactors, eventChans, eventBuses := startConsensusNet(t, css, nVals)
|
||||
defer stopConsensusNet(logger, reactors, eventBuses)
|
||||
|
||||
// map of active validators
|
||||
activeVals := make(map[string]struct{})
|
||||
for i := 0; i < nVals; i++ {
|
||||
activeVals[string(css[i].privValidator.GetAddress())] = struct{}{}
|
||||
addr := css[i].privValidator.GetPubKey().Address()
|
||||
activeVals[string(addr)] = struct{}{}
|
||||
}
|
||||
|
||||
// wait till everyone makes block 1
|
||||
@@ -332,8 +325,8 @@ func TestReactorVotingPowerChange(t *testing.T) {
|
||||
func TestReactorValidatorSetChanges(t *testing.T) {
|
||||
nPeers := 7
|
||||
nVals := 4
|
||||
css := randConsensusNetWithPeers(nVals, nPeers, "consensus_val_set_changes_test", newMockTickerFunc(true), newPersistentKVStore)
|
||||
|
||||
css, cleanup := randConsensusNetWithPeers(nVals, nPeers, "consensus_val_set_changes_test", newMockTickerFunc(true), newPersistentKVStore)
|
||||
defer cleanup()
|
||||
logger := log.TestingLogger()
|
||||
|
||||
reactors, eventChans, eventBuses := startConsensusNet(t, css, nPeers)
|
||||
@@ -342,7 +335,8 @@ func TestReactorValidatorSetChanges(t *testing.T) {
|
||||
// map of active validators
|
||||
activeVals := make(map[string]struct{})
|
||||
for i := 0; i < nVals; i++ {
|
||||
activeVals[string(css[i].privValidator.GetAddress())] = struct{}{}
|
||||
addr := css[i].privValidator.GetPubKey().Address()
|
||||
activeVals[string(addr)] = struct{}{}
|
||||
}
|
||||
|
||||
// wait till everyone makes block 1
|
||||
@@ -429,7 +423,8 @@ func TestReactorValidatorSetChanges(t *testing.T) {
|
||||
// Check we can make blocks with skip_timeout_commit=false
|
||||
func TestReactorWithTimeoutCommit(t *testing.T) {
|
||||
N := 4
|
||||
css := randConsensusNet(N, "consensus_reactor_with_timeout_commit_test", newMockTickerFunc(false), newCounter)
|
||||
css, cleanup := randConsensusNet(N, "consensus_reactor_with_timeout_commit_test", newMockTickerFunc(false), newCounter)
|
||||
defer cleanup()
|
||||
// override default SkipTimeoutCommit == true for tests
|
||||
for i := 0; i < N; i++ {
|
||||
css[i].config.SkipTimeoutCommit = false
|
||||
@@ -456,7 +451,7 @@ func waitForAndValidateBlock(t *testing.T, n int, activeVals map[string]struct{}
|
||||
err := validateBlock(newBlock, activeVals)
|
||||
assert.Nil(t, err)
|
||||
for _, tx := range txs {
|
||||
err := css[j].mempool.CheckTx(tx, nil)
|
||||
err := assertMempool(css[j].txNotifier).CheckTx(tx, nil)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
}, css)
|
||||
|
@@ -6,12 +6,12 @@ import (
|
||||
"hash/crc32"
|
||||
"io"
|
||||
"reflect"
|
||||
|
||||
//"strconv"
|
||||
//"strings"
|
||||
"time"
|
||||
|
||||
abci "github.com/tendermint/tendermint/abci/types"
|
||||
"github.com/tendermint/tendermint/version"
|
||||
//auto "github.com/tendermint/tendermint/libs/autofile"
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
dbm "github.com/tendermint/tendermint/libs/db"
|
||||
@@ -20,6 +20,7 @@ import (
|
||||
"github.com/tendermint/tendermint/proxy"
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
"github.com/tendermint/tendermint/version"
|
||||
)
|
||||
|
||||
var crc32c = crc32.MakeTable(crc32.Castagnoli)
|
||||
@@ -143,8 +144,8 @@ func (cs *ConsensusState) catchupReplay(csHeight int64) error {
|
||||
if err == io.EOF {
|
||||
break
|
||||
} else if IsDataCorruptionError(err) {
|
||||
cs.Logger.Debug("data has been corrupted in last height of consensus WAL", "err", err, "height", csHeight)
|
||||
panic(fmt.Sprintf("data has been corrupted (%v) in last height %d of consensus WAL", err, csHeight))
|
||||
cs.Logger.Error("data has been corrupted in last height of consensus WAL", "err", err, "height", csHeight)
|
||||
return err
|
||||
} else if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -196,6 +197,7 @@ type Handshaker struct {
|
||||
stateDB dbm.DB
|
||||
initialState sm.State
|
||||
store sm.BlockStore
|
||||
eventBus types.BlockEventPublisher
|
||||
genDoc *types.GenesisDoc
|
||||
logger log.Logger
|
||||
|
||||
@@ -209,6 +211,7 @@ func NewHandshaker(stateDB dbm.DB, state sm.State,
|
||||
stateDB: stateDB,
|
||||
initialState: state,
|
||||
store: store,
|
||||
eventBus: types.NopEventBus{},
|
||||
genDoc: genDoc,
|
||||
logger: log.NewNopLogger(),
|
||||
nBlocks: 0,
|
||||
@@ -219,6 +222,12 @@ func (h *Handshaker) SetLogger(l log.Logger) {
h.logger = l
}

// SetEventBus - sets the event bus for publishing block related events.
// If not called, it defaults to types.NopEventBus.
func (h *Handshaker) SetEventBus(eventBus types.BlockEventPublisher) {
h.eventBus = eventBus
}
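The intended call order is construct, attach the bus, then handshake, so that blocks replayed during the handshake publish their events; this is exactly what the replay_file change later in this diff does, condensed here:

handshaker := NewHandshaker(stateDB, state, blockStore, gdoc)
handshaker.SetEventBus(eventBus) // before Handshake, so replayed blocks emit events
if err := handshaker.Handshake(proxyApp); err != nil {
	cmn.Exit(fmt.Sprintf("Error on handshake: %v", err))
}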
|
||||
|
||||
func (h *Handshaker) NBlocks() int {
|
||||
return h.nBlocks
|
||||
}
|
||||
@@ -247,6 +256,7 @@ func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error {
|
||||
|
||||
// Set AppVersion on the state.
|
||||
h.initialState.Version.Consensus.App = version.Protocol(res.AppVersion)
|
||||
sm.SaveState(h.stateDB, h.initialState)
|
||||
|
||||
// Replay blocks up to the latest in the blockstore.
|
||||
_, err = h.ReplayBlocks(h.initialState, appHash, blockHeight, proxyApp)
|
||||
@@ -295,19 +305,27 @@ func (h *Handshaker) ReplayBlocks(
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// If the app returned validators or consensus params, update the state.
|
||||
if len(res.Validators) > 0 {
|
||||
vals, err := types.PB2TM.ValidatorUpdates(res.Validators)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
if stateBlockHeight == 0 { //we only update state when we are in initial state
|
||||
// If the app returned validators or consensus params, update the state.
|
||||
if len(res.Validators) > 0 {
|
||||
vals, err := types.PB2TM.ValidatorUpdates(res.Validators)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
state.Validators = types.NewValidatorSet(vals)
|
||||
state.NextValidators = types.NewValidatorSet(vals)
|
||||
} else {
|
||||
// If validator set is not set in genesis and still empty after InitChain, exit.
|
||||
if len(h.genDoc.Validators) == 0 {
|
||||
return nil, fmt.Errorf("Validator set is nil in genesis and still empty after InitChain")
|
||||
}
|
||||
}
|
||||
state.Validators = types.NewValidatorSet(vals)
|
||||
state.NextValidators = types.NewValidatorSet(vals)
|
||||
|
||||
if res.ConsensusParams != nil {
|
||||
state.ConsensusParams = types.PB2TM.ConsensusParams(res.ConsensusParams)
|
||||
}
|
||||
sm.SaveState(h.stateDB, state)
|
||||
}
|
||||
if res.ConsensusParams != nil {
|
||||
state.ConsensusParams = types.PB2TM.ConsensusParams(res.ConsensusParams)
|
||||
}
|
||||
sm.SaveState(h.stateDB, state)
|
||||
}
|
||||
|
||||
// First handle edge cases and constraints on the storeBlockHeight.
|
||||
@@ -316,7 +334,7 @@ func (h *Handshaker) ReplayBlocks(
|
||||
|
||||
} else if storeBlockHeight < appBlockHeight {
|
||||
// the app should never be ahead of the store (but this is under app's control)
|
||||
return appHash, sm.ErrAppBlockHeightTooHigh{storeBlockHeight, appBlockHeight}
|
||||
return appHash, sm.ErrAppBlockHeightTooHigh{CoreHeight: storeBlockHeight, AppHeight: appBlockHeight}
|
||||
|
||||
} else if storeBlockHeight < stateBlockHeight {
|
||||
// the state should never be ahead of the store (this is under tendermint's control)
|
||||
@@ -423,6 +441,7 @@ func (h *Handshaker) replayBlock(state sm.State, height int64, proxyApp proxy.Ap
|
||||
meta := h.store.LoadBlockMeta(height)
|
||||
|
||||
blockExec := sm.NewBlockExecutor(h.stateDB, h.logger, proxyApp, sm.MockMempool{}, sm.MockEvidencePool{})
|
||||
blockExec.SetEventBus(h.eventBus)
|
||||
|
||||
var err error
|
||||
state, err = blockExec.ApplyBlock(state, meta.BlockID, block)
|
||||
|
@@ -103,7 +103,6 @@ func (cs *ConsensusState) ReplayFile(file string, console bool) error {
|
||||
}
|
||||
pb.count++
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
//------------------------------------------------
|
||||
@@ -137,7 +136,7 @@ func (pb *playback) replayReset(count int, newStepCh chan interface{}) error {
|
||||
pb.cs.Wait()
|
||||
|
||||
newCS := NewConsensusState(pb.cs.config, pb.genesisState.Copy(), pb.cs.blockExec,
|
||||
pb.cs.blockStore, pb.cs.mempool, pb.cs.evpool)
|
||||
pb.cs.blockStore, pb.cs.txNotifier, pb.cs.evpool)
|
||||
newCS.SetEventBus(pb.cs.eventBus)
|
||||
newCS.startForReplay()
|
||||
|
||||
@@ -295,7 +294,6 @@ func (pb *playback) replayConsoleLoop() int {
|
||||
fmt.Println(pb.count)
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
//--------------------------------------------------------------------------------
|
||||
@@ -326,17 +324,18 @@ func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusCo
|
||||
cmn.Exit(fmt.Sprintf("Error starting proxy app conns: %v", err))
|
||||
}
|
||||
|
||||
handshaker := NewHandshaker(stateDB, state, blockStore, gdoc)
|
||||
err = handshaker.Handshake(proxyApp)
|
||||
if err != nil {
|
||||
cmn.Exit(fmt.Sprintf("Error on handshake: %v", err))
|
||||
}
|
||||
|
||||
eventBus := types.NewEventBus()
|
||||
if err := eventBus.Start(); err != nil {
|
||||
cmn.Exit(fmt.Sprintf("Failed to start event bus: %v", err))
|
||||
}
|
||||
|
||||
handshaker := NewHandshaker(stateDB, state, blockStore, gdoc)
|
||||
handshaker.SetEventBus(eventBus)
|
||||
err = handshaker.Handshake(proxyApp)
|
||||
if err != nil {
|
||||
cmn.Exit(fmt.Sprintf("Error on handshake: %v", err))
|
||||
}
|
||||
|
||||
mempool, evpool := sm.MockMempool{}, sm.MockEvidencePool{}
|
||||
blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool)
|
||||
|
||||
|
@@ -17,23 +17,31 @@ import (
|
||||
|
||||
"github.com/tendermint/tendermint/abci/example/kvstore"
|
||||
abci "github.com/tendermint/tendermint/abci/types"
|
||||
crypto "github.com/tendermint/tendermint/crypto"
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
"github.com/tendermint/tendermint/crypto"
|
||||
auto "github.com/tendermint/tendermint/libs/autofile"
|
||||
dbm "github.com/tendermint/tendermint/libs/db"
|
||||
"github.com/tendermint/tendermint/version"
|
||||
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/privval"
|
||||
"github.com/tendermint/tendermint/proxy"
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
"github.com/tendermint/tendermint/version"
|
||||
)
|
||||
|
||||
var consensusReplayConfig *cfg.Config
|
||||
|
||||
func init() {
|
||||
func TestMain(m *testing.M) {
|
||||
config = ResetConfig("consensus_reactor_test")
|
||||
consensusReplayConfig = ResetConfig("consensus_replay_test")
|
||||
configStateTest := ResetConfig("consensus_state_test")
|
||||
configMempoolTest := ResetConfig("consensus_mempool_test")
|
||||
configByzantineTest := ResetConfig("consensus_byzantine_test")
|
||||
code := m.Run()
|
||||
os.RemoveAll(config.RootDir)
|
||||
os.RemoveAll(consensusReplayConfig.RootDir)
|
||||
os.RemoveAll(configStateTest.RootDir)
|
||||
os.RemoveAll(configMempoolTest.RootDir)
|
||||
os.RemoveAll(configByzantineTest.RootDir)
|
||||
os.Exit(code)
|
||||
}
|
||||
|
||||
// These tests ensure we can always recover from failure at any part of the consensus process.
|
||||
@@ -51,7 +59,8 @@ func init() {
|
||||
// and which ones we need the wal for - then we'd also be able to only flush the
|
||||
// wal writer when we need to, instead of with every message.
|
||||
|
||||
func startNewConsensusStateAndWaitForBlock(t *testing.T, lastBlockHeight int64, blockDB dbm.DB, stateDB dbm.DB) {
|
||||
func startNewConsensusStateAndWaitForBlock(t *testing.T, consensusReplayConfig *cfg.Config,
|
||||
lastBlockHeight int64, blockDB dbm.DB, stateDB dbm.DB) {
|
||||
logger := log.TestingLogger()
|
||||
state, _ := sm.LoadStateFromDBOrGenesisFile(stateDB, consensusReplayConfig.GenesisFile())
|
||||
privValidator := loadPrivValidator(consensusReplayConfig)
|
||||
@@ -59,7 +68,6 @@ func startNewConsensusStateAndWaitForBlock(t *testing.T, lastBlockHeight int64,
|
||||
cs.SetLogger(logger)
|
||||
|
||||
bytes, _ := ioutil.ReadFile(cs.config.WalFile())
|
||||
// fmt.Printf("====== WAL: \n\r%s\n", bytes)
|
||||
t.Logf("====== WAL: \n\r%X\n", bytes)
|
||||
|
||||
err := cs.Start()
|
||||
@@ -87,7 +95,7 @@ func sendTxs(cs *ConsensusState, ctx context.Context) {
|
||||
return
|
||||
default:
|
||||
tx := []byte{byte(i)}
|
||||
cs.mempool.CheckTx(tx, nil)
|
||||
assertMempool(cs.txNotifier).CheckTx(tx, nil)
|
||||
i++
|
||||
}
|
||||
}
|
||||
@@ -110,21 +118,22 @@ func TestWALCrash(t *testing.T) {
|
||||
3},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
for i, tc := range testCases {
|
||||
consensusReplayConfig := ResetConfig(fmt.Sprintf("%s_%d", t.Name(), i))
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
crashWALandCheckLiveness(t, tc.initFn, tc.heightToStop)
|
||||
crashWALandCheckLiveness(t, consensusReplayConfig, tc.initFn, tc.heightToStop)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func crashWALandCheckLiveness(t *testing.T, initFn func(dbm.DB, *ConsensusState, context.Context), heightToStop int64) {
|
||||
func crashWALandCheckLiveness(t *testing.T, consensusReplayConfig *cfg.Config,
|
||||
initFn func(dbm.DB, *ConsensusState, context.Context), heightToStop int64) {
|
||||
walPaniced := make(chan error)
|
||||
crashingWal := &crashingWAL{panicCh: walPaniced, heightToStop: heightToStop}
|
||||
|
||||
i := 1
|
||||
LOOP:
|
||||
for {
|
||||
// fmt.Printf("====== LOOP %d\n", i)
|
||||
t.Logf("====== LOOP %d\n", i)
|
||||
|
||||
// create consensus state from a clean slate
|
||||
@@ -163,7 +172,7 @@ LOOP:
|
||||
t.Logf("WAL paniced: %v", err)
|
||||
|
||||
// make sure we can make blocks after a crash
|
||||
startNewConsensusStateAndWaitForBlock(t, cs.Height, blockDB, stateDB)
|
||||
startNewConsensusStateAndWaitForBlock(t, consensusReplayConfig, cs.Height, blockDB, stateDB)
|
||||
|
||||
// stop consensus state and transactions sender (initFn)
|
||||
cs.Stop()
|
||||
@@ -269,29 +278,37 @@ var modes = []uint{0, 1, 2}
|
||||
|
||||
// Sync from scratch
|
||||
func TestHandshakeReplayAll(t *testing.T) {
|
||||
for _, m := range modes {
|
||||
testHandshakeReplay(t, 0, m)
|
||||
for i, m := range modes {
|
||||
config := ResetConfig(fmt.Sprintf("%s_%v", t.Name(), i))
|
||||
defer os.RemoveAll(config.RootDir)
|
||||
testHandshakeReplay(t, config, 0, m)
|
||||
}
|
||||
}
|
||||
|
||||
// Sync many, not from scratch
|
||||
func TestHandshakeReplaySome(t *testing.T) {
|
||||
for _, m := range modes {
|
||||
testHandshakeReplay(t, 1, m)
|
||||
for i, m := range modes {
|
||||
config := ResetConfig(fmt.Sprintf("%s_%v", t.Name(), i))
|
||||
defer os.RemoveAll(config.RootDir)
|
||||
testHandshakeReplay(t, config, 1, m)
|
||||
}
|
||||
}
|
||||
|
||||
// Sync from lagging by one
|
||||
func TestHandshakeReplayOne(t *testing.T) {
|
||||
for _, m := range modes {
|
||||
testHandshakeReplay(t, NUM_BLOCKS-1, m)
|
||||
for i, m := range modes {
|
||||
config := ResetConfig(fmt.Sprintf("%s_%v", t.Name(), i))
|
||||
defer os.RemoveAll(config.RootDir)
|
||||
testHandshakeReplay(t, config, NUM_BLOCKS-1, m)
|
||||
}
|
||||
}
|
||||
|
||||
// Sync from caught up
|
||||
func TestHandshakeReplayNone(t *testing.T) {
|
||||
for _, m := range modes {
|
||||
testHandshakeReplay(t, NUM_BLOCKS, m)
|
||||
for i, m := range modes {
|
||||
config := ResetConfig(fmt.Sprintf("%s_%v", t.Name(), i))
|
||||
defer os.RemoveAll(config.RootDir)
|
||||
testHandshakeReplay(t, config, NUM_BLOCKS, m)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -311,15 +328,13 @@ func tempWALWithData(data []byte) string {
|
||||
}
|
||||
|
||||
// Make some blocks. Start a fresh app and apply nBlocks blocks. Then restart the app and sync it up with the remaining blocks
|
||||
func testHandshakeReplay(t *testing.T, nBlocks int, mode uint) {
|
||||
config := ResetConfig("proxy_test_")
|
||||
|
||||
walBody, err := WALWithNBlocks(NUM_BLOCKS)
|
||||
func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uint) {
|
||||
walBody, err := WALWithNBlocks(t, NUM_BLOCKS)
|
||||
require.NoError(t, err)
|
||||
walFile := tempWALWithData(walBody)
|
||||
config.Consensus.SetWalFile(walFile)
|
||||
|
||||
privVal := privval.LoadFilePV(config.PrivValidatorFile())
|
||||
privVal := privval.LoadFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile())
|
||||
|
||||
wal, err := NewWAL(walFile)
|
||||
require.NoError(t, err)
|
||||
@@ -537,10 +552,8 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) {
|
||||
}
|
||||
case *types.Vote:
|
||||
if p.Type == types.PrecommitType {
|
||||
thisBlockCommit = &types.Commit{
|
||||
BlockID: p.BlockID,
|
||||
Precommits: []*types.Vote{p},
|
||||
}
|
||||
commitSigs := []*types.CommitSig{p.CommitSig()}
|
||||
thisBlockCommit = types.NewCommit(p.BlockID, commitSigs)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -633,7 +646,8 @@ func TestInitChainUpdateValidators(t *testing.T) {
|
||||
clientCreator := proxy.NewLocalClientCreator(app)
|
||||
|
||||
config := ResetConfig("proxy_test_")
|
||||
privVal := privval.LoadFilePV(config.PrivValidatorFile())
|
||||
defer os.RemoveAll(config.RootDir)
|
||||
privVal := privval.LoadFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile())
|
||||
stateDB, state, store := stateAndStore(config, privVal.GetPubKey(), 0x0)
|
||||
|
||||
oldValAddr := state.Validators.Validators[0].Address
|
||||
@@ -659,12 +673,6 @@ func TestInitChainUpdateValidators(t *testing.T) {
|
||||
assert.Equal(t, newValAddr, expectValAddr)
|
||||
}
|
||||
|
||||
func newInitChainApp(vals []abci.ValidatorUpdate) *initChainApp {
|
||||
return &initChainApp{
|
||||
vals: vals,
|
||||
}
|
||||
}
|
||||
|
||||
// returns the vals on InitChain
|
||||
type initChainApp struct {
|
||||
abci.BaseApplication
|
||||
|
@@ -2,13 +2,14 @@ package consensus
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"runtime/debug"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
"github.com/tendermint/tendermint/libs/fail"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
@@ -22,13 +23,6 @@ import (
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
// Config
|
||||
|
||||
const (
|
||||
proposalHeartbeatIntervalSeconds = 2
|
||||
)
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
// Errors
|
||||
|
||||
@@ -63,6 +57,16 @@ func (ti *timeoutInfo) String() string {
|
||||
return fmt.Sprintf("%v ; %d/%d %v", ti.Duration, ti.Height, ti.Round, ti.Step)
|
||||
}
|
||||
|
||||
// interface to the mempool
|
||||
type txNotifier interface {
|
||||
TxsAvailable() <-chan struct{}
|
||||
}
|
||||
|
||||
// interface to the evidence pool
|
||||
type evidencePool interface {
|
||||
AddEvidence(types.Evidence) error
|
||||
}
|
||||
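These two small interfaces decouple ConsensusState from the concrete mempool and evidence pool: anything exposing TxsAvailable() satisfies txNotifier (in production the mempool plays this role; the tests further below use fakeTxNotifier). A minimal illustrative sketch, not part of the diff:

// chanNotifier is a hypothetical channel-backed txNotifier.
type chanNotifier struct{ ch chan struct{} }

func (n *chanNotifier) TxsAvailable() <-chan struct{} { return n.ch }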
|
||||
// ConsensusState handles execution of the consensus algorithm.
|
||||
// It processes votes and proposals, and upon reaching agreement,
|
||||
// commits blocks to the chain and executes them against the application.
|
||||
@@ -74,17 +78,23 @@ type ConsensusState struct {
|
||||
config *cfg.ConsensusConfig
|
||||
privValidator types.PrivValidator // for signing votes
|
||||
|
||||
// services for creating and executing blocks
|
||||
blockExec *sm.BlockExecutor
|
||||
// store blocks and commits
|
||||
blockStore sm.BlockStore
|
||||
mempool sm.Mempool
|
||||
evpool sm.EvidencePool
|
||||
|
||||
// create and execute blocks
|
||||
blockExec *sm.BlockExecutor
|
||||
|
||||
// notify us if txs are available
|
||||
txNotifier txNotifier
|
||||
|
||||
// add evidence to the pool
|
||||
// when it's detected
|
||||
evpool evidencePool
|
||||
|
||||
// internal state
|
||||
mtx sync.RWMutex
|
||||
cstypes.RoundState
|
||||
triggeredTimeoutPrecommit bool
|
||||
state sm.State // State until height-1.
|
||||
state sm.State // State until height-1.
|
||||
|
||||
// state changes may be triggered by: msgs from peers,
|
||||
// msgs from ourself, or by timeouts
|
||||
@@ -118,7 +128,7 @@ type ConsensusState struct {
|
||||
done chan struct{}
|
||||
|
||||
// synchronous pubsub between consensus state and reactor.
|
||||
// state only emits EventNewRoundStep, EventVote and EventProposalHeartbeat
|
||||
// state only emits EventNewRoundStep and EventVote
|
||||
evsw tmevents.EventSwitch
|
||||
|
||||
// for reporting metrics
|
||||
@@ -134,15 +144,15 @@ func NewConsensusState(
|
||||
state sm.State,
|
||||
blockExec *sm.BlockExecutor,
|
||||
blockStore sm.BlockStore,
|
||||
mempool sm.Mempool,
|
||||
evpool sm.EvidencePool,
|
||||
txNotifier txNotifier,
|
||||
evpool evidencePool,
|
||||
options ...StateOption,
|
||||
) *ConsensusState {
|
||||
cs := &ConsensusState{
|
||||
config: config,
|
||||
blockExec: blockExec,
|
||||
blockStore: blockStore,
|
||||
mempool: mempool,
|
||||
txNotifier: txNotifier,
|
||||
peerMsgQueue: make(chan msgInfo, msgQueueSize),
|
||||
internalMsgQueue: make(chan msgInfo, msgQueueSize),
|
||||
timeoutTicker: NewTimeoutTicker(),
|
||||
@@ -297,6 +307,23 @@ func (cs *ConsensusState) OnStart() error {
|
||||
// reload from consensus log to catchup
|
||||
if cs.doWALCatchup {
|
||||
if err := cs.catchupReplay(cs.Height); err != nil {
|
||||
// don't try to recover from data corruption error
|
||||
if IsDataCorruptionError(err) {
|
||||
cs.Logger.Error("Encountered corrupt WAL file", "err", err.Error())
|
||||
cs.Logger.Error("Please repair the WAL file before restarting")
|
||||
fmt.Println(`You can attempt to repair the WAL as follows:
|
||||
|
||||
----
|
||||
WALFILE=~/.tendermint/data/cs.wal/wal
|
||||
cp $WALFILE ${WALFILE}.bak # backup the file
|
||||
go run scripts/wal2json/main.go $WALFILE > wal.json # this will panic, but can be ignored
|
||||
rm $WALFILE # remove the corrupt file
|
||||
go run scripts/json2wal/main.go wal.json $WALFILE # rebuild the file without corruption
|
||||
----`)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
cs.Logger.Error("Error on catchup replay. Proceeding to start ConsensusState anyway", "err", err.Error())
|
||||
// NOTE: if we ever do return an error here,
|
||||
// make sure to stop the timeoutTicker
|
||||
@@ -427,7 +454,7 @@ func (cs *ConsensusState) updateRoundStep(round int, step cstypes.RoundStepType)
|
||||
// enterNewRound(height, 0) at cs.StartTime.
|
||||
func (cs *ConsensusState) scheduleRound0(rs *cstypes.RoundState) {
|
||||
//cs.Logger.Info("scheduleRound0", "now", tmtime.Now(), "startTime", cs.StartTime)
|
||||
sleepDuration := rs.StartTime.Sub(tmtime.Now()) // nolint: gotype, gosimple
|
||||
sleepDuration := rs.StartTime.Sub(tmtime.Now())
|
||||
cs.scheduleTimeout(sleepDuration, rs.Height, 0, cstypes.RoundStepNewHeight)
|
||||
}
|
||||
|
||||
@@ -462,7 +489,7 @@ func (cs *ConsensusState) reconstructLastCommit(state sm.State) {
|
||||
if precommit == nil {
|
||||
continue
|
||||
}
|
||||
added, err := lastPrecommits.AddVote(precommit)
|
||||
added, err := lastPrecommits.AddVote(seenCommit.ToVote(precommit))
|
||||
if !added || err != nil {
|
||||
cmn.PanicCrisis(fmt.Sprintf("Failed to reconstruct LastCommit: %v", err))
|
||||
}
|
||||
@@ -490,7 +517,7 @@ func (cs *ConsensusState) updateToState(state sm.State) {
|
||||
// If state isn't further out than cs.state, just ignore.
|
||||
// This happens when SwitchToConsensus() is called in the reactor.
|
||||
// We don't want to reset e.g. the Votes, but we still want to
|
||||
// signal the new round step, because other services (eg. mempool)
|
||||
// signal the new round step, because other services (eg. txNotifier)
|
||||
// depend on having an up-to-date peer state!
|
||||
if !cs.state.IsEmpty() && (state.LastBlockHeight <= cs.state.LastBlockHeight) {
|
||||
cs.Logger.Info("Ignoring updateToState()", "newHeight", state.LastBlockHeight+1, "oldHeight", cs.state.LastBlockHeight+1)
|
||||
@@ -539,6 +566,7 @@ func (cs *ConsensusState) updateToState(state sm.State) {
|
||||
cs.CommitRound = -1
|
||||
cs.LastCommit = lastPrecommits
|
||||
cs.LastValidators = state.LastValidators
|
||||
cs.TriggeredTimeoutPrecommit = false
|
||||
|
||||
cs.state = state
|
||||
|
||||
@@ -605,7 +633,7 @@ func (cs *ConsensusState) receiveRoutine(maxSteps int) {
|
||||
var mi msgInfo
|
||||
|
||||
select {
|
||||
case <-cs.mempool.TxsAvailable():
|
||||
case <-cs.txNotifier.TxsAvailable():
|
||||
cs.handleTxsAvailable()
|
||||
case mi = <-cs.peerMsgQueue:
|
||||
cs.wal.Write(mi)
|
||||
@@ -614,6 +642,15 @@ func (cs *ConsensusState) receiveRoutine(maxSteps int) {
|
||||
cs.handleMsg(mi)
|
||||
case mi = <-cs.internalMsgQueue:
|
||||
cs.wal.WriteSync(mi) // NOTE: fsync
|
||||
|
||||
if _, ok := mi.Msg.(*VoteMessage); ok {
|
||||
// we actually want to simulate failing during
|
||||
// the previous WriteSync, but this isn't easy to do.
|
||||
// Equivalent would be to fail here and manually remove
|
||||
// some bytes from the end of the wal.
|
||||
fail.Fail() // XXX
|
||||
}
|
||||
|
||||
// handles proposals, block parts, votes
|
||||
cs.handleMsg(mi)
|
||||
case ti := <-cs.timeoutTicker.Chan(): // tockChan:
|
||||
@@ -721,6 +758,7 @@ func (cs *ConsensusState) handleTxsAvailable() {
|
||||
cs.mtx.Lock()
|
||||
defer cs.mtx.Unlock()
|
||||
// we only need to do this for round 0
|
||||
cs.enterNewRound(cs.Height, 0)
|
||||
cs.enterPropose(cs.Height, 0)
|
||||
}
|
||||
|
||||
@@ -752,7 +790,7 @@ func (cs *ConsensusState) enterNewRound(height int64, round int) {
|
||||
validators := cs.Validators
|
||||
if cs.Round < round {
|
||||
validators = validators.Copy()
|
||||
validators.IncrementAccum(round - cs.Round)
|
||||
validators.IncrementProposerPriority(round - cs.Round)
|
||||
}
|
||||
|
||||
// Setup new round
|
||||
@@ -771,7 +809,7 @@ func (cs *ConsensusState) enterNewRound(height int64, round int) {
|
||||
cs.ProposalBlockParts = nil
|
||||
}
|
||||
cs.Votes.SetRound(round + 1) // also track next round (round+1) to allow round-skipping
|
||||
cs.triggeredTimeoutPrecommit = false
|
||||
cs.TriggeredTimeoutPrecommit = false
|
||||
|
||||
cs.eventBus.PublishEventNewRound(cs.NewRoundEvent())
|
||||
cs.metrics.Rounds.Set(float64(round))
|
||||
@@ -785,7 +823,6 @@ func (cs *ConsensusState) enterNewRound(height int64, round int) {
|
||||
cs.scheduleTimeout(cs.config.CreateEmptyBlocksInterval, height, round,
|
||||
cstypes.RoundStepNewRound)
|
||||
}
|
||||
go cs.proposalHeartbeat(height, round)
|
||||
} else {
|
||||
cs.enterPropose(height, round)
|
||||
}
|
||||
@@ -802,38 +839,6 @@ func (cs *ConsensusState) needProofBlock(height int64) bool {
|
||||
return !bytes.Equal(cs.state.AppHash, lastBlockMeta.Header.AppHash)
|
||||
}
|
||||
|
||||
func (cs *ConsensusState) proposalHeartbeat(height int64, round int) {
|
||||
logger := cs.Logger.With("height", height, "round", round)
|
||||
addr := cs.privValidator.GetAddress()
|
||||
|
||||
if !cs.Validators.HasAddress(addr) {
|
||||
logger.Debug("Not sending proposalHearbeat. This node is not a validator", "addr", addr, "vals", cs.Validators)
|
||||
return
|
||||
}
|
||||
counter := 0
|
||||
valIndex, _ := cs.Validators.GetByAddress(addr)
|
||||
chainID := cs.state.ChainID
|
||||
for {
|
||||
rs := cs.GetRoundState()
|
||||
// if we've already moved on, no need to send more heartbeats
|
||||
if rs.Step > cstypes.RoundStepNewRound || rs.Round > round || rs.Height > height {
|
||||
return
|
||||
}
|
||||
heartbeat := &types.Heartbeat{
|
||||
Height: rs.Height,
|
||||
Round: rs.Round,
|
||||
Sequence: counter,
|
||||
ValidatorAddress: addr,
|
||||
ValidatorIndex: valIndex,
|
||||
}
|
||||
cs.privValidator.SignHeartbeat(chainID, heartbeat)
|
||||
cs.eventBus.PublishEventProposalHeartbeat(types.EventDataProposalHeartbeat{heartbeat})
|
||||
cs.evsw.FireEvent(types.EventProposalHeartbeat, heartbeat)
|
||||
counter++
|
||||
time.Sleep(proposalHeartbeatIntervalSeconds * time.Second)
|
||||
}
|
||||
}
|
||||
|
||||
// Enter (CreateEmptyBlocks): from enterNewRound(height,round)
|
||||
// Enter (CreateEmptyBlocks, CreateEmptyBlocksInterval > 0 ): after enterNewRound(height,round), after timeout of CreateEmptyBlocksInterval
|
||||
// Enter (!CreateEmptyBlocks) : after enterNewRound(height,round), once txs are in the mempool
|
||||
@@ -869,13 +874,14 @@ func (cs *ConsensusState) enterPropose(height int64, round int) {
|
||||
}
|
||||
|
||||
// if not a validator, we're done
|
||||
if !cs.Validators.HasAddress(cs.privValidator.GetAddress()) {
|
||||
logger.Debug("This node is not a validator", "addr", cs.privValidator.GetAddress(), "vals", cs.Validators)
|
||||
address := cs.privValidator.GetPubKey().Address()
|
||||
if !cs.Validators.HasAddress(address) {
|
||||
logger.Debug("This node is not a validator", "addr", address, "vals", cs.Validators)
|
||||
return
|
||||
}
|
||||
logger.Debug("This node is a validator")
|
||||
|
||||
if cs.isProposer() {
|
||||
if cs.isProposer(address) {
|
||||
logger.Info("enterPropose: Our turn to propose", "proposer", cs.Validators.GetProposer().Address, "privValidator", cs.privValidator)
|
||||
cs.decideProposal(height, round)
|
||||
} else {
|
||||
@@ -883,8 +889,8 @@ func (cs *ConsensusState) enterPropose(height int64, round int) {
|
||||
}
|
||||
}
|
||||
|
||||
func (cs *ConsensusState) isProposer() bool {
|
||||
return bytes.Equal(cs.Validators.GetProposer().Address, cs.privValidator.GetAddress())
|
||||
func (cs *ConsensusState) isProposer(address []byte) bool {
|
||||
return bytes.Equal(cs.Validators.GetProposer().Address, address)
|
||||
}
|
||||
|
||||
func (cs *ConsensusState) defaultDecideProposal(height int64, round int) {
|
||||
@@ -904,7 +910,7 @@ func (cs *ConsensusState) defaultDecideProposal(height int64, round int) {
|
||||
}
|
||||
|
||||
// Make proposal
|
||||
propBlockId := types.BlockID{block.Hash(), blockParts.Header()}
|
||||
propBlockId := types.BlockID{Hash: block.Hash(), PartsHeader: blockParts.Header()}
|
||||
proposal := types.NewProposal(height, round, cs.ValidRound, propBlockId)
|
||||
if err := cs.privValidator.SignProposal(cs.state.ChainID, proposal); err == nil {
|
||||
|
||||
@@ -949,7 +955,7 @@ func (cs *ConsensusState) createProposalBlock() (block *types.Block, blockParts
|
||||
if cs.Height == 1 {
|
||||
// We're creating a proposal for the first block.
|
||||
// The commit is empty, but not nil.
|
||||
commit = &types.Commit{}
|
||||
commit = types.NewCommit(types.BlockID{}, nil)
|
||||
} else if cs.LastCommit.HasTwoThirdsMajority() {
|
||||
// Make the commit from LastCommit
|
||||
commit = cs.LastCommit.MakeCommit()
|
||||
@@ -959,20 +965,8 @@ func (cs *ConsensusState) createProposalBlock() (block *types.Block, blockParts
|
||||
return
|
||||
}
|
||||
|
||||
maxBytes := cs.state.ConsensusParams.BlockSize.MaxBytes
|
||||
maxGas := cs.state.ConsensusParams.BlockSize.MaxGas
|
||||
// bound evidence to 1/10th of the block
|
||||
evidence := cs.evpool.PendingEvidence(types.MaxEvidenceBytesPerBlock(maxBytes))
|
||||
// Mempool validated transactions
|
||||
txs := cs.mempool.ReapMaxBytesMaxGas(types.MaxDataBytes(
|
||||
maxBytes,
|
||||
cs.state.Validators.Size(),
|
||||
len(evidence),
|
||||
), maxGas)
|
||||
proposerAddr := cs.privValidator.GetAddress()
|
||||
block, parts := cs.state.MakeBlock(cs.Height, txs, commit, evidence, proposerAddr)
|
||||
|
||||
return block, parts
|
||||
proposerAddr := cs.privValidator.GetPubKey().Address()
|
||||
return cs.blockExec.CreateProposalBlock(cs.Height, cs.state, commit, proposerAddr)
|
||||
}
|
||||
|
||||
// Enter: `timeoutPropose` after entering Propose.
|
||||
@@ -1161,12 +1155,12 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) {
|
||||
func (cs *ConsensusState) enterPrecommitWait(height int64, round int) {
|
||||
logger := cs.Logger.With("height", height, "round", round)
|
||||
|
||||
if cs.Height != height || round < cs.Round || (cs.Round == round && cs.triggeredTimeoutPrecommit) {
|
||||
if cs.Height != height || round < cs.Round || (cs.Round == round && cs.TriggeredTimeoutPrecommit) {
|
||||
logger.Debug(
|
||||
fmt.Sprintf(
|
||||
"enterPrecommitWait(%v/%v): Invalid args. "+
|
||||
"Current state is Height/Round: %v/%v/, triggeredTimeoutPrecommit:%v",
|
||||
height, round, cs.Height, cs.Round, cs.triggeredTimeoutPrecommit))
|
||||
"Current state is Height/Round: %v/%v/, TriggeredTimeoutPrecommit:%v",
|
||||
height, round, cs.Height, cs.Round, cs.TriggeredTimeoutPrecommit))
|
||||
return
|
||||
}
|
||||
if !cs.Votes.Precommits(round).HasTwoThirdsAny() {
|
||||
@@ -1176,7 +1170,7 @@ func (cs *ConsensusState) enterPrecommitWait(height int64, round int) {
|
||||
|
||||
defer func() {
|
||||
// Done enterPrecommitWait:
|
||||
cs.triggeredTimeoutPrecommit = true
|
||||
cs.TriggeredTimeoutPrecommit = true
|
||||
cs.newStep()
|
||||
}()
|
||||
|
||||
@@ -1327,7 +1321,7 @@ func (cs *ConsensusState) finalizeCommit(height int64) {
|
||||
// Execute and commit the block, update and save the state, and update the mempool.
|
||||
// NOTE The block.AppHash wont reflect these txs until the next block.
|
||||
var err error
|
||||
stateCopy, err = cs.blockExec.ApplyBlock(stateCopy, types.BlockID{block.Hash(), blockParts.Header()}, block)
|
||||
stateCopy, err = cs.blockExec.ApplyBlock(stateCopy, types.BlockID{Hash: block.Hash(), PartsHeader: blockParts.Header()}, block)
|
||||
if err != nil {
|
||||
cs.Logger.Error("Error on ApplyBlock. Did the application crash? Please restart tendermint", "err", err)
|
||||
err := cmn.Kill()
|
||||
@@ -1363,7 +1357,7 @@ func (cs *ConsensusState) recordMetrics(height int64, block *types.Block) {
|
||||
missingValidators := 0
|
||||
missingValidatorsPower := int64(0)
|
||||
for i, val := range cs.Validators.Validators {
|
||||
var vote *types.Vote
|
||||
var vote *types.CommitSig
|
||||
if i < len(block.LastCommit.Precommits) {
|
||||
vote = block.LastCommit.Precommits[i]
|
||||
}
|
||||
@@ -1514,7 +1508,8 @@ func (cs *ConsensusState) tryAddVote(vote *types.Vote, peerID p2p.ID) (bool, err
|
||||
if err == ErrVoteHeightMismatch {
|
||||
return added, err
|
||||
} else if voteErr, ok := err.(*types.ErrVoteConflictingVotes); ok {
|
||||
if bytes.Equal(vote.ValidatorAddress, cs.privValidator.GetAddress()) {
|
||||
addr := cs.privValidator.GetPubKey().Address()
|
||||
if bytes.Equal(vote.ValidatorAddress, addr) {
|
||||
cs.Logger.Error("Found conflicting vote from ourselves. Did you unsafe_reset a validator?", "height", vote.Height, "round", vote.Round, "type", vote.Type)
|
||||
return added, err
|
||||
}
|
||||
@@ -1549,7 +1544,7 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool,
|
||||
}
|
||||
|
||||
cs.Logger.Info(fmt.Sprintf("Added to lastPrecommits: %v", cs.LastCommit.StringShort()))
|
||||
cs.eventBus.PublishEventVote(types.EventDataVote{vote})
|
||||
cs.eventBus.PublishEventVote(types.EventDataVote{Vote: vote})
|
||||
cs.evsw.FireEvent(types.EventVote, vote)
|
||||
|
||||
// if we can skip timeoutCommit and have all the votes now,
|
||||
@@ -1566,7 +1561,7 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool,
|
||||
// Not necessarily a bad peer, but not favourable behaviour.
|
||||
if vote.Height != cs.Height {
|
||||
err = ErrVoteHeightMismatch
|
||||
cs.Logger.Info("Vote ignored and not added", "voteHeight", vote.Height, "csHeight", cs.Height, "err", err)
|
||||
cs.Logger.Info("Vote ignored and not added", "voteHeight", vote.Height, "csHeight", cs.Height, "peerID", peerID)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1577,7 +1572,7 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool,
|
||||
return
|
||||
}
|
||||
|
||||
cs.eventBus.PublishEventVote(types.EventDataVote{vote})
|
||||
cs.eventBus.PublishEventVote(types.EventDataVote{Vote: vote})
|
||||
cs.evsw.FireEvent(types.EventVote, vote)
|
||||
|
||||
switch vote.Type {
|
||||
@@ -1679,7 +1674,7 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool,
|
||||
}
|
||||
|
||||
func (cs *ConsensusState) signVote(type_ types.SignedMsgType, hash []byte, header types.PartSetHeader) (*types.Vote, error) {
|
||||
addr := cs.privValidator.GetAddress()
|
||||
addr := cs.privValidator.GetPubKey().Address()
|
||||
valIndex, _ := cs.Validators.GetByAddress(addr)
|
||||
|
||||
vote := &types.Vote{
|
||||
@@ -1689,7 +1684,7 @@ func (cs *ConsensusState) signVote(type_ types.SignedMsgType, hash []byte, heade
|
||||
Round: cs.Round,
|
||||
Timestamp: cs.voteTime(),
|
||||
Type: type_,
|
||||
BlockID: types.BlockID{hash, header},
|
||||
BlockID: types.BlockID{Hash: hash, PartsHeader: header},
|
||||
}
|
||||
err := cs.privValidator.SignVote(cs.state.ChainID, vote)
|
||||
return vote, err
|
||||
@@ -1715,7 +1710,7 @@ func (cs *ConsensusState) voteTime() time.Time {
|
||||
// sign the vote and publish on internalMsgQueue
|
||||
func (cs *ConsensusState) signAddVote(type_ types.SignedMsgType, hash []byte, header types.PartSetHeader) *types.Vote {
|
||||
// if we don't have a key or we're not in the validator set, do nothing
|
||||
if cs.privValidator == nil || !cs.Validators.HasAddress(cs.privValidator.GetAddress()) {
|
||||
if cs.privValidator == nil || !cs.Validators.HasAddress(cs.privValidator.GetPubKey().Address()) {
|
||||
return nil
|
||||
}
|
||||
vote, err := cs.signVote(type_, hash, header)
@@ -11,8 +11,6 @@ import (
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
cstypes "github.com/tendermint/tendermint/consensus/types"
|
||||
tmevents "github.com/tendermint/tendermint/libs/events"
|
||||
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
|
||||
@@ -20,14 +18,6 @@ import (
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
func init() {
|
||||
config = ResetConfig("consensus_state_test")
|
||||
}
|
||||
|
||||
func ensureProposeTimeout(timeoutPropose time.Duration) time.Duration {
|
||||
return time.Duration(timeoutPropose.Nanoseconds()*2) * time.Nanosecond
|
||||
}
|
||||
|
||||
/*
|
||||
|
||||
ProposeSuite
|
||||
@@ -75,7 +65,8 @@ func TestStateProposerSelection0(t *testing.T) {
|
||||
|
||||
// Commit a block and ensure proposer for the next height is correct.
|
||||
prop := cs1.GetRoundState().Validators.GetProposer()
|
||||
if !bytes.Equal(prop.Address, cs1.privValidator.GetAddress()) {
|
||||
address := cs1.privValidator.GetPubKey().Address()
|
||||
if !bytes.Equal(prop.Address, address) {
|
||||
t.Fatalf("expected proposer to be validator %d. Got %X", 0, prop.Address)
|
||||
}
|
||||
|
||||
@@ -89,7 +80,8 @@ func TestStateProposerSelection0(t *testing.T) {
|
||||
ensureNewRound(newRoundCh, height+1, 0)
|
||||
|
||||
prop = cs1.GetRoundState().Validators.GetProposer()
|
||||
if !bytes.Equal(prop.Address, vss[1].GetAddress()) {
|
||||
addr := vss[1].GetPubKey().Address()
|
||||
if !bytes.Equal(prop.Address, addr) {
|
||||
panic(fmt.Sprintf("expected proposer to be validator %d. Got %X", 1, prop.Address))
|
||||
}
|
||||
}
|
||||
@@ -112,7 +104,8 @@ func TestStateProposerSelection2(t *testing.T) {
|
||||
// everyone just votes nil. we get a new proposer each round
|
||||
for i := 0; i < len(vss); i++ {
|
||||
prop := cs1.GetRoundState().Validators.GetProposer()
|
||||
correctProposer := vss[(i+round)%len(vss)].GetAddress()
|
||||
addr := vss[(i+round)%len(vss)].GetPubKey().Address()
|
||||
correctProposer := addr
|
||||
if !bytes.Equal(prop.Address, correctProposer) {
|
||||
panic(fmt.Sprintf("expected RoundState.Validators.GetProposer() to be validator %d. Got %X", (i+2)%len(vss), prop.Address))
|
||||
}
|
||||
@@ -507,7 +500,8 @@ func TestStateLockPOLRelock(t *testing.T) {
|
||||
|
||||
timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait)
|
||||
proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
|
||||
voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress())
|
||||
addr := cs1.privValidator.GetPubKey().Address()
|
||||
voteCh := subscribeToVoter(cs1, addr)
|
||||
newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
|
||||
newBlockCh := subscribe(cs1.eventBus, types.EventQueryNewBlockHeader)
|
||||
|
||||
@@ -598,7 +592,8 @@ func TestStateLockPOLUnlock(t *testing.T) {
|
||||
timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait)
|
||||
newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
|
||||
unlockCh := subscribe(cs1.eventBus, types.EventQueryUnlock)
|
||||
voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress())
|
||||
addr := cs1.privValidator.GetPubKey().Address()
|
||||
voteCh := subscribeToVoter(cs1, addr)
|
||||
|
||||
// everything done from perspective of cs1
|
||||
|
||||
@@ -691,7 +686,8 @@ func TestStateLockPOLSafety1(t *testing.T) {
|
||||
timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose)
|
||||
timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait)
|
||||
newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
|
||||
voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress())
|
||||
addr := cs1.privValidator.GetPubKey().Address()
|
||||
voteCh := subscribeToVoter(cs1, addr)
|
||||
|
||||
// start round and wait for propose and prevote
|
||||
startTestRound(cs1, cs1.Height, round)
|
||||
@@ -807,7 +803,8 @@ func TestStateLockPOLSafety2(t *testing.T) {
|
||||
timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait)
|
||||
newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
|
||||
unlockCh := subscribe(cs1.eventBus, types.EventQueryUnlock)
|
||||
voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress())
|
||||
addr := cs1.privValidator.GetPubKey().Address()
|
||||
voteCh := subscribeToVoter(cs1, addr)
|
||||
|
||||
// the block for R0: gets polkad but we miss it
|
||||
// (even though we signed it, shhh)
|
||||
@@ -898,7 +895,8 @@ func TestProposeValidBlock(t *testing.T) {
|
||||
timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose)
|
||||
newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
|
||||
unlockCh := subscribe(cs1.eventBus, types.EventQueryUnlock)
|
||||
voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress())
|
||||
addr := cs1.privValidator.GetPubKey().Address()
|
||||
voteCh := subscribeToVoter(cs1, addr)
|
||||
|
||||
// start round and wait for propose and prevote
|
||||
startTestRound(cs1, cs1.Height, round)
|
||||
@@ -984,7 +982,8 @@ func TestSetValidBlockOnDelayedPrevote(t *testing.T) {
|
||||
timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait)
|
||||
newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
|
||||
validBlockCh := subscribe(cs1.eventBus, types.EventQueryValidBlock)
|
||||
voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress())
|
||||
addr := cs1.privValidator.GetPubKey().Address()
|
||||
voteCh := subscribeToVoter(cs1, addr)
|
||||
|
||||
// start round and wait for propose and prevote
|
||||
startTestRound(cs1, cs1.Height, round)
|
||||
@@ -1029,33 +1028,6 @@ func TestSetValidBlockOnDelayedPrevote(t *testing.T) {
|
||||
assert.True(t, rs.ValidRound == round)
|
||||
}
|
||||
|
||||
// regression for #2518
|
||||
func TestNoHearbeatWhenNotValidator(t *testing.T) {
|
||||
cs, _ := randConsensusState(4)
|
||||
cs.Validators = types.NewValidatorSet(nil) // make sure we are not in the validator set
|
||||
|
||||
cs.evsw.AddListenerForEvent("testing", types.EventProposalHeartbeat,
|
||||
func(data tmevents.EventData) {
|
||||
t.Errorf("Should not have broadcasted heartbeat")
|
||||
})
|
||||
go cs.proposalHeartbeat(10, 1)
|
||||
|
||||
cs.Stop()
|
||||
|
||||
// if a faulty implementation sends an event, we should wait here a little bit to make sure we don't miss it by prematurely leaving the test method
|
||||
time.Sleep((proposalHeartbeatIntervalSeconds + 1) * time.Second)
|
||||
}
|
||||
|
||||
// regression for #2518
|
||||
func TestHearbeatWhenWeAreValidator(t *testing.T) {
|
||||
cs, _ := randConsensusState(4)
|
||||
heartbeatCh := subscribe(cs.eventBus, types.EventQueryProposalHeartbeat)
|
||||
|
||||
go cs.proposalHeartbeat(10, 1)
|
||||
ensureProposalHeartbeat(heartbeatCh)
|
||||
|
||||
}
|
||||
|
||||
// What we want:
|
||||
// P0 miss to lock B as Proposal Block is missing, but set valid block to B after
|
||||
// receiving delayed Block Proposal.
|
||||
@@ -1070,7 +1042,8 @@ func TestSetValidBlockOnDelayedProposal(t *testing.T) {
|
||||
timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose)
|
||||
newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
|
||||
validBlockCh := subscribe(cs1.eventBus, types.EventQueryValidBlock)
|
||||
voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress())
|
||||
addr := cs1.privValidator.GetPubKey().Address()
|
||||
voteCh := subscribeToVoter(cs1, addr)
|
||||
proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
|
||||
|
||||
round = round + 1 // move to round in which P0 is not proposer
|
||||
@@ -1140,7 +1113,8 @@ func TestWaitingTimeoutProposeOnNewRound(t *testing.T) {
|
||||
|
||||
timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose)
|
||||
newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
|
||||
voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress())
|
||||
addr := cs1.privValidator.GetPubKey().Address()
|
||||
voteCh := subscribeToVoter(cs1, addr)
|
||||
|
||||
// start round
|
||||
startTestRound(cs1, height, round)
|
||||
@@ -1173,7 +1147,8 @@ func TestRoundSkipOnNilPolkaFromHigherRound(t *testing.T) {
|
||||
|
||||
timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait)
|
||||
newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
|
||||
voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress())
|
||||
addr := cs1.privValidator.GetPubKey().Address()
|
||||
voteCh := subscribeToVoter(cs1, addr)
|
||||
|
||||
// start round
|
||||
startTestRound(cs1, height, round)
|
||||
@@ -1206,7 +1181,8 @@ func TestWaitTimeoutProposeOnNilPolkaForTheCurrentRound(t *testing.T) {
|
||||
|
||||
timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose)
|
||||
newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
|
||||
voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress())
|
||||
addr := cs1.privValidator.GetPubKey().Address()
|
||||
voteCh := subscribeToVoter(cs1, addr)
|
||||
|
||||
// start round in which PO is not proposer
|
||||
startTestRound(cs1, height, round)
|
||||
@@ -1295,6 +1271,130 @@ func TestCommitFromPreviousRound(t *testing.T) {
|
||||
ensureNewRound(newRoundCh, height+1, 0)
|
||||
}
|
||||
|
||||
type fakeTxNotifier struct {
|
||||
ch chan struct{}
|
||||
}
|
||||
|
||||
func (n *fakeTxNotifier) TxsAvailable() <-chan struct{} {
|
||||
return n.ch
|
||||
}
|
||||
|
||||
func (n *fakeTxNotifier) Notify() {
|
||||
n.ch <- struct{}{}
|
||||
}
|
||||
|
||||
func TestStartNextHeightCorrectly(t *testing.T) {
|
||||
config.Consensus.SkipTimeoutCommit = false
|
||||
cs1, vss := randConsensusState(4)
|
||||
cs1.txNotifier = &fakeTxNotifier{ch: make(chan struct{})}
|
||||
|
||||
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
|
||||
height, round := cs1.Height, cs1.Round
|
||||
|
||||
proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
|
||||
timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose)
|
||||
|
||||
newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
|
||||
newBlockHeader := subscribe(cs1.eventBus, types.EventQueryNewBlockHeader)
|
||||
addr := cs1.privValidator.GetPubKey().Address()
|
||||
voteCh := subscribeToVoter(cs1, addr)
|
||||
|
||||
// start round and wait for propose and prevote
|
||||
startTestRound(cs1, height, round)
|
||||
ensureNewRound(newRoundCh, height, round)
|
||||
|
||||
ensureNewProposal(proposalCh, height, round)
|
||||
rs := cs1.GetRoundState()
|
||||
theBlockHash := rs.ProposalBlock.Hash()
|
||||
theBlockParts := rs.ProposalBlockParts.Header()
|
||||
|
||||
ensurePrevote(voteCh, height, round)
|
||||
validatePrevote(t, cs1, round, vss[0], theBlockHash)
|
||||
|
||||
signAddVotes(cs1, types.PrevoteType, theBlockHash, theBlockParts, vs2, vs3, vs4)
|
||||
|
||||
ensurePrecommit(voteCh, height, round)
|
||||
// the proposed block should now be locked and our precommit added
|
||||
validatePrecommit(t, cs1, round, round, vss[0], theBlockHash, theBlockHash)
|
||||
|
||||
rs = cs1.GetRoundState()
|
||||
|
||||
// add precommits
|
||||
signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2)
|
||||
signAddVotes(cs1, types.PrecommitType, theBlockHash, theBlockParts, vs3)
|
||||
time.Sleep(5 * time.Millisecond)
|
||||
signAddVotes(cs1, types.PrecommitType, theBlockHash, theBlockParts, vs4)
|
||||
|
||||
rs = cs1.GetRoundState()
|
||||
assert.True(t, rs.TriggeredTimeoutPrecommit)
|
||||
|
||||
ensureNewBlockHeader(newBlockHeader, height, theBlockHash)
|
||||
|
||||
cs1.txNotifier.(*fakeTxNotifier).Notify()
|
||||
|
||||
ensureNewTimeout(timeoutProposeCh, height+1, round, cs1.config.TimeoutPropose.Nanoseconds())
|
||||
rs = cs1.GetRoundState()
|
||||
assert.False(t, rs.TriggeredTimeoutPrecommit, "triggeredTimeoutPrecommit should be false at the beginning of each round")
|
||||
}
|
||||
|
||||
func TestResetTimeoutPrecommitUponNewHeight(t *testing.T) {
|
||||
config.Consensus.SkipTimeoutCommit = false
|
||||
cs1, vss := randConsensusState(4)
|
||||
|
||||
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
|
||||
height, round := cs1.Height, cs1.Round
|
||||
|
||||
partSize := types.BlockPartSizeBytes
|
||||
|
||||
proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
|
||||
|
||||
newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
|
||||
newBlockHeader := subscribe(cs1.eventBus, types.EventQueryNewBlockHeader)
|
||||
addr := cs1.privValidator.GetPubKey().Address()
|
||||
voteCh := subscribeToVoter(cs1, addr)
|
||||
|
||||
// start round and wait for propose and prevote
|
||||
startTestRound(cs1, height, round)
|
||||
ensureNewRound(newRoundCh, height, round)
|
||||
|
||||
ensureNewProposal(proposalCh, height, round)
|
||||
rs := cs1.GetRoundState()
|
||||
theBlockHash := rs.ProposalBlock.Hash()
|
||||
theBlockParts := rs.ProposalBlockParts.Header()
|
||||
|
||||
ensurePrevote(voteCh, height, round)
|
||||
validatePrevote(t, cs1, round, vss[0], theBlockHash)
|
||||
|
||||
signAddVotes(cs1, types.PrevoteType, theBlockHash, theBlockParts, vs2, vs3, vs4)
|
||||
|
||||
ensurePrecommit(voteCh, height, round)
|
||||
validatePrecommit(t, cs1, round, round, vss[0], theBlockHash, theBlockHash)
|
||||
|
||||
rs = cs1.GetRoundState()
|
||||
|
||||
// add precommits
|
||||
signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2)
|
||||
signAddVotes(cs1, types.PrecommitType, theBlockHash, theBlockParts, vs3)
|
||||
time.Sleep(5 * time.Millisecond)
|
||||
signAddVotes(cs1, types.PrecommitType, theBlockHash, theBlockParts, vs4)
|
||||
|
||||
rs = cs1.GetRoundState()
|
||||
assert.True(t, rs.TriggeredTimeoutPrecommit)
|
||||
|
||||
ensureNewBlockHeader(newBlockHeader, height, theBlockHash)
|
||||
|
||||
prop, propBlock := decideProposal(cs1, vs2, height+1, 0)
|
||||
propBlockParts := propBlock.MakePartSet(partSize)
|
||||
|
||||
if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ensureNewProposal(proposalCh, height+1, 0)
|
||||
|
||||
rs = cs1.GetRoundState()
|
||||
assert.False(t, rs.TriggeredTimeoutPrecommit, "triggeredTimeoutPrecommit should be false at the beginning of each height")
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------------------
|
||||
// SlashingSuite
|
||||
// TODO: Slashing
|
||||
@@ -1390,7 +1490,8 @@ func TestStateHalt1(t *testing.T) {
|
||||
timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait)
|
||||
newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
|
||||
newBlockCh := subscribe(cs1.eventBus, types.EventQueryNewBlock)
|
||||
voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress())
|
||||
addr := cs1.privValidator.GetPubKey().Address()
|
||||
voteCh := subscribeToVoter(cs1, addr)
|
||||
|
||||
// start round and wait for propose and prevote
|
||||
startTestRound(cs1, height, round)
|
||||
|
@@ -2,6 +2,7 @@ package types
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
@@ -11,8 +12,11 @@ import (
|
||||
|
||||
var config *cfg.Config // NOTE: must be reset for each _test.go file
|
||||
|
||||
func init() {
|
||||
func TestMain(m *testing.M) {
|
||||
config = cfg.ResetTestRoot("consensus_height_vote_set_test")
|
||||
code := m.Run()
|
||||
os.RemoveAll(config.RootDir)
|
||||
os.Exit(code)
|
||||
}
|
||||
|
||||
func TestPeerCatchupRounds(t *testing.T) {
|
||||
@@ -50,8 +54,9 @@ func TestPeerCatchupRounds(t *testing.T) {
|
||||
|
||||
func makeVoteHR(t *testing.T, height int64, round int, privVals []types.PrivValidator, valIndex int) *types.Vote {
|
||||
privVal := privVals[valIndex]
|
||||
addr := privVal.GetPubKey().Address()
|
||||
vote := &types.Vote{
|
||||
ValidatorAddress: privVal.GetAddress(),
|
||||
ValidatorAddress: addr,
|
||||
ValidatorIndex: valIndex,
|
||||
Height: height,
|
||||
Round: round,
|
||||
@@ -63,7 +68,6 @@ func makeVoteHR(t *testing.T, height int64, round int, privVals []types.PrivVali
|
||||
err := privVal.SignVote(chainID, vote)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Error signing vote: %v", err))
|
||||
return nil
|
||||
}
|
||||
return vote
|
||||
}
|
||||
|
@@ -65,25 +65,26 @@ func (rs RoundStepType) String() string {
|
||||
// NOTE: Not thread safe. Should only be manipulated by functions downstream
|
||||
// of the cs.receiveRoutine
|
||||
type RoundState struct {
|
||||
Height int64 `json:"height"` // Height we are working on
|
||||
Round int `json:"round"`
|
||||
Step RoundStepType `json:"step"`
|
||||
StartTime time.Time `json:"start_time"`
|
||||
CommitTime time.Time `json:"commit_time"` // Subjective time when +2/3 precommits for Block at Round were found
|
||||
Validators *types.ValidatorSet `json:"validators"`
|
||||
Proposal *types.Proposal `json:"proposal"`
|
||||
ProposalBlock *types.Block `json:"proposal_block"`
|
||||
ProposalBlockParts *types.PartSet `json:"proposal_block_parts"`
|
||||
LockedRound int `json:"locked_round"`
|
||||
LockedBlock *types.Block `json:"locked_block"`
|
||||
LockedBlockParts *types.PartSet `json:"locked_block_parts"`
|
||||
ValidRound int `json:"valid_round"` // Last known round with POL for non-nil valid block.
|
||||
ValidBlock *types.Block `json:"valid_block"` // Last known block of POL mentioned above.
|
||||
ValidBlockParts *types.PartSet `json:"valid_block_parts"` // Last known block parts of POL mentioned above.
|
||||
Votes *HeightVoteSet `json:"votes"`
|
||||
CommitRound int `json:"commit_round"` //
|
||||
LastCommit *types.VoteSet `json:"last_commit"` // Last precommits at Height-1
|
||||
LastValidators *types.ValidatorSet `json:"last_validators"`
|
||||
Height int64 `json:"height"` // Height we are working on
|
||||
Round int `json:"round"`
|
||||
Step RoundStepType `json:"step"`
|
||||
StartTime time.Time `json:"start_time"`
|
||||
CommitTime time.Time `json:"commit_time"` // Subjective time when +2/3 precommits for Block at Round were found
|
||||
Validators *types.ValidatorSet `json:"validators"`
|
||||
Proposal *types.Proposal `json:"proposal"`
|
||||
ProposalBlock *types.Block `json:"proposal_block"`
|
||||
ProposalBlockParts *types.PartSet `json:"proposal_block_parts"`
|
||||
LockedRound int `json:"locked_round"`
|
||||
LockedBlock *types.Block `json:"locked_block"`
|
||||
LockedBlockParts *types.PartSet `json:"locked_block_parts"`
|
||||
ValidRound int `json:"valid_round"` // Last known round with POL for non-nil valid block.
|
||||
ValidBlock *types.Block `json:"valid_block"` // Last known block of POL mentioned above.
|
||||
ValidBlockParts *types.PartSet `json:"valid_block_parts"` // Last known block parts of POL mentioned above.
|
||||
Votes *HeightVoteSet `json:"votes"`
|
||||
CommitRound int `json:"commit_round"` //
|
||||
LastCommit *types.VoteSet `json:"last_commit"` // Last precommits at Height-1
|
||||
LastValidators *types.ValidatorSet `json:"last_validators"`
|
||||
TriggeredTimeoutPrecommit bool `json:"triggered_timeout_precommit"`
|
||||
}
|
||||
|
||||
// Compressed version of the RoundState for use in RPC
|
||||
|
@@ -3,7 +3,7 @@ package types
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/tendermint/go-amino"
|
||||
amino "github.com/tendermint/go-amino"
|
||||
"github.com/tendermint/tendermint/crypto/ed25519"
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
@@ -16,7 +16,7 @@ func BenchmarkRoundStateDeepCopy(b *testing.B) {
|
||||
// Random validators
|
||||
nval, ntxs := 100, 100
|
||||
vset, _ := types.RandValidatorSet(nval, 1)
|
||||
precommits := make([]*types.Vote, nval)
|
||||
precommits := make([]*types.CommitSig, nval)
|
||||
blockID := types.BlockID{
|
||||
Hash: cmn.RandBytes(20),
|
||||
PartsHeader: types.PartSetHeader{
|
||||
@@ -25,12 +25,12 @@ func BenchmarkRoundStateDeepCopy(b *testing.B) {
|
||||
}
|
||||
sig := make([]byte, ed25519.SignatureSize)
|
||||
for i := 0; i < nval; i++ {
|
||||
precommits[i] = &types.Vote{
|
||||
precommits[i] = (&types.Vote{
|
||||
ValidatorAddress: types.Address(cmn.RandBytes(20)),
|
||||
Timestamp: tmtime.Now(),
|
||||
BlockID: blockID,
|
||||
Signature: sig,
|
||||
}
|
||||
}).CommitSig()
|
||||
}
|
||||
txs := make([]types.Tx, ntxs)
|
||||
for i := 0; i < ntxs; i++ {
|
||||
@@ -53,11 +53,8 @@ func BenchmarkRoundStateDeepCopy(b *testing.B) {
|
||||
Data: types.Data{
|
||||
Txs: txs,
|
||||
},
|
||||
Evidence: types.EvidenceData{},
|
||||
LastCommit: &types.Commit{
|
||||
BlockID: blockID,
|
||||
Precommits: precommits,
|
||||
},
|
||||
Evidence: types.EvidenceData{},
|
||||
LastCommit: types.NewCommit(blockID, precommits),
|
||||
}
|
||||
parts := block.MakePartSet(4096)
|
||||
// Random Proposal
@@ -112,11 +112,21 @@ func (wal *baseWAL) OnStart() error {
|
||||
return err
|
||||
}
|
||||
|
||||
// Stop the underlying autofile group.
|
||||
// Use Wait() to ensure it's finished shutting down
|
||||
// before cleaning up files.
|
||||
func (wal *baseWAL) OnStop() {
|
||||
wal.group.Flush()
|
||||
wal.group.Stop()
|
||||
wal.group.Close()
|
||||
}
|
||||
|
||||
// Wait for the underlying autofile group to finish shutting down
|
||||
// so it's safe to cleanup files.
|
||||
func (wal *baseWAL) Wait() {
|
||||
wal.group.Wait()
|
||||
}
|
||||
|
||||
// Write is called in newStep and for each receive on the
|
||||
// peerMsgQueue and the timeoutTicker.
|
||||
// NOTE: does not call fsync()
|
||||
@@ -163,7 +173,7 @@ func (wal *baseWAL) SearchForEndHeight(height int64, options *WALSearchOptions)
|
||||
// NOTE: starting from the last file in the group because we're usually
|
||||
// searching for the last height. See replay.go
|
||||
min, max := wal.group.MinIndex(), wal.group.MaxIndex()
|
||||
wal.Logger.Debug("Searching for height", "height", height, "min", min, "max", max)
|
||||
wal.Logger.Info("Searching for height", "height", height, "min", min, "max", max)
|
||||
for index := max; index >= min; index-- {
|
||||
gr, err = wal.group.NewReader(index)
|
||||
if err != nil {
|
||||
@@ -183,7 +193,7 @@ func (wal *baseWAL) SearchForEndHeight(height int64, options *WALSearchOptions)
|
||||
break
|
||||
}
|
||||
if options.IgnoreDataCorruptionErrors && IsDataCorruptionError(err) {
|
||||
wal.Logger.Debug("Corrupted entry. Skipping...", "err", err)
|
||||
wal.Logger.Error("Corrupted entry. Skipping...", "err", err)
|
||||
// do nothing
|
||||
continue
|
||||
} else if err != nil {
|
||||
@@ -194,7 +204,7 @@ func (wal *baseWAL) SearchForEndHeight(height int64, options *WALSearchOptions)
|
||||
if m, ok := msg.Msg.(EndHeightMessage); ok {
|
||||
lastHeightFound = m.Height
|
||||
if m.Height == height { // found
|
||||
wal.Logger.Debug("Found", "height", height, "index", index)
|
||||
wal.Logger.Info("Found", "height", height, "index", index)
|
||||
return gr, true, nil
|
||||
}
|
||||
}
|
||||
@@ -219,12 +229,17 @@ func NewWALEncoder(wr io.Writer) *WALEncoder {
|
||||
return &WALEncoder{wr}
|
||||
}
|
||||
|
||||
// Encode writes the custom encoding of v to the stream.
|
||||
// Encode writes the custom encoding of v to the stream. It returns an error if
|
||||
// the amino-encoded size of v is greater than 1MB. Any error encountered
|
||||
// during the write is also returned.
|
||||
func (enc *WALEncoder) Encode(v *TimedWALMessage) error {
|
||||
data := cdc.MustMarshalBinaryBare(v)
|
||||
|
||||
crc := crc32.Checksum(data, crc32c)
|
||||
length := uint32(len(data))
|
||||
if length > maxMsgSizeBytes {
|
||||
return fmt.Errorf("Msg is too big: %d bytes, max: %d bytes", length, maxMsgSizeBytes)
|
||||
}
|
||||
totalLength := 8 + int(length)
|
||||
|
||||
msg := make([]byte, totalLength)
|
||||
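Taken together, the encoder above and the decoder below imply a fixed record framing: a 4-byte big-endian crc32c checksum, a 4-byte big-endian length (capped at maxMsgSizeBytes), then the amino-encoded TimedWALMessage. A parsing sketch under that assumption (helper name illustrative; imports of encoding/binary and fmt assumed from the file):

func parseWALFrame(frame []byte) (crc uint32, data []byte, err error) {
	if len(frame) < 8 {
		return 0, nil, fmt.Errorf("frame too short: %d bytes", len(frame))
	}
	crc = binary.BigEndian.Uint32(frame[0:4])
	length := binary.BigEndian.Uint32(frame[4:8])
	if uint64(length)+8 > uint64(len(frame)) {
		return 0, nil, fmt.Errorf("length %d exceeds frame size %d", length, len(frame))
	}
	return crc, frame[8 : 8+length], nil
}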
@@ -281,31 +296,31 @@ func (dec *WALDecoder) Decode() (*TimedWALMessage, error) {
|
||||
return nil, err
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read checksum: %v", err)
|
||||
return nil, DataCorruptionError{fmt.Errorf("failed to read checksum: %v", err)}
|
||||
}
|
||||
crc := binary.BigEndian.Uint32(b)
|
||||
|
||||
b = make([]byte, 4)
|
||||
_, err = dec.rd.Read(b)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read length: %v", err)
|
||||
return nil, DataCorruptionError{fmt.Errorf("failed to read length: %v", err)}
|
||||
}
|
||||
length := binary.BigEndian.Uint32(b)
|
||||
|
||||
if length > maxMsgSizeBytes {
|
||||
return nil, fmt.Errorf("length %d exceeded maximum possible value of %d bytes", length, maxMsgSizeBytes)
|
||||
return nil, DataCorruptionError{fmt.Errorf("length %d exceeded maximum possible value of %d bytes", length, maxMsgSizeBytes)}
|
||||
}
|
||||
|
||||
data := make([]byte, length)
|
||||
_, err = dec.rd.Read(data)
|
||||
n, err := dec.rd.Read(data)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read data: %v", err)
|
||||
return nil, DataCorruptionError{fmt.Errorf("failed to read data: %v (read: %d, wanted: %d)", err, n, length)}
|
||||
}
|
||||
|
||||
// check checksum before decoding data
|
||||
actualCRC := crc32.Checksum(data, crc32c)
|
||||
if actualCRC != crc {
|
||||
return nil, DataCorruptionError{fmt.Errorf("checksums do not match: (read: %v, actual: %v)", crc, actualCRC)}
|
||||
return nil, DataCorruptionError{fmt.Errorf("checksums do not match: read: %v, actual: %v", crc, actualCRC)}
|
||||
}
|
||||
|
||||
var res = new(TimedWALMessage) // nolint: gosimple
|
||||
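With the decoder now wrapping short reads and bad lengths in DataCorruptionError, callers can tell corruption apart from ordinary decode failures. A handling sketch (assumption, mirroring how SearchForEndHeight skips entries when IgnoreDataCorruptionErrors is set):

func decodeSkippingCorruption(dec *WALDecoder) (*TimedWALMessage, error) {
	for {
		msg, err := dec.Decode()
		if IsDataCorruptionError(err) {
			continue // skip the corrupted entry and try the next one
		}
		return msg, err
	}
}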
|
@@ -7,7 +7,7 @@ import (
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
@@ -28,8 +28,9 @@ import (
|
||||
// stripped down version of node (proxy app, event bus, consensus state) with a
|
||||
// persistent kvstore application and special consensus wal instance
|
||||
// (byteBufferWAL) and waits until numBlocks are created. If the node fails to produce given numBlocks, it returns an error.
|
||||
func WALGenerateNBlocks(wr io.Writer, numBlocks int) (err error) {
|
||||
config := getConfig()
|
||||
func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) {
|
||||
config := getConfig(t)
|
||||
defer os.RemoveAll(config.RootDir)
|
||||
|
||||
app := kvstore.NewPersistentKVStoreApplication(filepath.Join(config.DBDir(), "wal_generator"))
|
||||
|
||||
@@ -40,8 +41,9 @@ func WALGenerateNBlocks(wr io.Writer, numBlocks int) (err error) {
|
||||
// COPY PASTE FROM node.go WITH A FEW MODIFICATIONS
|
||||
// NOTE: we can't import node package because of circular dependency.
|
||||
// NOTE: we don't do handshake so need to set state.Version.Consensus.App directly.
|
||||
privValidatorFile := config.PrivValidatorFile()
|
||||
privValidator := privval.LoadOrGenFilePV(privValidatorFile)
|
||||
privValidatorKeyFile := config.PrivValidatorKeyFile()
|
||||
privValidatorStateFile := config.PrivValidatorStateFile()
|
||||
privValidator := privval.LoadOrGenFilePV(privValidatorKeyFile, privValidatorStateFile)
|
||||
genDoc, err := types.GenesisDocFromFile(config.GenesisFile())
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to read genesis file")
|
||||
@@ -101,11 +103,11 @@ func WALGenerateNBlocks(wr io.Writer, numBlocks int) (err error) {
|
||||
}
|
||||
|
||||
//WALWithNBlocks returns a WAL content with numBlocks.
|
||||
func WALWithNBlocks(numBlocks int) (data []byte, err error) {
|
||||
func WALWithNBlocks(t *testing.T, numBlocks int) (data []byte, err error) {
|
||||
var b bytes.Buffer
|
||||
wr := bufio.NewWriter(&b)
|
||||
|
||||
if err := WALGenerateNBlocks(wr, numBlocks); err != nil {
|
||||
if err := WALGenerateNBlocks(t, wr, numBlocks); err != nil {
|
||||
return []byte{}, err
|
||||
}
|
||||
|
||||
@@ -113,18 +115,6 @@ func WALWithNBlocks(numBlocks int) (data []byte, err error) {
|
||||
return b.Bytes(), nil
|
||||
}
|
||||
|
||||
// f**ing long, but unique for each test
|
||||
func makePathname() string {
|
||||
// get path
|
||||
p, err := os.Getwd()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
// fmt.Println(p)
|
||||
sep := string(filepath.Separator)
|
||||
return strings.Replace(p, sep, "_", -1)
|
||||
}
|
||||
|
||||
func randPort() int {
|
||||
// returns between base and base + spread
|
||||
base, spread := 20000, 20000
|
||||
@@ -139,9 +129,8 @@ func makeAddrs() (string, string, string) {
|
||||
}
|
||||
|
||||
// getConfig returns a config for test cases
|
||||
func getConfig() *cfg.Config {
|
||||
pathname := makePathname()
|
||||
c := cfg.ResetTestRoot(fmt.Sprintf("%s_%d", pathname, cmn.RandInt()))
|
||||
func getConfig(t *testing.T) *cfg.Config {
|
||||
c := cfg.ResetTestRoot(t.Name())
|
||||
|
||||
// and we use random ports to run in parallel
|
||||
tm, rpc, grpc := makeAddrs()
|
||||
|
@@ -7,6 +7,7 @@ import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
// "sync"
|
||||
"testing"
|
||||
"time"
|
||||
@@ -38,11 +39,16 @@ func TestWALTruncate(t *testing.T) {
|
||||
wal.SetLogger(log.TestingLogger())
|
||||
err = wal.Start()
|
||||
require.NoError(t, err)
|
||||
defer wal.Stop()
|
||||
defer func() {
|
||||
wal.Stop()
|
||||
// wait for the wal to finish shutting down so we
|
||||
// can safely remove the directory
|
||||
wal.Wait()
|
||||
}()
|
||||
|
||||
//60 block's size nearly 70K, greater than group's headBuf size(4096 * 10), when headBuf is full, truncate content will Flush to the file.
|
||||
//at this time, RotateFile is called, truncate content exist in each file.
|
||||
err = WALGenerateNBlocks(wal.Group(), 60)
|
||||
err = WALGenerateNBlocks(t, wal.Group(), 60)
|
||||
require.NoError(t, err)
|
||||
|
||||
time.Sleep(1 * time.Millisecond) //wait groupCheckDuration, make sure RotateFile run
|
||||
@@ -67,8 +73,8 @@ func TestWALTruncate(t *testing.T) {
|
||||
func TestWALEncoderDecoder(t *testing.T) {
|
||||
now := tmtime.Now()
|
||||
msgs := []TimedWALMessage{
|
||||
TimedWALMessage{Time: now, Msg: EndHeightMessage{0}},
|
||||
TimedWALMessage{Time: now, Msg: timeoutInfo{Duration: time.Second, Height: 1, Round: 1, Step: types.RoundStepPropose}},
|
||||
{Time: now, Msg: EndHeightMessage{0}},
|
||||
{Time: now, Msg: timeoutInfo{Duration: time.Second, Height: 1, Round: 1, Step: types.RoundStepPropose}},
|
||||
}
|
||||
|
||||
b := new(bytes.Buffer)
|
||||
@@ -89,8 +95,28 @@ func TestWALEncoderDecoder(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestWALWritePanicsIfMsgIsTooBig(t *testing.T) {
|
||||
walDir, err := ioutil.TempDir("", "wal")
|
||||
require.NoError(t, err)
|
||||
defer os.RemoveAll(walDir)
|
||||
walFile := filepath.Join(walDir, "wal")
|
||||
|
||||
wal, err := NewWAL(walFile)
|
||||
require.NoError(t, err)
|
||||
err = wal.Start()
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
wal.Stop()
|
||||
// wait for the wal to finish shutting down so we
|
||||
// can safely remove the directory
|
||||
wal.Wait()
|
||||
}()
|
||||
|
||||
assert.Panics(t, func() { wal.Write(make([]byte, maxMsgSizeBytes+1)) })
|
||||
}
|
||||
|
||||
func TestWALSearchForEndHeight(t *testing.T) {
|
||||
walBody, err := WALWithNBlocks(6)
|
||||
walBody, err := WALWithNBlocks(t, 6)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
@@ -1,7 +1,7 @@
|
||||
package consensus
|
||||
|
||||
import (
|
||||
"github.com/tendermint/go-amino"
|
||||
amino "github.com/tendermint/go-amino"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
|
@@ -5,7 +5,7 @@ import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
|
||||
"golang.org/x/crypto/openpgp/armor" // forked to github.com/tendermint/crypto
|
||||
"golang.org/x/crypto/openpgp/armor"
|
||||
)
|
||||
|
||||
func EncodeArmor(blockType string, headers map[string]string, data []byte) string {
|
||||
|
@@ -7,7 +7,7 @@ import (
|
||||
"io"
|
||||
|
||||
amino "github.com/tendermint/go-amino"
|
||||
"golang.org/x/crypto/ed25519" // forked to github.com/tendermint/crypto
|
||||
"golang.org/x/crypto/ed25519"
|
||||
|
||||
"github.com/tendermint/tendermint/crypto"
|
||||
"github.com/tendermint/tendermint/crypto/tmhash"
|
||||
@@ -18,8 +18,8 @@ import (
|
||||
var _ crypto.PrivKey = PrivKeyEd25519{}
|
||||
|
||||
const (
|
||||
PrivKeyAminoRoute = "tendermint/PrivKeyEd25519"
|
||||
PubKeyAminoRoute = "tendermint/PubKeyEd25519"
|
||||
PrivKeyAminoName = "tendermint/PrivKeyEd25519"
|
||||
PubKeyAminoName = "tendermint/PubKeyEd25519"
|
||||
// Size of an Edwards25519 signature. Namely the size of a compressed
|
||||
// Edwards25519 point, and a field element. Both of which are 32 bytes.
|
||||
SignatureSize = 64
|
||||
@@ -30,11 +30,11 @@ var cdc = amino.NewCodec()
|
||||
func init() {
|
||||
cdc.RegisterInterface((*crypto.PubKey)(nil), nil)
|
||||
cdc.RegisterConcrete(PubKeyEd25519{},
|
||||
PubKeyAminoRoute, nil)
|
||||
PubKeyAminoName, nil)
|
||||
|
||||
cdc.RegisterInterface((*crypto.PrivKey)(nil), nil)
|
||||
cdc.RegisterConcrete(PrivKeyEd25519{},
|
||||
PrivKeyAminoRoute, nil)
|
||||
PrivKeyAminoName, nil)
|
||||
}
|
||||
|
||||
// PrivKeyEd25519 implements crypto.PrivKey.
|
||||
|
@@ -12,11 +12,11 @@ import (
|
||||
|
||||
var cdc = amino.NewCodec()
|
||||
|
||||
// routeTable is used to map public key concrete types back
|
||||
// to their amino routes. This should eventually be handled
|
||||
// nameTable is used to map public key concrete types back
|
||||
// to their registered amino names. This should eventually be handled
|
||||
// by amino. Example usage:
|
||||
// routeTable[reflect.TypeOf(ed25519.PubKeyEd25519{})] = ed25519.PubKeyAminoRoute
|
||||
var routeTable = make(map[reflect.Type]string, 3)
|
||||
// nameTable[reflect.TypeOf(ed25519.PubKeyEd25519{})] = ed25519.PubKeyAminoName
|
||||
var nameTable = make(map[reflect.Type]string, 3)
|
||||
|
||||
func init() {
|
||||
// NOTE: It's important that there be no conflicts here,
|
||||
@@ -29,16 +29,16 @@ func init() {
|
||||
|
||||
// TODO: Have amino provide a way to go from concrete struct to route directly.
|
||||
// Its currently a private API
|
||||
routeTable[reflect.TypeOf(ed25519.PubKeyEd25519{})] = ed25519.PubKeyAminoRoute
|
||||
routeTable[reflect.TypeOf(secp256k1.PubKeySecp256k1{})] = secp256k1.PubKeyAminoRoute
|
||||
routeTable[reflect.TypeOf(&multisig.PubKeyMultisigThreshold{})] = multisig.PubKeyMultisigThresholdAminoRoute
|
||||
nameTable[reflect.TypeOf(ed25519.PubKeyEd25519{})] = ed25519.PubKeyAminoName
|
||||
nameTable[reflect.TypeOf(secp256k1.PubKeySecp256k1{})] = secp256k1.PubKeyAminoName
|
||||
nameTable[reflect.TypeOf(multisig.PubKeyMultisigThreshold{})] = multisig.PubKeyMultisigThresholdAminoRoute
|
||||
}
|
||||
|
||||
// PubkeyAminoRoute returns the amino route of a pubkey
|
||||
// PubkeyAminoName returns the amino route of a pubkey
|
||||
// cdc is currently passed in, as eventually this will not be using
|
||||
// a package level codec.
|
||||
func PubkeyAminoRoute(cdc *amino.Codec, key crypto.PubKey) (string, bool) {
|
||||
route, found := routeTable[reflect.TypeOf(key)]
|
||||
func PubkeyAminoName(cdc *amino.Codec, key crypto.PubKey) (string, bool) {
|
||||
route, found := nameTable[reflect.TypeOf(key)]
|
||||
return route, found
|
||||
}
|
||||
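A small usage sketch of the renamed helper (the wrapper name is illustrative): look up the registered amino name of a concrete pubkey type; for an ed25519 key this yields "tendermint/PubKeyEd25519" per the constant renamed above.

func pubKeyNameOrEmpty(cdc *amino.Codec, key crypto.PubKey) string {
	if name, found := PubkeyAminoName(cdc, key); found {
		return name
	}
	return ""
}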
|
||||
@@ -47,17 +47,17 @@ func RegisterAmino(cdc *amino.Codec) {
|
||||
// These are all written here instead of
|
||||
cdc.RegisterInterface((*crypto.PubKey)(nil), nil)
|
||||
cdc.RegisterConcrete(ed25519.PubKeyEd25519{},
|
||||
ed25519.PubKeyAminoRoute, nil)
|
||||
ed25519.PubKeyAminoName, nil)
|
||||
cdc.RegisterConcrete(secp256k1.PubKeySecp256k1{},
|
||||
secp256k1.PubKeyAminoRoute, nil)
|
||||
secp256k1.PubKeyAminoName, nil)
|
||||
cdc.RegisterConcrete(multisig.PubKeyMultisigThreshold{},
|
||||
multisig.PubKeyMultisigThresholdAminoRoute, nil)
|
||||
|
||||
cdc.RegisterInterface((*crypto.PrivKey)(nil), nil)
|
||||
cdc.RegisterConcrete(ed25519.PrivKeyEd25519{},
|
||||
ed25519.PrivKeyAminoRoute, nil)
|
||||
ed25519.PrivKeyAminoName, nil)
|
||||
cdc.RegisterConcrete(secp256k1.PrivKeySecp256k1{},
|
||||
secp256k1.PrivKeyAminoRoute, nil)
|
||||
secp256k1.PrivKeyAminoName, nil)
|
||||
}
|
||||
|
||||
func PrivKeyFromBytes(privKeyBytes []byte) (privKey crypto.PrivKey, err error) {
|
||||
|
@@ -25,9 +25,8 @@ func checkAminoBinary(t *testing.T, src, dst interface{}, size int) {
|
||||
assert.Equal(t, byterSrc.Bytes(), bz, "Amino binary vs Bytes() mismatch")
|
||||
}
|
||||
// Make sure we have the expected length.
|
||||
if size != -1 {
|
||||
assert.Equal(t, size, len(bz), "Amino binary size mismatch")
|
||||
}
|
||||
assert.Equal(t, size, len(bz), "Amino binary size mismatch")
|
||||
|
||||
// Unmarshal.
|
||||
err = cdc.UnmarshalBinaryBare(bz, dst)
|
||||
require.Nil(t, err, "%+v", err)
|
||||
@@ -48,6 +47,8 @@ func checkAminoJSON(t *testing.T, src interface{}, dst interface{}, isNil bool)
|
||||
require.Nil(t, err, "%+v", err)
|
||||
}
|
||||
|
||||
// ExamplePrintRegisteredTypes refers to unknown identifier: PrintRegisteredTypes
|
||||
//nolint:govet
|
||||
func ExamplePrintRegisteredTypes() {
|
||||
cdc.PrintTypes(os.Stdout)
|
||||
// Output: | Type | Name | Prefix | Length | Notes |
|
||||
@@ -128,18 +129,18 @@ func TestPubKeyInvalidDataProperReturnsEmpty(t *testing.T) {
|
||||
require.Nil(t, pk)
|
||||
}
|
||||
|
||||
func TestPubkeyAminoRoute(t *testing.T) {
|
||||
func TestPubkeyAminoName(t *testing.T) {
|
||||
tests := []struct {
|
||||
key crypto.PubKey
|
||||
want string
|
||||
found bool
|
||||
}{
|
||||
{ed25519.PubKeyEd25519{}, ed25519.PubKeyAminoRoute, true},
|
||||
{secp256k1.PubKeySecp256k1{}, secp256k1.PubKeyAminoRoute, true},
|
||||
{&multisig.PubKeyMultisigThreshold{}, multisig.PubKeyMultisigThresholdAminoRoute, true},
|
||||
{ed25519.PubKeyEd25519{}, ed25519.PubKeyAminoName, true},
|
||||
{secp256k1.PubKeySecp256k1{}, secp256k1.PubKeyAminoName, true},
|
||||
{multisig.PubKeyMultisigThreshold{}, multisig.PubKeyMultisigThresholdAminoRoute, true},
|
||||
}
|
||||
for i, tc := range tests {
|
||||
got, found := PubkeyAminoRoute(cdc, tc.key)
|
||||
got, found := PubkeyAminoName(cdc, tc.key)
|
||||
require.Equal(t, tc.found, found, "not equal on tc %d", i)
|
||||
if tc.found {
|
||||
require.Equal(t, tc.want, got, "not equal on tc %d", i)
|
||||
|
@@ -3,7 +3,7 @@ package crypto
|
||||
import (
|
||||
"crypto/sha256"
|
||||
|
||||
"golang.org/x/crypto/ripemd160" // forked to github.com/tendermint/crypto
|
||||
"golang.org/x/crypto/ripemd160"
|
||||
)
|
||||
|
||||
func Sha256(bytes []byte) []byte {
|
||||
|
21
crypto/merkle/hash.go
Normal file
@@ -0,0 +1,21 @@
package merkle

import (
	"github.com/tendermint/tendermint/crypto/tmhash"
)

// TODO: make these have a large predefined capacity
var (
	leafPrefix  = []byte{0}
	innerPrefix = []byte{1}
)

// returns tmhash(0x00 || leaf)
func leafHash(leaf []byte) []byte {
	return tmhash.Sum(append(leafPrefix, leaf...))
}

// returns tmhash(0x01 || left || right)
func innerHash(left []byte, right []byte) []byte {
	return tmhash.Sum(append(innerPrefix, append(left, right...)...))
}
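
The 0x00 and 0x01 prefixes give leaf and inner nodes separate hash domains, in the style of RFC 6962, so an inner node can never be forged from a crafted leaf (second-preimage resistance). Below is a minimal, self-contained sketch of the idea; it uses plain SHA-256 in place of tmhash, whose exact output size is not assumed here, and the function names are illustrative only.

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

// leafHashExample and innerHashExample mirror the prefixing scheme above,
// but use full SHA-256 instead of tmhash.
func leafHashExample(leaf []byte) []byte {
	h := sha256.Sum256(append([]byte{0x00}, leaf...))
	return h[:]
}

func innerHashExample(left, right []byte) []byte {
	h := sha256.Sum256(append([]byte{0x01}, append(left, right...)...))
	return h[:]
}

func main() {
	left, right := []byte("left"), []byte("right")
	// A leaf whose bytes happen to equal "left"+"right" hashes differently
	// from the inner node built over the two child hashes.
	fmt.Printf("leaf over concatenation: %x\n", leafHashExample(append(left, right...)))
	fmt.Printf("inner over child hashes: %x\n", innerHashExample(leafHashExample(left), leafHashExample(right)))
}
```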
@@ -98,7 +98,7 @@ func (prt *ProofRuntime) Decode(pop ProofOp) (ProofOperator, error) {
|
||||
}
|
||||
|
||||
func (prt *ProofRuntime) DecodeProof(proof *Proof) (ProofOperators, error) {
|
||||
var poz ProofOperators
|
||||
poz := make(ProofOperators, 0, len(proof.Ops))
|
||||
for _, pop := range proof.Ops {
|
||||
operator, err := prt.Decode(pop)
|
||||
if err != nil {
|
||||
|
@@ -1,6 +1,8 @@
|
||||
package merkle
|
||||
|
||||
import (
|
||||
// it is ok to use math/rand here: we do not need a cryptographically secure random
|
||||
// number generator here and we can run the tests a bit faster
|
||||
"math/rand"
|
||||
"testing"
|
||||
|
||||
@@ -24,7 +26,7 @@ func TestKeyPath(t *testing.T) {
|
||||
keys[i][j] = alphanum[rand.Intn(len(alphanum))]
|
||||
}
|
||||
case KeyEncodingHex:
|
||||
rand.Read(keys[i])
|
||||
rand.Read(keys[i]) //nolint: gosec
|
||||
default:
|
||||
panic("Unexpected encoding")
|
||||
}
|
||||
|
@@ -71,11 +71,11 @@ func (op SimpleValueOp) Run(args [][]byte) ([][]byte, error) {
|
||||
hasher.Write(value) // does not error
|
||||
vhash := hasher.Sum(nil)
|
||||
|
||||
bz := new(bytes.Buffer)
|
||||
// Wrap <op.Key, vhash> to hash the KVPair.
|
||||
hasher = tmhash.New()
|
||||
encodeByteSlice(hasher, []byte(op.key)) // does not error
|
||||
encodeByteSlice(hasher, []byte(vhash)) // does not error
|
||||
kvhash := hasher.Sum(nil)
|
||||
encodeByteSlice(bz, []byte(op.key)) // does not error
|
||||
encodeByteSlice(bz, []byte(vhash)) // does not error
|
||||
kvhash := leafHash(bz.Bytes())
|
||||
|
||||
if !bytes.Equal(kvhash, op.Proof.LeafHash) {
|
||||
return nil, cmn.NewError("leaf hash mismatch: want %X got %X", op.Proof.LeafHash, kvhash)
|
||||
|
@@ -4,7 +4,7 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/tendermint/go-amino"
|
||||
amino "github.com/tendermint/go-amino"
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
)
|
||||
|
||||
@@ -26,6 +26,7 @@ func NewDominoOp(key, input, output string) DominoOp {
|
||||
}
|
||||
}
|
||||
|
||||
//nolint:unused
|
||||
func DominoOpDecoder(pop ProofOp) (ProofOperator, error) {
|
||||
if pop.Type != ProofOpDomino {
|
||||
panic("unexpected proof op type")
|
||||
|
97
crypto/merkle/rfc6962_test.go
Normal file
@@ -0,0 +1,97 @@
|
||||
package merkle
|
||||
|
||||
// Copyright 2016 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
// These tests were taken from https://github.com/google/trillian/blob/master/merkle/rfc6962/rfc6962_test.go,
|
||||
// and consequently fall under the above license.
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
"testing"
|
||||
|
||||
"github.com/tendermint/tendermint/crypto/tmhash"
|
||||
)
|
||||
|
||||
func TestRFC6962Hasher(t *testing.T) {
|
||||
_, leafHashTrail := trailsFromByteSlices([][]byte{[]byte("L123456")})
|
||||
leafHash := leafHashTrail.Hash
|
||||
_, leafHashTrail = trailsFromByteSlices([][]byte{{}})
|
||||
emptyLeafHash := leafHashTrail.Hash
|
||||
for _, tc := range []struct {
|
||||
desc string
|
||||
got []byte
|
||||
want string
|
||||
}{
|
||||
// Since creating a merkle tree of no leaves is unsupported here, we skip
|
||||
// the corresponding trillian test vector.
|
||||
|
||||
// Check that the empty hash is not the same as the hash of an empty leaf.
|
||||
// echo -n 00 | xxd -r -p | sha256sum
|
||||
{
|
||||
desc: "RFC6962 Empty Leaf",
|
||||
want: "6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d"[:tmhash.Size*2],
|
||||
got: emptyLeafHash,
|
||||
},
|
||||
// echo -n 004C313233343536 | xxd -r -p | sha256sum
|
||||
{
|
||||
desc: "RFC6962 Leaf",
|
||||
want: "395aa064aa4c29f7010acfe3f25db9485bbd4b91897b6ad7ad547639252b4d56"[:tmhash.Size*2],
|
||||
got: leafHash,
|
||||
},
|
||||
// echo -n 014E3132334E343536 | xxd -r -p | sha256sum
|
||||
{
|
||||
desc: "RFC6962 Node",
|
||||
want: "aa217fe888e47007fa15edab33c2b492a722cb106c64667fc2b044444de66bbb"[:tmhash.Size*2],
|
||||
got: innerHash([]byte("N123"), []byte("N456")),
|
||||
},
|
||||
} {
|
||||
t.Run(tc.desc, func(t *testing.T) {
|
||||
wantBytes, err := hex.DecodeString(tc.want)
|
||||
if err != nil {
|
||||
t.Fatalf("hex.DecodeString(%x): %v", tc.want, err)
|
||||
}
|
||||
if got, want := tc.got, wantBytes; !bytes.Equal(got, want) {
|
||||
t.Errorf("got %x, want %x", got, want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestRFC6962HasherCollisions(t *testing.T) {
|
||||
// Check that different leaves have different hashes.
|
||||
leaf1, leaf2 := []byte("Hello"), []byte("World")
|
||||
_, leafHashTrail := trailsFromByteSlices([][]byte{leaf1})
|
||||
hash1 := leafHashTrail.Hash
|
||||
_, leafHashTrail = trailsFromByteSlices([][]byte{leaf2})
|
||||
hash2 := leafHashTrail.Hash
|
||||
if bytes.Equal(hash1, hash2) {
|
||||
t.Errorf("Leaf hashes should differ, but both are %x", hash1)
|
||||
}
|
||||
// Compute an intermediate subtree hash.
|
||||
_, subHash1Trail := trailsFromByteSlices([][]byte{hash1, hash2})
|
||||
subHash1 := subHash1Trail.Hash
|
||||
// Check that this is not the same as a leaf hash of their concatenation.
|
||||
preimage := append(hash1, hash2...)
|
||||
_, forgedHashTrail := trailsFromByteSlices([][]byte{preimage})
|
||||
forgedHash := forgedHashTrail.Hash
|
||||
if bytes.Equal(subHash1, forgedHash) {
|
||||
t.Errorf("Hasher is not second-preimage resistant")
|
||||
}
|
||||
// Swap the order of nodes and check that the hash is different.
|
||||
_, subHash2Trail := trailsFromByteSlices([][]byte{hash2, hash1})
|
||||
subHash2 := subHash2Trail.Hash
|
||||
if bytes.Equal(subHash1, subHash2) {
|
||||
t.Errorf("Subtree hash does not depend on the order of leaves")
|
||||
}
|
||||
}
|
@@ -13,14 +13,14 @@ func TestSimpleMap(t *testing.T) {
|
||||
values []string // each string gets converted to []byte in test
|
||||
want string
|
||||
}{
|
||||
{[]string{"key1"}, []string{"value1"}, "321d150de16dceb51c72981b432b115045383259b1a550adf8dc80f927508967"},
|
||||
{[]string{"key1"}, []string{"value2"}, "2a9e4baf321eac99f6eecc3406603c14bc5e85bb7b80483cbfc75b3382d24a2f"},
|
||||
{[]string{"key1"}, []string{"value1"}, "a44d3cc7daba1a4600b00a2434b30f8b970652169810d6dfa9fb1793a2189324"},
|
||||
{[]string{"key1"}, []string{"value2"}, "0638e99b3445caec9d95c05e1a3fc1487b4ddec6a952ff337080360b0dcc078c"},
|
||||
// swap order with 2 keys
|
||||
{[]string{"key1", "key2"}, []string{"value1", "value2"}, "c4d8913ab543ba26aa970646d4c99a150fd641298e3367cf68ca45fb45a49881"},
|
||||
{[]string{"key2", "key1"}, []string{"value2", "value1"}, "c4d8913ab543ba26aa970646d4c99a150fd641298e3367cf68ca45fb45a49881"},
|
||||
{[]string{"key1", "key2"}, []string{"value1", "value2"}, "8fd19b19e7bb3f2b3ee0574027d8a5a4cec370464ea2db2fbfa5c7d35bb0cff3"},
|
||||
{[]string{"key2", "key1"}, []string{"value2", "value1"}, "8fd19b19e7bb3f2b3ee0574027d8a5a4cec370464ea2db2fbfa5c7d35bb0cff3"},
|
||||
// swap order with 3 keys
|
||||
{[]string{"key1", "key2", "key3"}, []string{"value1", "value2", "value3"}, "b23cef00eda5af4548a213a43793f2752d8d9013b3f2b64bc0523a4791196268"},
|
||||
{[]string{"key1", "key3", "key2"}, []string{"value1", "value3", "value2"}, "b23cef00eda5af4548a213a43793f2752d8d9013b3f2b64bc0523a4791196268"},
|
||||
{[]string{"key1", "key2", "key3"}, []string{"value1", "value2", "value3"}, "1dd674ec6782a0d586a903c9c63326a41cbe56b3bba33ed6ff5b527af6efb3dc"},
|
||||
{[]string{"key1", "key3", "key2"}, []string{"value1", "value3", "value2"}, "1dd674ec6782a0d586a903c9c63326a41cbe56b3bba33ed6ff5b527af6efb3dc"},
|
||||
}
|
||||
for i, tc := range tests {
|
||||
db := newSimpleMap()
|
||||
|
@@ -5,7 +5,6 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/tendermint/tendermint/crypto/tmhash"
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
)
|
||||
|
||||
@@ -67,7 +66,8 @@ func SimpleProofsFromMap(m map[string][]byte) (rootHash []byte, proofs map[strin
|
||||
|
||||
// Verify that the SimpleProof proves the root hash.
|
||||
// Check sp.Index/sp.Total manually if needed
|
||||
func (sp *SimpleProof) Verify(rootHash []byte, leafHash []byte) error {
|
||||
func (sp *SimpleProof) Verify(rootHash []byte, leaf []byte) error {
|
||||
leafHash := leafHash(leaf)
|
||||
if sp.Total < 0 {
|
||||
return errors.New("Proof total must be positive")
|
||||
}
|
||||
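
With this change, callers of SimpleProof.Verify pass the raw leaf bytes rather than a precomputed hash; Verify applies the 0x00-prefixed leaf hash itself. A hedged usage sketch follows, assuming the crypto/merkle package as modified above and that SimpleProofsFromByteSlices is available with its usual signature:

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/crypto/merkle"
)

func main() {
	items := [][]byte{[]byte("a"), []byte("b"), []byte("c")}

	// rootHash commits to all items; proofs[i] proves items[i] against it.
	rootHash, proofs := merkle.SimpleProofsFromByteSlices(items)

	// Callers previously passed tmhash.Sum(items[1]); after this change the
	// raw leaf is passed and Verify hashes it with the leaf prefix itself.
	if err := proofs[1].Verify(rootHash, items[1]); err != nil {
		fmt.Println("proof rejected:", err)
		return
	}
	fmt.Printf("item %q verified under root %X\n", items[1], rootHash)
}
```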
@@ -128,19 +128,19 @@ func computeHashFromAunts(index int, total int, leafHash []byte, innerHashes [][
|
||||
if len(innerHashes) == 0 {
|
||||
return nil
|
||||
}
|
||||
numLeft := (total + 1) / 2
|
||||
numLeft := getSplitPoint(total)
|
||||
if index < numLeft {
|
||||
leftHash := computeHashFromAunts(index, numLeft, leafHash, innerHashes[:len(innerHashes)-1])
|
||||
if leftHash == nil {
|
||||
return nil
|
||||
}
|
||||
return simpleHashFromTwoHashes(leftHash, innerHashes[len(innerHashes)-1])
|
||||
return innerHash(leftHash, innerHashes[len(innerHashes)-1])
|
||||
}
|
||||
rightHash := computeHashFromAunts(index-numLeft, total-numLeft, leafHash, innerHashes[:len(innerHashes)-1])
|
||||
if rightHash == nil {
|
||||
return nil
|
||||
}
|
||||
return simpleHashFromTwoHashes(innerHashes[len(innerHashes)-1], rightHash)
|
||||
return innerHash(innerHashes[len(innerHashes)-1], rightHash)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -182,12 +182,13 @@ func trailsFromByteSlices(items [][]byte) (trails []*SimpleProofNode, root *Simp
|
||||
case 0:
|
||||
return nil, nil
|
||||
case 1:
|
||||
trail := &SimpleProofNode{tmhash.Sum(items[0]), nil, nil, nil}
|
||||
trail := &SimpleProofNode{leafHash(items[0]), nil, nil, nil}
|
||||
return []*SimpleProofNode{trail}, trail
|
||||
default:
|
||||
lefts, leftRoot := trailsFromByteSlices(items[:(len(items)+1)/2])
|
||||
rights, rightRoot := trailsFromByteSlices(items[(len(items)+1)/2:])
|
||||
rootHash := simpleHashFromTwoHashes(leftRoot.Hash, rightRoot.Hash)
|
||||
k := getSplitPoint(len(items))
|
||||
lefts, leftRoot := trailsFromByteSlices(items[:k])
|
||||
rights, rightRoot := trailsFromByteSlices(items[k:])
|
||||
rootHash := innerHash(leftRoot.Hash, rightRoot.Hash)
|
||||
root := &SimpleProofNode{rootHash, nil, nil, nil}
|
||||
leftRoot.Parent = root
|
||||
leftRoot.Right = rightRoot
|
||||
|
@@ -1,23 +1,9 @@
|
||||
package merkle
|
||||
|
||||
import (
|
||||
"github.com/tendermint/tendermint/crypto/tmhash"
|
||||
"math/bits"
|
||||
)
|
||||
|
||||
// simpleHashFromTwoHashes is the basic operation of the Merkle tree: Hash(left | right).
|
||||
func simpleHashFromTwoHashes(left, right []byte) []byte {
|
||||
var hasher = tmhash.New()
|
||||
err := encodeByteSlice(hasher, left)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
err = encodeByteSlice(hasher, right)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return hasher.Sum(nil)
|
||||
}
|
||||
|
||||
// SimpleHashFromByteSlices computes a Merkle tree where the leaves are the byte slice,
|
||||
// in the provided order.
|
||||
func SimpleHashFromByteSlices(items [][]byte) []byte {
|
||||
@@ -25,11 +11,12 @@ func SimpleHashFromByteSlices(items [][]byte) []byte {
|
||||
case 0:
|
||||
return nil
|
||||
case 1:
|
||||
return tmhash.Sum(items[0])
|
||||
return leafHash(items[0])
|
||||
default:
|
||||
left := SimpleHashFromByteSlices(items[:(len(items)+1)/2])
|
||||
right := SimpleHashFromByteSlices(items[(len(items)+1)/2:])
|
||||
return simpleHashFromTwoHashes(left, right)
|
||||
k := getSplitPoint(len(items))
|
||||
left := SimpleHashFromByteSlices(items[:k])
|
||||
right := SimpleHashFromByteSlices(items[k:])
|
||||
return innerHash(left, right)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -44,3 +31,17 @@ func SimpleHashFromMap(m map[string][]byte) []byte {
|
||||
}
|
||||
return sm.Hash()
|
||||
}
|
||||
|
||||
// getSplitPoint returns the largest power of 2 less than length
func getSplitPoint(length int) int {
	if length < 1 {
		panic("Trying to split a tree with size < 1")
	}
	uLength := uint(length)
	bitlen := bits.Len(uLength)
	k := 1 << uint(bitlen-1)
	if k == length {
		k >>= 1
	}
	return k
}
|
||||
|
@@ -34,7 +34,6 @@ func TestSimpleProof(t *testing.T) {
|
||||
|
||||
// For each item, check the trail.
|
||||
for i, item := range items {
|
||||
itemHash := tmhash.Sum(item)
|
||||
proof := proofs[i]
|
||||
|
||||
// Check total/index
|
||||
@@ -43,30 +42,53 @@ func TestSimpleProof(t *testing.T) {
|
||||
require.Equal(t, proof.Total, total, "Unmatched totals: %d vs %d", proof.Total, total)
|
||||
|
||||
// Verify success
|
||||
err := proof.Verify(rootHash, itemHash)
|
||||
require.NoError(t, err, "Verificatior failed: %v.", err)
|
||||
err := proof.Verify(rootHash, item)
|
||||
require.NoError(t, err, "Verification failed: %v.", err)
|
||||
|
||||
// Trail too long should make it fail
|
||||
origAunts := proof.Aunts
|
||||
proof.Aunts = append(proof.Aunts, cmn.RandBytes(32))
|
||||
err = proof.Verify(rootHash, itemHash)
|
||||
err = proof.Verify(rootHash, item)
|
||||
require.Error(t, err, "Expected verification to fail for wrong trail length")
|
||||
|
||||
proof.Aunts = origAunts
|
||||
|
||||
// Trail too short should make it fail
|
||||
proof.Aunts = proof.Aunts[0 : len(proof.Aunts)-1]
|
||||
err = proof.Verify(rootHash, itemHash)
|
||||
err = proof.Verify(rootHash, item)
|
||||
require.Error(t, err, "Expected verification to fail for wrong trail length")
|
||||
|
||||
proof.Aunts = origAunts
|
||||
|
||||
// Mutating the itemHash should make it fail.
|
||||
err = proof.Verify(rootHash, MutateByteSlice(itemHash))
|
||||
err = proof.Verify(rootHash, MutateByteSlice(item))
|
||||
require.Error(t, err, "Expected verification to fail for mutated leaf hash")
|
||||
|
||||
// Mutating the rootHash should make it fail.
|
||||
err = proof.Verify(MutateByteSlice(rootHash), itemHash)
|
||||
err = proof.Verify(MutateByteSlice(rootHash), item)
|
||||
require.Error(t, err, "Expected verification to fail for mutated root hash")
|
||||
}
|
||||
}
|
||||
|
||||
func Test_getSplitPoint(t *testing.T) {
|
||||
tests := []struct {
|
||||
length int
|
||||
want int
|
||||
}{
|
||||
{1, 0},
|
||||
{2, 1},
|
||||
{3, 2},
|
||||
{4, 2},
|
||||
{5, 4},
|
||||
{10, 8},
|
||||
{20, 16},
|
||||
{100, 64},
|
||||
{255, 128},
|
||||
{256, 128},
|
||||
{257, 256},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
got := getSplitPoint(tt.length)
|
||||
require.Equal(t, tt.want, got, "getSplitPoint(%d) = %v, want %v", tt.length, got, tt.want)
|
||||
}
|
||||
}
|
||||
|
@@ -1,7 +1,7 @@
|
||||
package merkle
|
||||
|
||||
import (
|
||||
"github.com/tendermint/go-amino"
|
||||
amino "github.com/tendermint/go-amino"
|
||||
)
|
||||
|
||||
var cdc *amino.Codec
|
||||
|
@@ -2,7 +2,6 @@ package multisig
|
||||
|
||||
import (
|
||||
"github.com/tendermint/tendermint/crypto"
|
||||
"github.com/tendermint/tendermint/crypto/tmhash"
|
||||
)
|
||||
|
||||
// PubKeyMultisigThreshold implements a K of N threshold multisig.
|
||||
@@ -11,7 +10,7 @@ type PubKeyMultisigThreshold struct {
|
||||
PubKeys []crypto.PubKey `json:"pubkeys"`
|
||||
}
|
||||
|
||||
var _ crypto.PubKey = &PubKeyMultisigThreshold{}
|
||||
var _ crypto.PubKey = PubKeyMultisigThreshold{}
|
||||
|
||||
// NewPubKeyMultisigThreshold returns a new PubKeyMultisigThreshold.
|
||||
// Panics if len(pubkeys) < k or 0 >= k.
|
||||
@@ -22,7 +21,7 @@ func NewPubKeyMultisigThreshold(k int, pubkeys []crypto.PubKey) crypto.PubKey {
|
||||
if len(pubkeys) < k {
|
||||
panic("threshold k of n multisignature: len(pubkeys) < k")
|
||||
}
|
||||
return &PubKeyMultisigThreshold{uint(k), pubkeys}
|
||||
return PubKeyMultisigThreshold{uint(k), pubkeys}
|
||||
}
|
||||
|
||||
// VerifyBytes expects sig to be an amino encoded version of a MultiSignature.
|
||||
@@ -31,8 +30,8 @@ func NewPubKeyMultisigThreshold(k int, pubkeys []crypto.PubKey) crypto.PubKey {
|
||||
// and all signatures are valid. (Not just k of the signatures)
|
||||
// The multisig uses a bitarray, so multiple signatures for the same key is not
|
||||
// a concern.
|
||||
func (pk *PubKeyMultisigThreshold) VerifyBytes(msg []byte, marshalledSig []byte) bool {
|
||||
var sig *Multisignature
|
||||
func (pk PubKeyMultisigThreshold) VerifyBytes(msg []byte, marshalledSig []byte) bool {
|
||||
var sig Multisignature
|
||||
err := cdc.UnmarshalBinaryBare(marshalledSig, &sig)
|
||||
if err != nil {
|
||||
return false
|
||||
@@ -64,19 +63,19 @@ func (pk *PubKeyMultisigThreshold) VerifyBytes(msg []byte, marshalledSig []byte)
|
||||
}
|
||||
|
||||
// Bytes returns the amino encoded version of the PubKeyMultisigThreshold
|
||||
func (pk *PubKeyMultisigThreshold) Bytes() []byte {
|
||||
func (pk PubKeyMultisigThreshold) Bytes() []byte {
|
||||
return cdc.MustMarshalBinaryBare(pk)
|
||||
}
|
||||
|
||||
// Address returns tmhash(PubKeyMultisigThreshold.Bytes())
|
||||
func (pk *PubKeyMultisigThreshold) Address() crypto.Address {
|
||||
return crypto.Address(tmhash.Sum(pk.Bytes()))
|
||||
func (pk PubKeyMultisigThreshold) Address() crypto.Address {
|
||||
return crypto.AddressHash(pk.Bytes())
|
||||
}
|
||||
|
||||
// Equals returns true iff pk and other both have the same number of keys, and
|
||||
// all constituent keys are the same, and in the same order.
|
||||
func (pk *PubKeyMultisigThreshold) Equals(other crypto.PubKey) bool {
|
||||
otherKey, sameType := other.(*PubKeyMultisigThreshold)
|
||||
func (pk PubKeyMultisigThreshold) Equals(other crypto.PubKey) bool {
|
||||
otherKey, sameType := other.(PubKeyMultisigThreshold)
|
||||
if !sameType {
|
||||
return false
|
||||
}
|
||||
|
@@ -82,7 +82,7 @@ func TestMultiSigPubKeyEquality(t *testing.T) {
|
||||
msg := []byte{1, 2, 3, 4}
|
||||
pubkeys, _ := generatePubKeysAndSignatures(5, msg)
|
||||
multisigKey := NewPubKeyMultisigThreshold(2, pubkeys)
|
||||
var unmarshalledMultisig *PubKeyMultisigThreshold
|
||||
var unmarshalledMultisig PubKeyMultisigThreshold
|
||||
cdc.MustUnmarshalBinaryBare(multisigKey.Bytes(), &unmarshalledMultisig)
|
||||
require.True(t, multisigKey.Equals(unmarshalledMultisig))
|
||||
|
||||
@@ -95,6 +95,29 @@ func TestMultiSigPubKeyEquality(t *testing.T) {
|
||||
require.False(t, multisigKey.Equals(multisigKey2))
|
||||
}
|
||||
|
||||
func TestAddress(t *testing.T) {
|
||||
msg := []byte{1, 2, 3, 4}
|
||||
pubkeys, _ := generatePubKeysAndSignatures(5, msg)
|
||||
multisigKey := NewPubKeyMultisigThreshold(2, pubkeys)
|
||||
require.Len(t, multisigKey.Address().Bytes(), 20)
|
||||
}
|
||||
|
||||
func TestPubKeyMultisigThresholdAminoToIface(t *testing.T) {
|
||||
msg := []byte{1, 2, 3, 4}
|
||||
pubkeys, _ := generatePubKeysAndSignatures(5, msg)
|
||||
multisigKey := NewPubKeyMultisigThreshold(2, pubkeys)
|
||||
|
||||
ab, err := cdc.MarshalBinaryLengthPrefixed(multisigKey)
|
||||
require.NoError(t, err)
|
||||
// like other crypto.Pubkey implementations (e.g. ed25519.PubKeyEd25519),
|
||||
// PubKeyMultisigThreshold should be deserializable into a crypto.PubKey:
|
||||
var pubKey crypto.PubKey
|
||||
err = cdc.UnmarshalBinaryLengthPrefixed(ab, &pubKey)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, multisigKey, pubKey)
|
||||
}
|
||||
|
||||
func generatePubKeysAndSignatures(n int, msg []byte) (pubkeys []crypto.PubKey, signatures [][]byte) {
|
||||
pubkeys = make([]crypto.PubKey, n)
|
||||
signatures = make([][]byte, n)
|
||||
|
@@ -20,7 +20,7 @@ func init() {
|
||||
cdc.RegisterConcrete(PubKeyMultisigThreshold{},
|
||||
PubKeyMultisigThresholdAminoRoute, nil)
|
||||
cdc.RegisterConcrete(ed25519.PubKeyEd25519{},
|
||||
ed25519.PubKeyAminoRoute, nil)
|
||||
ed25519.PubKeyAminoName, nil)
|
||||
cdc.RegisterConcrete(secp256k1.PubKeySecp256k1{},
|
||||
secp256k1.PubKeyAminoRoute, nil)
|
||||
secp256k1.PubKeyAminoName, nil)
|
||||
}
|
||||
|
104
crypto/random.go
@@ -1,42 +1,11 @@
|
||||
package crypto
|
||||
|
||||
import (
|
||||
"crypto/cipher"
|
||||
crand "crypto/rand"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"io"
|
||||
"sync"
|
||||
|
||||
"golang.org/x/crypto/chacha20poly1305"
|
||||
)
|
||||
|
||||
// NOTE: This is ignored for now until we have time
|
||||
// to properly review the MixEntropy function - https://github.com/tendermint/tendermint/issues/2099.
|
||||
//
|
||||
// The randomness here is derived from xoring a chacha20 keystream with
|
||||
// output from crypto/rand's OS Entropy Reader. (Due to fears of the OS'
|
||||
// entropy being backdoored)
|
||||
//
|
||||
// For forward secrecy of produced randomness, the internal chacha key is hashed
|
||||
// and thereby rotated after each call.
|
||||
var gRandInfo *randInfo
|
||||
|
||||
func init() {
|
||||
gRandInfo = &randInfo{}
|
||||
|
||||
// TODO: uncomment after reviewing MixEntropy -
|
||||
// https://github.com/tendermint/tendermint/issues/2099
|
||||
// gRandInfo.MixEntropy(randBytes(32)) // Init
|
||||
}
|
||||
|
||||
// WARNING: This function needs review - https://github.com/tendermint/tendermint/issues/2099.
|
||||
// Mix additional bytes of randomness, e.g. from hardware, user-input, etc.
|
||||
// It is OK to call it multiple times. It does not diminish security.
|
||||
func MixEntropy(seedBytes []byte) {
|
||||
gRandInfo.MixEntropy(seedBytes)
|
||||
}
|
||||
|
||||
// This only uses the OS's randomness
|
||||
func randBytes(numBytes int) []byte {
|
||||
b := make([]byte, numBytes)
|
||||
@@ -52,19 +21,6 @@ func CRandBytes(numBytes int) []byte {
|
||||
return randBytes(numBytes)
|
||||
}
|
||||
|
||||
/* TODO: uncomment after reviewing MixEntropy - https://github.com/tendermint/tendermint/issues/2099
|
||||
// This uses the OS and the Seed(s).
|
||||
func CRandBytes(numBytes int) []byte {
|
||||
return randBytes(numBytes)
|
||||
b := make([]byte, numBytes)
|
||||
_, err := gRandInfo.Read(b)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return b
|
||||
}
|
||||
*/
|
||||
|
||||
// CRandHex returns a hex encoded string that's floor(numDigits/2) * 2 long.
|
||||
//
|
||||
// Note: CRandHex(24) gives 96 bits of randomness that
|
||||
@@ -77,63 +33,3 @@ func CRandHex(numDigits int) string {
|
||||
func CReader() io.Reader {
|
||||
return crand.Reader
|
||||
}
|
||||
|
||||
/* TODO: uncomment after reviewing MixEntropy - https://github.com/tendermint/tendermint/issues/2099
|
||||
// Returns a crand.Reader mixed with user-supplied entropy
|
||||
func CReader() io.Reader {
|
||||
return gRandInfo
|
||||
}
|
||||
*/
|
||||
|
||||
//--------------------------------------------------------------------------------
|
||||
|
||||
type randInfo struct {
|
||||
mtx sync.Mutex
|
||||
seedBytes [chacha20poly1305.KeySize]byte
|
||||
chacha cipher.AEAD
|
||||
reader io.Reader
|
||||
}
|
||||
|
||||
// You can call this as many times as you'd like.
|
||||
// XXX/TODO: review - https://github.com/tendermint/tendermint/issues/2099
|
||||
func (ri *randInfo) MixEntropy(seedBytes []byte) {
|
||||
ri.mtx.Lock()
|
||||
defer ri.mtx.Unlock()
|
||||
// Make new ri.seedBytes using passed seedBytes and current ri.seedBytes:
|
||||
// ri.seedBytes = sha256( seedBytes || ri.seedBytes )
|
||||
h := sha256.New()
|
||||
h.Write(seedBytes)
|
||||
h.Write(ri.seedBytes[:])
|
||||
hashBytes := h.Sum(nil)
|
||||
copy(ri.seedBytes[:], hashBytes)
|
||||
chacha, err := chacha20poly1305.New(ri.seedBytes[:])
|
||||
if err != nil {
|
||||
panic("Initializing chacha20 failed")
|
||||
}
|
||||
ri.chacha = chacha
|
||||
// Create new reader
|
||||
ri.reader = &cipher.StreamReader{S: ri, R: crand.Reader}
|
||||
}
|
||||
|
||||
func (ri *randInfo) XORKeyStream(dst, src []byte) {
|
||||
// nonce being 0 is safe due to never re-using a key.
|
||||
emptyNonce := make([]byte, 12)
|
||||
tmpDst := ri.chacha.Seal([]byte{}, emptyNonce, src, []byte{0})
|
||||
// this removes the poly1305 tag as well, since chacha is a stream cipher
|
||||
// and we truncate at input length.
|
||||
copy(dst, tmpDst[:len(src)])
|
||||
// hash seedBytes for forward secrecy, and initialize new chacha instance
|
||||
newSeed := sha256.Sum256(ri.seedBytes[:])
|
||||
chacha, err := chacha20poly1305.New(newSeed[:])
|
||||
if err != nil {
|
||||
panic("Initializing chacha20 failed")
|
||||
}
|
||||
ri.chacha = chacha
|
||||
}
|
||||
|
||||
func (ri *randInfo) Read(b []byte) (n int, err error) {
|
||||
ri.mtx.Lock()
|
||||
n, err = ri.reader.Read(b)
|
||||
ri.mtx.Unlock()
|
||||
return
|
||||
}
|
||||
|
@@ -7,17 +7,19 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
secp256k1 "github.com/tendermint/btcd/btcec"
|
||||
"golang.org/x/crypto/ripemd160"
|
||||
|
||||
secp256k1 "github.com/btcsuite/btcd/btcec"
|
||||
|
||||
amino "github.com/tendermint/go-amino"
|
||||
"golang.org/x/crypto/ripemd160" // forked to github.com/tendermint/crypto
|
||||
|
||||
"github.com/tendermint/tendermint/crypto"
|
||||
)
|
||||
|
||||
//-------------------------------------
|
||||
const (
|
||||
PrivKeyAminoRoute = "tendermint/PrivKeySecp256k1"
|
||||
PubKeyAminoRoute = "tendermint/PubKeySecp256k1"
|
||||
PrivKeyAminoName = "tendermint/PrivKeySecp256k1"
|
||||
PubKeyAminoName = "tendermint/PubKeySecp256k1"
|
||||
)
|
||||
|
||||
var cdc = amino.NewCodec()
|
||||
@@ -25,11 +27,11 @@ var cdc = amino.NewCodec()
|
||||
func init() {
|
||||
cdc.RegisterInterface((*crypto.PubKey)(nil), nil)
|
||||
cdc.RegisterConcrete(PubKeySecp256k1{},
|
||||
PubKeyAminoRoute, nil)
|
||||
PubKeyAminoName, nil)
|
||||
|
||||
cdc.RegisterInterface((*crypto.PrivKey)(nil), nil)
|
||||
cdc.RegisterConcrete(PrivKeySecp256k1{},
|
||||
PrivKeyAminoRoute, nil)
|
||||
PrivKeyAminoName, nil)
|
||||
}
|
||||
|
||||
//-------------------------------------
|
||||
@@ -44,16 +46,6 @@ func (privKey PrivKeySecp256k1) Bytes() []byte {
|
||||
return cdc.MustMarshalBinaryBare(privKey)
|
||||
}
|
||||
|
||||
// Sign creates an ECDSA signature on curve Secp256k1, using SHA256 on the msg.
|
||||
func (privKey PrivKeySecp256k1) Sign(msg []byte) ([]byte, error) {
|
||||
priv, _ := secp256k1.PrivKeyFromBytes(secp256k1.S256(), privKey[:])
|
||||
sig, err := priv.Sign(crypto.Sha256(msg))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return sig.Serialize(), nil
|
||||
}
|
||||
|
||||
// PubKey performs the point-scalar multiplication from the privKey on the
|
||||
// generator point to get the pubkey.
|
||||
func (privKey PrivKeySecp256k1) PubKey() crypto.PubKey {
|
||||
@@ -137,20 +129,6 @@ func (pubKey PubKeySecp256k1) Bytes() []byte {
|
||||
return bz
|
||||
}
|
||||
|
||||
func (pubKey PubKeySecp256k1) VerifyBytes(msg []byte, sig []byte) bool {
|
||||
pub, err := secp256k1.ParsePubKey(pubKey[:], secp256k1.S256())
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
parsedSig, err := secp256k1.ParseSignature(sig[:], secp256k1.S256())
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
// Underlying library ensures that this signature is in canonical form, to
|
||||
// prevent Secp256k1 malleability from altering the sign of the s term.
|
||||
return parsedSig.Verify(crypto.Sha256(msg), pub)
|
||||
}
|
||||
|
||||
func (pubKey PubKeySecp256k1) String() string {
|
||||
return fmt.Sprintf("PubKeySecp256k1{%X}", pubKey[:])
|
||||
}
|
||||
|
24
crypto/secp256k1/secp256k1_cgo.go
Normal file
@@ -0,0 +1,24 @@
|
||||
// +build libsecp256k1
|
||||
|
||||
package secp256k1
|
||||
|
||||
import (
|
||||
"github.com/ethereum/go-ethereum/crypto/secp256k1"
|
||||
|
||||
"github.com/tendermint/tendermint/crypto"
|
||||
)
|
||||
|
||||
// Sign creates an ECDSA signature on curve Secp256k1, using SHA256 on the msg.
|
||||
func (privKey PrivKeySecp256k1) Sign(msg []byte) ([]byte, error) {
|
||||
rsv, err := secp256k1.Sign(crypto.Sha256(msg), privKey[:])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// we do not need v in r||s||v:
|
||||
rs := rsv[:len(rsv)-1]
|
||||
return rs, nil
|
||||
}
|
||||
|
||||
func (pubKey PubKeySecp256k1) VerifyBytes(msg []byte, sig []byte) bool {
|
||||
return secp256k1.VerifySignature(pubKey[:], crypto.Sha256(msg), sig)
|
||||
}
|
70
crypto/secp256k1/secp256k1_nocgo.go
Normal file
@@ -0,0 +1,70 @@
|
||||
// +build !libsecp256k1
|
||||
|
||||
package secp256k1
|
||||
|
||||
import (
|
||||
"math/big"
|
||||
|
||||
secp256k1 "github.com/btcsuite/btcd/btcec"
|
||||
|
||||
"github.com/tendermint/tendermint/crypto"
|
||||
)
|
||||
|
||||
// used to reject malleable signatures
|
||||
// see:
|
||||
// - https://github.com/ethereum/go-ethereum/blob/f9401ae011ddf7f8d2d95020b7446c17f8d98dc1/crypto/signature_nocgo.go#L90-L93
|
||||
// - https://github.com/ethereum/go-ethereum/blob/f9401ae011ddf7f8d2d95020b7446c17f8d98dc1/crypto/crypto.go#L39
|
||||
var secp256k1halfN = new(big.Int).Rsh(secp256k1.S256().N, 1)
|
||||
|
||||
// Sign creates an ECDSA signature on curve Secp256k1, using SHA256 on the msg.
|
||||
// The returned signature will be of the form R || S (in lower-S form).
|
||||
func (privKey PrivKeySecp256k1) Sign(msg []byte) ([]byte, error) {
|
||||
priv, _ := secp256k1.PrivKeyFromBytes(secp256k1.S256(), privKey[:])
|
||||
sig, err := priv.Sign(crypto.Sha256(msg))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sigBytes := serializeSig(sig)
|
||||
return sigBytes, nil
|
||||
}
|
||||
|
||||
// VerifyBytes verifies a signature of the form R || S.
|
||||
// It rejects signatures which are not in lower-S form.
|
||||
func (pubKey PubKeySecp256k1) VerifyBytes(msg []byte, sigStr []byte) bool {
|
||||
if len(sigStr) != 64 {
|
||||
return false
|
||||
}
|
||||
pub, err := secp256k1.ParsePubKey(pubKey[:], secp256k1.S256())
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
// parse the signature:
|
||||
signature := signatureFromBytes(sigStr)
|
||||
// Reject malleable signatures. libsecp256k1 does this check but btcec doesn't.
|
||||
// see: https://github.com/ethereum/go-ethereum/blob/f9401ae011ddf7f8d2d95020b7446c17f8d98dc1/crypto/signature_nocgo.go#L90-L93
|
||||
if signature.S.Cmp(secp256k1halfN) > 0 {
|
||||
return false
|
||||
}
|
||||
return signature.Verify(crypto.Sha256(msg), pub)
|
||||
}
|
||||
|
||||
// Read Signature struct from R || S. Caller needs to ensure
|
||||
// that len(sigStr) == 64.
|
||||
func signatureFromBytes(sigStr []byte) *secp256k1.Signature {
|
||||
return &secp256k1.Signature{
|
||||
R: new(big.Int).SetBytes(sigStr[:32]),
|
||||
S: new(big.Int).SetBytes(sigStr[32:64]),
|
||||
}
|
||||
}
|
||||
|
||||
// Serialize signature to R || S.
|
||||
// R, S are padded to 32 bytes respectively.
|
||||
func serializeSig(sig *secp256k1.Signature) []byte {
|
||||
rBytes := sig.R.Bytes()
|
||||
sBytes := sig.S.Bytes()
|
||||
sigBytes := make([]byte, 64)
|
||||
// 0 pad the byte arrays from the left if they aren't big enough.
|
||||
copy(sigBytes[32-len(rBytes):32], rBytes)
|
||||
copy(sigBytes[64-len(sBytes):64], sBytes)
|
||||
return sigBytes
|
||||
}
|
39
crypto/secp256k1/secp256k1_nocgo_test.go
Normal file
@@ -0,0 +1,39 @@
|
||||
// +build !libsecp256k1
|
||||
|
||||
package secp256k1
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
secp256k1 "github.com/btcsuite/btcd/btcec"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// Ensure that signature verification works, and that
|
||||
// non-canonical signatures fail.
|
||||
// Note: run with CGO_ENABLED=0 or go test -tags !cgo.
|
||||
func TestSignatureVerificationAndRejectUpperS(t *testing.T) {
|
||||
msg := []byte("We have lingered long enough on the shores of the cosmic ocean.")
|
||||
for i := 0; i < 500; i++ {
|
||||
priv := GenPrivKey()
|
||||
sigStr, err := priv.Sign(msg)
|
||||
require.NoError(t, err)
|
||||
sig := signatureFromBytes(sigStr)
|
||||
require.False(t, sig.S.Cmp(secp256k1halfN) > 0)
|
||||
|
||||
pub := priv.PubKey()
|
||||
require.True(t, pub.VerifyBytes(msg, sigStr))
|
||||
|
||||
// malleate:
|
||||
sig.S.Sub(secp256k1.S256().CurveParams.N, sig.S)
|
||||
require.True(t, sig.S.Cmp(secp256k1halfN) > 0)
|
||||
malSigStr := serializeSig(sig)
|
||||
|
||||
require.False(t, pub.VerifyBytes(msg, malSigStr),
|
||||
"VerifyBytes incorrect with malleated & invalid S. sig=%v, key=%v",
|
||||
sig,
|
||||
priv,
|
||||
)
|
||||
}
|
||||
}
|
@@ -11,7 +11,7 @@ import (
|
||||
"github.com/tendermint/tendermint/crypto"
|
||||
"github.com/tendermint/tendermint/crypto/secp256k1"
|
||||
|
||||
underlyingSecp256k1 "github.com/tendermint/btcd/btcec"
|
||||
underlyingSecp256k1 "github.com/btcsuite/btcd/btcec"
|
||||
)
|
||||
|
||||
type keyData struct {
|
||||
|
@@ -8,7 +8,7 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"golang.org/x/crypto/chacha20poly1305" // forked to github.com/tendermint/crypto
|
||||
"golang.org/x/crypto/chacha20poly1305"
|
||||
)
|
||||
|
||||
// Implements crypto.AEAD
|
||||
|
@@ -4,7 +4,7 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"golang.org/x/crypto/nacl/secretbox" // forked to github.com/tendermint/crypto
|
||||
"golang.org/x/crypto/nacl/secretbox"
|
||||
|
||||
"github.com/tendermint/tendermint/crypto"
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
@@ -17,7 +17,6 @@ const secretLen = 32
|
||||
|
||||
// secret must be 32 bytes long. Use something like Sha256(Bcrypt(passphrase))
|
||||
// The ciphertext is (secretbox.Overhead + 24) bytes longer than the plaintext.
|
||||
// NOTE: call crypto.MixEntropy() first.
|
||||
func EncryptSymmetric(plaintext []byte, secret []byte) (ciphertext []byte) {
|
||||
if len(secret) != secretLen {
|
||||
cmn.PanicSanity(fmt.Sprintf("Secret must be 32 bytes long, got len %v", len(secret)))
|
||||
|
@@ -6,15 +6,13 @@ import (
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"golang.org/x/crypto/bcrypt" // forked to github.com/tendermint/crypto
|
||||
"golang.org/x/crypto/bcrypt"
|
||||
|
||||
"github.com/tendermint/tendermint/crypto"
|
||||
)
|
||||
|
||||
func TestSimple(t *testing.T) {
|
||||
|
||||
crypto.MixEntropy([]byte("someentropy"))
|
||||
|
||||
plaintext := []byte("sometext")
|
||||
secret := []byte("somesecretoflengththirtytwo===32")
|
||||
ciphertext := EncryptSymmetric(plaintext, secret)
|
||||
@@ -26,13 +24,9 @@ func TestSimple(t *testing.T) {
|
||||
|
||||
func TestSimpleWithKDF(t *testing.T) {
|
||||
|
||||
crypto.MixEntropy([]byte("someentropy"))
|
||||
|
||||
plaintext := []byte("sometext")
|
||||
secretPass := []byte("somesecret")
|
||||
salt := []byte("somesaltsomesalt") // len 16
|
||||
// NOTE: we use a fork of x/crypto so we can inject our own randomness for salt
|
||||
secret, err := bcrypt.GenerateFromPassword(salt, secretPass, 12)
|
||||
secret, err := bcrypt.GenerateFromPassword(secretPass, 12)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
@@ -8,8 +8,21 @@ module.exports = {
|
||||
lineNumbers: true
|
||||
},
|
||||
themeConfig: {
|
||||
lastUpdated: "Last Updated",
|
||||
nav: [{ text: "Back to Tendermint", link: "https://tendermint.com" }],
|
||||
repo: "tendermint/tendermint",
|
||||
editLinks: true,
|
||||
docsDir: "docs",
|
||||
docsBranch: "develop",
|
||||
editLinkText: 'Edit this page on Github',
|
||||
lastUpdated: true,
|
||||
algolia: {
|
||||
apiKey: '59f0e2deb984aa9cdf2b3a5fd24ac501',
|
||||
indexName: 'tendermint',
|
||||
debug: false
|
||||
},
|
||||
nav: [
|
||||
{ text: "Back to Tendermint", link: "https://tendermint.com" },
|
||||
{ text: "RPC", link: "https://tendermint.com/rpc/" }
|
||||
],
|
||||
sidebar: [
|
||||
{
|
||||
title: "Introduction",
|
||||
@@ -21,6 +34,20 @@ module.exports = {
|
||||
"/introduction/what-is-tendermint"
|
||||
]
|
||||
},
|
||||
{
|
||||
title: "Apps",
|
||||
collapsable: false,
|
||||
children: [
|
||||
"/app-dev/getting-started",
|
||||
"/app-dev/abci-cli",
|
||||
"/app-dev/app-architecture",
|
||||
"/app-dev/app-development",
|
||||
"/app-dev/subscribing-to-events-via-websocket",
|
||||
"/app-dev/indexing-transactions",
|
||||
"/app-dev/abci-spec",
|
||||
"/app-dev/ecosystem"
|
||||
]
|
||||
},
|
||||
{
|
||||
title: "Tendermint Core",
|
||||
collapsable: false,
|
||||
@@ -39,15 +66,6 @@ module.exports = {
|
||||
"/tendermint-core/validators"
|
||||
]
|
||||
},
|
||||
{
|
||||
title: "Tools",
|
||||
collapsable: false,
|
||||
children: [
|
||||
"/tools/",
|
||||
"/tools/benchmarking",
|
||||
"/tools/monitoring"
|
||||
]
|
||||
},
|
||||
{
|
||||
title: "Networks",
|
||||
collapsable: false,
|
||||
@@ -58,17 +76,13 @@ module.exports = {
|
||||
]
|
||||
},
|
||||
{
|
||||
title: "Apps",
|
||||
title: "Tools",
|
||||
collapsable: false,
|
||||
children: [
|
||||
"/app-dev/getting-started",
|
||||
"/app-dev/abci-cli",
|
||||
"/app-dev/app-architecture",
|
||||
"/app-dev/app-development",
|
||||
"/app-dev/subscribing-to-events-via-websocket",
|
||||
"/app-dev/indexing-transactions",
|
||||
"/app-dev/abci-spec",
|
||||
"/app-dev/ecosystem"
|
||||
children: [
|
||||
"/tools/",
|
||||
"/tools/benchmarking",
|
||||
"/tools/monitoring",
|
||||
"/tools/remote-signer-validation"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@@ -12,10 +12,10 @@ respectively.

## How It Works

There is a Jenkins job listening for changes in the `/docs` directory, on both
There is a CircleCI job listening for changes in the `/docs` directory, on both
the `master` and `develop` branches. Any updates to files in this directory
on those branches will automatically trigger a website deployment. Under the hood,
a private website repository has make targets consumed by a standard Jenkins task.
the private website repository has a `make build-docs` target consumed by a CircleCI job in that repo.

## README

@@ -93,6 +93,10 @@ python -m SimpleHTTPServer 8080

then navigate to localhost:8080 in your browser.

## Search

We are using [Algolia](https://www.algolia.com) to power full-text search. This uses a public API search-only key in the `config.js` as well as a [tendermint.json](https://github.com/algolia/docsearch-configs/blob/master/configs/tendermint.json) configuration file that we can update with PRs.

## Consistency

Because the build processes are identical (as is the information contained herein), this file should be kept in sync as
@@ -11,13 +11,10 @@ Make sure you [have Go installed](https://golang.org/doc/install).
Next, install the `abci-cli` tool and example applications:

```
go get github.com/tendermint/tendermint
```

to get vendored dependencies:

```
cd $GOPATH/src/github.com/tendermint/tendermint
mkdir -p $GOPATH/src/github.com/tendermint
cd $GOPATH/src/github.com/tendermint
git clone https://github.com/tendermint/tendermint.git
cd tendermint
make get_tools
make get_vendor_deps
make install_abci
@@ -5,7 +5,7 @@ Tendermint blockchain application.

The following diagram provides a superb example:

<https://drive.google.com/open?id=1yR2XpRi9YCY9H9uMfcw8-RMJpvDyvjz9>


The end-user application here is the Cosmos Voyager, at the bottom left.
Voyager communicates with a REST API exposed by a local Light-Client

@@ -45,6 +45,6 @@ Tendermint.
See the following for more extensive documentation:

- [Interchain Standard for the Light-Client REST API](https://github.com/cosmos/cosmos-sdk/pull/1028)
- [Tendermint RPC Docs](https://tendermint.github.io/slate/)
- [Tendermint RPC Docs](https://tendermint.com/rpc/)
- [Tendermint in Production](../tendermint-core/running-in-production.md)
- [ABCI spec](./abci-spec.md)
@@ -63,6 +63,13 @@
|
||||
"author": "Zach Balder",
|
||||
"description": "Public service reporting and tracking"
|
||||
},
|
||||
{
|
||||
"name": "ParadigmCore",
|
||||
"url": "https://github.com/ParadigmFoundation/ParadigmCore",
|
||||
"language": "TypeScript",
|
||||
"author": "Paradigm Labs",
|
||||
"description": "Reference implementation of the Paradigm Protocol, and OrderStream network client."
|
||||
},
|
||||
{
|
||||
"name": "Passchain",
|
||||
"url": "https://github.com/trusch/passchain",
|
||||
@@ -122,7 +129,7 @@
|
||||
],
|
||||
"abciServers": [
|
||||
{
|
||||
"name": "abci",
|
||||
"name": "go-abci",
|
||||
"url": "https://github.com/tendermint/tendermint/tree/master/abci",
|
||||
"language": "Go",
|
||||
"author": "Tendermint"
|
||||
@@ -133,6 +140,12 @@
|
||||
"language": "Javascript",
|
||||
"author": "Tendermint"
|
||||
},
|
||||
{
|
||||
"name": "rust-tsp",
|
||||
"url": "https://github.com/tendermint/rust-tsp",
|
||||
"language": "Rust",
|
||||
"author": "Tendermint"
|
||||
},
|
||||
{
|
||||
"name": "cpp-tmsp",
|
||||
"url": "https://github.com/mdyring/cpp-tmsp",
|
||||
@@ -164,7 +177,7 @@
|
||||
"author": "Dave Bryson"
|
||||
},
|
||||
{
|
||||
"name": "tm-abci",
|
||||
"name": "tm-abci (fork of py-abci with async IO)",
|
||||
"url": "https://github.com/SoftblocksCo/tm-abci",
|
||||
"language": "Python",
|
||||
"author": "Softblocks"
|
||||
@@ -175,5 +188,13 @@
|
||||
"language": "Javascript",
|
||||
"author": "Dennis McKinnon"
|
||||
}
|
||||
],
|
||||
"aminoLibraries": [
|
||||
{
|
||||
"name": "JS-Amino",
|
||||
"url": "https://github.com/TanNgocDo/Js-Amino",
|
||||
"language": "Javascript",
|
||||
"author": "TanNgocDo"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
@@ -1,11 +1,9 @@
# Ecosystem

The growing list of applications built using various pieces of the
Tendermint stack can be found at:
Tendermint stack can be found at the [ecosystem page](https://tendermint.com/ecosystem).

- https://tendermint.com/ecosystem

We thank the community for their contributions thus far and welcome the
We thank the community for their contributions and welcome the
addition of new projects. A pull request can be submitted to [this
file](https://github.com/tendermint/tendermint/blob/master/docs/app-dev/ecosystem.json)
to include your project.
@@ -252,14 +252,12 @@ we'll run a Javascript version of the `counter`. To run it, you'll need
to [install node](https://nodejs.org/en/download/).

You'll also need to fetch the relevant repository, from
[here](https://github.com/tendermint/js-abci) then install it. As go
devs, we keep all our code under the `$GOPATH`, so run:
[here](https://github.com/tendermint/js-abci), then install it:

```
go get github.com/tendermint/js-abci &> /dev/null
cd $GOPATH/src/github.com/tendermint/js-abci/example
npm install
cd ..
git clone https://github.com/tendermint/js-abci.git
cd js-abci
npm install abci
```

Kill the previous `counter` and `tendermint` processes. Now run the app:
@@ -276,13 +274,16 @@ tendermint node
```

Once again, you should see blocks streaming by - but now, our
application is written in javascript! Try sending some transactions, and
application is written in Javascript! Try sending some transactions, and
like before - the results should be the same:

```
curl localhost:26657/broadcast_tx_commit?tx=0x00 # ok
curl localhost:26657/broadcast_tx_commit?tx=0x05 # invalid nonce
curl localhost:26657/broadcast_tx_commit?tx=0x01 # ok
# ok
curl localhost:26657/broadcast_tx_commit?tx=0x00
# invalid nonce
curl localhost:26657/broadcast_tx_commit?tx=0x05
# ok
curl localhost:26657/broadcast_tx_commit?tx=0x01
```

Neat, eh?
@@ -78,7 +78,7 @@ endpoint:
curl "localhost:26657/tx_search?query=\"account.name='igor'\"&prove=true"
```

Check out [API docs](https://tendermint.github.io/slate/?shell#txsearch)
Check out [API docs](https://tendermint.com/rpc/#txsearch)
for more information on query syntax and other options.

## Subscribing to transactions
@@ -97,5 +97,5 @@ by providing a query to `/subscribe` RPC endpoint.
}
```

Check out [API docs](https://tendermint.github.io/slate/#subscribe) for
Check out [API docs](https://tendermint.com/rpc/#subscribe) for
more information on query syntax and other options.
@@ -54,7 +54,7 @@ Response:
"value": "ww0z4WaZ0Xg+YI10w43wTWbBmM3dpVza4mmSQYsd0ck="
},
"voting_power": "10",
"accum": "0"
"proposer_priority": "0"
}
]
}
@@ -7,6 +7,7 @@
28-08-2018: Third version after Ethan's comments
30-08-2018: AminoOverheadForBlock => MaxAminoOverheadForBlock
31-08-2018: Bounding evidence and chain ID
13-01-2019: Add section on MaxBytes vs MaxDataBytes

## Context

@@ -20,6 +21,32 @@ We should just remove MaxTxs all together and stick with MaxBytes, and have a
But we can't just reap BlockSize.MaxBytes, since MaxBytes is for the entire block,
not for the txs inside the block. There's extra amino overhead + the actual
headers on top of the actual transactions + evidence + last commit.
We could also consider using a MaxDataBytes instead of or in addition to MaxBytes.

## MaxBytes vs MaxDataBytes

The [PR #3045](https://github.com/tendermint/tendermint/pull/3045) suggested
additional clarity/justification was necessary here, with respect to the use
of MaxDataBytes in addition to, or instead of, MaxBytes.

MaxBytes provides a clear limit on the total size of a block that requires no
additional calculation if you want to use it to bound resource usage, and there
has been considerable discussion about optimizing tendermint around 1MB blocks.
Regardless, we need some maximum on the size of a block so we can avoid
unmarshalling blocks that are too big during the consensus, and it seems more
straightforward to provide a single fixed number for this rather than a
computation of "MaxDataBytes + everything else you need to make room for
(signatures, evidence, header)". MaxBytes provides a simple bound so we can
always say "blocks are less than X MB".

Having both MaxBytes and MaxDataBytes feels like unnecessary complexity. It's
not particularly surprising for MaxBytes to imply the maximum size of the
entire block (not just txs); one just has to know that a block includes header,
txs, evidence, and votes. For more fine-grained control over the txs included in the
block, there is MaxGas. In practice, MaxGas may be expected to do most of
the tx throttling, and MaxBytes to just serve as an upper bound on the total
size. Applications can use MaxGas as a MaxDataBytes by just taking the gas for
every tx to be its size in bytes.

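As a rough illustration of that last point, a hypothetical application could charge one unit of gas per transaction byte, so the MaxGas consensus parameter effectively doubles as a MaxDataBytes. This sketch is not Tendermint code; the constants and function names below are made up for the example.

```go
package main

import "fmt"

// Hypothetical consensus parameters for this sketch only.
const (
	maxBytes = 1048576 // hard cap on the whole block (header, evidence, votes, txs)
	maxGas   = 900000  // interpreted here as a cap on total tx bytes
)

// checkTxGas mimics an application that charges one unit of gas per byte,
// so the block's MaxGas bound doubles as a bound on total transaction bytes.
func checkTxGas(tx []byte) int64 {
	return int64(len(tx))
}

func main() {
	fmt.Printf("block caps: MaxBytes=%d, MaxGas=%d (gas == tx bytes in this app)\n", maxBytes, maxGas)

	txs := [][]byte{make([]byte, 400000), make([]byte, 300000), make([]byte, 300000)}
	var used int64
	for i, tx := range txs {
		gas := checkTxGas(tx)
		if used+gas > maxGas {
			fmt.Printf("tx %d (%d bytes) would exceed MaxGas, leave it for the next block\n", i, len(tx))
			continue
		}
		used += gas
		fmt.Printf("tx %d included, gas/bytes used so far: %d\n", i, used)
	}
}
```
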
## Proposed solution

@@ -61,7 +88,7 @@ MaxXXX stayed the same.

## Status

Proposed.
Accepted.

## Consequences

@@ -126,6 +126,312 @@ func TestConsensusXXX(t *testing.T) {
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
## Consensus Executor
|
||||
|
||||
## Consensus Core
|
||||
|
||||
```go
|
||||
type Event interface{}
|
||||
|
||||
type EventNewHeight struct {
|
||||
Height int64
|
||||
ValidatorId int
|
||||
}
|
||||
|
||||
type EventNewRound HeightAndRound
|
||||
|
||||
type EventProposal struct {
|
||||
Height int64
|
||||
Round int
|
||||
Timestamp Time
|
||||
BlockID BlockID
|
||||
POLRound int
|
||||
Sender int
|
||||
}
|
||||
|
||||
type Majority23PrevotesBlock struct {
|
||||
Height int64
|
||||
Round int
|
||||
BlockID BlockID
|
||||
}
|
||||
|
||||
type Majority23PrecommitBlock struct {
|
||||
Height int64
|
||||
Round int
|
||||
BlockID BlockID
|
||||
}
|
||||
|
||||
type HeightAndRound struct {
|
||||
Height int64
|
||||
Round int
|
||||
}
|
||||
|
||||
type Majority23PrevotesAny HeightAndRound
|
||||
type Majority23PrecommitAny HeightAndRound
|
||||
type TimeoutPropose HeightAndRound
|
||||
type TimeoutPrevotes HeightAndRound
|
||||
type TimeoutPrecommit HeightAndRound
|
||||
|
||||
|
||||
type Message interface{}
|
||||
|
||||
type MessageProposal struct {
|
||||
Height int64
|
||||
Round int
|
||||
BlockID BlockID
|
||||
POLRound int
|
||||
}
|
||||
|
||||
type VoteType int
|
||||
|
||||
const (
|
||||
VoteTypeUnknown VoteType = iota
|
||||
Prevote
|
||||
Precommit
|
||||
)
|
||||
|
||||
|
||||
type MessageVote struct {
|
||||
Height int64
|
||||
Round int
|
||||
BlockID BlockID
|
||||
Type VoteType
|
||||
}
|
||||
|
||||
|
||||
type MessageDecision struct {
|
||||
Height int64
|
||||
Round int
|
||||
BlockID BlockID
|
||||
}
|
||||
|
||||
type TriggerTimeout struct {
|
||||
Height int64
|
||||
Round int
|
||||
Duration Duration
|
||||
}
|
||||
|
||||
|
||||
type RoundStep int
|
||||
|
||||
const (
|
||||
RoundStepUnknown RoundStep = iota
|
||||
RoundStepPropose
|
||||
RoundStepPrevote
|
||||
RoundStepPrecommit
|
||||
RoundStepCommit
|
||||
)
|
||||
|
||||
type State struct {
|
||||
Height int64
|
||||
Round int
|
||||
Step RoundStep
|
||||
LockedValue BlockID
|
||||
LockedRound int
|
||||
ValidValue BlockID
|
||||
ValidRound int
|
||||
ValidatorId int
|
||||
ValidatorSetSize int
|
||||
}
|
||||
|
||||
func proposer(height int64, round int) int {}
|
||||
func getValue() BlockID {}
|
||||
|
||||
func Consensus(event Event, state State) (State, Message, TriggerTimeout) {
|
||||
msg = nil
|
||||
timeout = nil
|
||||
switch event := event.(type) {
|
||||
case EventNewHeight:
|
||||
if event.Height > state.Height {
|
||||
state.Height = event.Height
|
||||
state.Round = -1
|
||||
state.Step = RoundStepPropose
|
||||
state.LockedValue = nil
|
||||
state.LockedRound = -1
|
||||
state.ValidValue = nil
|
||||
state.ValidRound = -1
|
||||
state.ValidatorId = event.ValidatorId
|
||||
}
|
||||
return state, msg, timeout
|
||||
|
||||
case EventNewRound:
|
||||
if event.Height == state.Height and event.Round > state.Round {
|
||||
state.Round = eventRound
|
||||
state.Step = RoundStepPropose
|
||||
if proposer(state.Height, state.Round) == state.ValidatorId {
|
||||
proposal = state.ValidValue
|
||||
if proposal == nil {
|
||||
proposal = getValue()
|
||||
}
|
||||
msg = MessageProposal { state.Height, state.Round, proposal, state.ValidRound }
|
||||
}
|
||||
timeout = TriggerTimeout { state.Height, state.Round, timeoutPropose(state.Round) }
|
||||
}
|
||||
return state, msg, timeout
|
||||
|
||||
case EventProposal:
|
||||
if event.Height == state.Height and event.Round == state.Round and
|
||||
event.Sender == proposal(state.Height, state.Round) and state.Step == RoundStepPropose {
|
||||
if event.POLRound >= state.LockedRound or event.BlockID == state.BlockID or state.LockedRound == -1 {
|
||||
msg = MessageVote { state.Height, state.Round, event.BlockID, Prevote }
|
||||
}
|
||||
state.Step = RoundStepPrevote
|
||||
}
|
||||
return state, msg, timeout
|
||||
|
||||
case TimeoutPropose:
|
||||
if event.Height == state.Height and event.Round == state.Round and state.Step == RoundStepPropose {
|
||||
msg = MessageVote { state.Height, state.Round, nil, Prevote }
|
||||
state.Step = RoundStepPrevote
|
||||
}
|
||||
return state, msg, timeout
|
||||
|
||||
case Majority23PrevotesBlock:
|
||||
if event.Height == state.Height and event.Round == state.Round and state.Step >= RoundStepPrevote and event.Round > state.ValidRound {
|
||||
state.ValidRound = event.Round
|
||||
state.ValidValue = event.BlockID
|
||||
if state.Step == RoundStepPrevote {
|
||||
state.LockedRound = event.Round
|
||||
state.LockedValue = event.BlockID
|
||||
msg = MessageVote { state.Height, state.Round, event.BlockID, Precommit }
|
||||
state.Step = RoundStepPrecommit
|
||||
}
|
||||
}
|
||||
return state, msg, timeout
|
||||
|
||||
case Majority23PrevotesAny:
|
||||
if event.Height == state.Height and event.Round == state.Round and state.Step == RoundStepPrevote {
|
||||
timeout = TriggerTimeout { state.Height, state.Round, timeoutPrevote(state.Round) }
|
||||
}
|
||||
return state, msg, timeout
|
||||
|
||||
case TimeoutPrevote:
|
||||
if event.Height == state.Height and event.Round == state.Round and state.Step == RoundStepPrevote {
|
||||
msg = MessageVote { state.Height, state.Round, nil, Precommit }
|
||||
state.Step = RoundStepPrecommit
|
||||
}
|
||||
return state, msg, timeout
|
||||
|
||||
case Majority23PrecommitBlock:
|
||||
if event.Height == state.Height {
|
||||
state.Step = RoundStepCommit
|
||||
state.LockedValue = event.BlockID
|
||||
}
|
||||
return state, msg, timeout
|
||||
|
||||
case Majority23PrecommitAny:
|
||||
if event.Height == state.Height and event.Round == state.Round {
|
||||
timeout = TriggerTimeout { state.Height, state.Round, timeoutPrecommit(state.Round) }
|
||||
}
|
||||
return state, msg, timeout
|
||||
|
||||
case TimeoutPrecommit:
|
||||
if event.Height == state.Height and event.Round == state.Round {
|
||||
state.Round = state.Round + 1
|
||||
}
|
||||
return state, msg, timeout
|
||||
}
|
||||
}
|
||||
|
||||
func ConsensusExecutor() {
	proposal = nil
	votes = HeightVoteSet { Height: 1 }
	state = State {
		Height:      1
		Round:       0
		Step:        RoundStepPropose
		LockedValue: nil
		LockedRound: -1
		ValidValue:  nil
		ValidRound:  -1
	}

	event = EventNewHeight {1, id}
	state, msg, timeout = Consensus(event, state)

	event = EventNewRound {state.Height, 0}
	state, msg, timeout = Consensus(event, state)

	if msg != nil {
		send msg
	}

	if timeout != nil {
		trigger timeout
	}

	for {
		select {
		case message := <- msgCh:
			switch msg := message.(type) {
			case MessageProposal:
				// proposal handling omitted in this sketch

			case MessageVote:
				if msg.Height == state.Height {
					newVote = votes.AddVote(msg)
					if newVote {
						switch msg.Type {
						case Prevote:
							prevotes = votes.Prevotes(msg.Round)
							if prevotes.WeakCertificate() and msg.Round > state.Round {
								event = EventNewRound { msg.Height, msg.Round }
								state, msg, timeout = Consensus(event, state)
								state = handleStateChange(state, msg, timeout)
							}

							if blockID, ok = prevotes.TwoThirdsMajority(); ok and blockID != nil {
								if msg.Round == state.Round and hasBlock(blockID) {
									event = Majority23PrevotesBlock { msg.Height, msg.Round, blockID }
									state, msg, timeout = Consensus(event, state)
									state = handleStateChange(state, msg, timeout)
								}
								if proposal != nil and proposal.POLRound == msg.Round and hasBlock(blockID) {
									event = EventProposal {
										Height:   state.Height
										Round:    state.Round
										BlockID:  blockID
										POLRound: proposal.POLRound
										Sender:   message.Sender
									}
									state, msg, timeout = Consensus(event, state)
									state = handleStateChange(state, msg, timeout)
								}
							}

							if prevotes.HasTwoThirdsAny() and msg.Round == state.Round {
								event = Majority23PrevotesAny { msg.Height, msg.Round, blockID }
								state, msg, timeout = Consensus(event, state)
								state = handleStateChange(state, msg, timeout)
							}

						case Precommit:
							// precommit handling (Majority23PrecommitBlock / Majority23PrecommitAny) omitted
						}
					}
				}
			}
		case timeout := <- timeoutCh:
			// timeout handling omitted (produces TimeoutPropose / TimeoutPrevote / TimeoutPrecommit events)
		case block := <- blockCh:
			// block handling omitted
		}
	}
}

func handleStateChange(state, msg, timeout) State {
	if state.Step == RoundStepCommit {
		state = ExecuteBlock(state.LockedValue)
	}
	if msg != nil {
		send msg
	}
	if timeout != nil {
		trigger timeout
	}
	return state
}

```
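
For readability, here is a minimal sketch of the data the pseudocode above carries around. The names mirror the pseudocode; the concrete definitions in the Go implementation may differ.

```go
package consensus // illustrative only; not the actual package

import "time"

// Sketch of the types assumed by the state machine above.
type (
	BlockID       []byte
	RoundStepType int
	VoteType      int
)

const (
	RoundStepPropose RoundStepType = iota
	RoundStepPrevote
	RoundStepPrecommit
	RoundStepCommit
)

const (
	Prevote VoteType = iota
	Precommit
)

// State is the consensus-critical state threaded through Consensus().
type State struct {
	Height      int64
	Round       int
	Step        RoundStepType
	LockedValue BlockID // nil when nothing is locked
	LockedRound int     // -1 when nothing is locked
	ValidValue  BlockID // nil when no valid value has been seen
	ValidRound  int     // -1 when no valid value has been seen
	ValidatorId string  // this node's validator identity
}

// MessageVote is the vote emitted by the state machine.
type MessageVote struct {
	Height  int64
	Round   int
	BlockID BlockID // nil means a vote for "nil" (no block)
	Type    VoteType
}

// TriggerTimeout asks the executor to schedule a timeout for (Height, Round).
type TriggerTimeout struct {
	Height   int64
	Round    int
	Duration time.Duration
}
```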

### Implementation roadmap

* implement proposed implementation

@@ -6,10 +6,16 @@ Author: Anton Kaliaev (@melekes)

02-10-2018: Initial draft

16-01-2019: Second version based on our conversation with Jae

17-01-2019: Third version explaining how new design solves current issues

25-01-2019: Fourth version to treat buffered and unbuffered channels differently

## Context

Since the initial version of the pubsub, there's been a number of issues
raised: [#951], [#1879], [#1880]. Some of them are high-level issues questioning the
core design choices made. Others are minor and mostly about the interface of
`Subscribe()` / `Publish()` functions.

@@ -40,9 +46,19 @@ goroutines can be used to avoid uncontrolled memory growth.

In certain cases, this is what you want. But in our case, because we need
strict ordering of events (if event A was published before B, the guaranteed
delivery order will be A -> B), we can't publish msg in a new goroutine every time.

We can also have a goroutine per subscriber, although we'd need to be careful
with the number of subscribers. It's more difficult to implement as well, and it is
unclear if we'll benefit from it (because we'd be forced to create N additional
channels to distribute msg to these goroutines).

### Non-blocking send

There is also a question whether we should have a non-blocking send.
Currently, sends are blocking, so publishing to one client can block on
publishing to another. This means a slow or unresponsive client can halt the
system. Instead, we can use a non-blocking send:

```go
for each subscriber {
    out := subscriber.outChan
    select {
    case out <- msg:
    default:
        // non-blocking: drop the message if the subscriber's channel is full
    }
}
```

This fixes the "slow client problem", but there is no way for a slow client to
know if it had missed a message. We could return a second channel and close it
to indicate subscription termination. On the other hand, if we're going to
stick with blocking send, **devs must always ensure subscriber's handling code
does not block**, which is a hard task to put on their shoulders.

The interim option is to run a pool of goroutines for a single message and wait for all
goroutines to finish. This would solve the "slow client problem", but we'd still
have to wait `max(goroutine_X_time)` before we can publish the next message.
My opinion: not worth doing.

### Channels vs Callbacks

@@ -76,36 +91,137 @@ memory leaks and/or memory usage increase.

Go channels are de-facto standard for carrying data between goroutines.

**Question: Is it worth switching to callback functions?**

### Why does `Subscribe()` accept an `out` channel?

Because in our tests, we create buffered channels (cap: 1). Alternatively, we
can make capacity an argument and return a channel.

## Decision

### MsgAndTags

Use a `MsgAndTags` struct on the subscription channel to indicate what tags the
msg matched.

```go
type MsgAndTags struct {
	Msg  interface{}
	Tags TagMap
}

// outCap can be used to set capacity of out channel (unbuffered by default).
Subscribe(ctx context.Context, clientID string, query Query, outCap ...int) (out <-chan MsgAndTags, err error) {
```
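
For illustration only, a consumer of this interim API could look like the sketch below; the pubsub server `s`, the `query` value and the `handle` helper are assumptions, not part of this ADR.

```go
// Sketch: reading messages and their matched tags from the out channel
// returned by Subscribe(). Error handling is kept minimal on purpose.
out, err := s.Subscribe(ctx, "test-client", query, 1) // buffered with capacity 1
if err != nil {
	return err
}
for mt := range out {
	handle(mt.Msg, mt.Tags) // mt.Tags tells the subscriber which tags matched
}
```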

### Subscription Struct

Change `Subscribe()` function to return a `Subscription` struct:

```go
type Subscription struct {
	// private fields
}

func (s *Subscription) Out() <-chan MsgAndTags
func (s *Subscription) Cancelled() <-chan struct{}
func (s *Subscription) Err() error
```

`Out()` returns a channel onto which messages and tags are published.
`Unsubscribe`/`UnsubscribeAll` does not close the channel, so that clients do not
receive a nil message.

`Cancelled()` returns a channel that's closed when the subscription is terminated
and is supposed to be used in a select statement.

If the channel returned by `Cancelled()` is not closed yet, `Err()` returns nil.
If the channel is closed, `Err()` returns a non-nil error explaining why:
`ErrUnsubscribed` if the subscriber chose to unsubscribe,
`ErrOutOfCapacity` if the subscriber is not pulling messages fast enough and the channel returned by `Out()` became full.
After `Err()` returns a non-nil error, successive calls to `Err()` return the same error.

```go
subscription, err := pubsub.Subscribe(...)
if err != nil {
	// ...
}
for {
	select {
	case msgAndTags := <-subscription.Out():
		// ...
	case <-subscription.Cancelled():
		return subscription.Err()
	}
}
```

### Capacity and Subscriptions

Make the `Out()` channel buffered (with capacity 1) by default. In most cases, we want to
terminate the slow subscriber. Only in rare cases do we want to block the pubsub
(e.g. when debugging consensus). This should lower the chances of the pubsub
being frozen.

```go
// outCap can be used to set capacity of Out channel
// (1 by default, must be greater than 0).
Subscribe(ctx context.Context, clientID string, query Query, outCap ...int) (Subscription, error) {
```

Use a different function for an unbuffered channel:

```go
// Subscription uses an unbuffered channel. Publishing will block.
SubscribeUnbuffered(ctx context.Context, clientID string, query Query) (Subscription, error) {
```

`SubscribeUnbuffered` should not be exposed to users.

### Blocking/Nonblocking

The publisher should treat these two kinds of channels separately.
It should block on unbuffered channels (for use with internal consensus events
in the consensus tests) and not block on the buffered ones. If a client is too
slow to keep up with its messages, its subscription is terminated:

```go
for each subscription {
    out := subscription.outChan
    if cap(out) == 0 {
        // block on unbuffered channel
        out <- msg
    } else {
        // don't block on buffered channels
        select {
        case out <- msg:
        default:
            // set the error, notify on the cancel chan
            subscription.err = fmt.Errorf("client is too slow for msg")
            close(subscription.cancelChan)

            // ... unsubscribe and close out
        }
    }
}
```
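
A slightly more concrete Go rendering of the loop above is sketched below; the `Server` type, the `subscription` fields and the `remove` helper are assumptions made for the sketch, not the final implementation.

```go
// Sketch of the publish path under the proposed rules: block on unbuffered
// subscriptions (internal use), never block on buffered ones, and terminate
// subscribers that cannot keep up.
func (s *Server) publishToAll(msg interface{}, tags TagMap) {
	for _, sub := range s.subscriptions {
		mt := MsgAndTags{Msg: msg, Tags: tags}
		if cap(sub.out) == 0 {
			sub.out <- mt // unbuffered: publishing blocks until received
			continue
		}
		select {
		case sub.out <- mt: // buffered: deliver if there is room
		default:
			// subscriber is too slow: record the reason and signal cancellation
			sub.err = errors.New("client is too slow to read messages")
			close(sub.cancelled)
			s.remove(sub) // hypothetical helper: unsubscribe and close out
		}
	}
}
```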

### How does this new design solve the current issues?

[#951] ([#1880]):

Because of the non-blocking send, a situation where we deadlock is no longer
possible. If the client stops reading messages, it will be removed.

[#1879]:

`MsgAndTags` is now used instead of a plain message.

### Future problems and their possible solutions

[#2826]

One question I am still pondering: how to prevent pubsub from slowing
down consensus. We can increase the pubsub queue size (which is 0 now). Also,
it's probably a good idea to limit the total number of subscribers.

This can be done automatically. Say we set the queue size to 1000 and, when it's >=
80% full, refuse new subscriptions.
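
A possible (purely hypothetical) way to encode that rule:

```go
// Hypothetical admission check using the numbers from the text: with a
// publish queue of capacity 1000, refuse new subscriptions once the queue
// is at least 80% full.
const publishQueueCap = 1000

func (s *Server) acceptingNewSubscriptions() bool {
	return len(s.queue) < publishQueueCap*80/100
}
```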

## Status

In review
@@ -116,7 +232,16 @@ In review

- more idiomatic interface
- subscribers know what tags a msg was published with
- subscribers are aware of the reason their subscription was cancelled

### Negative

- (since v1) no concurrency when it comes to publishing messages

### Neutral

[#951]: https://github.com/tendermint/tendermint/issues/951
[#1879]: https://github.com/tendermint/tendermint/issues/1879
[#1880]: https://github.com/tendermint/tendermint/issues/1880
[#2826]: https://github.com/tendermint/tendermint/issues/2826

BIN docs/imgs/cosmos-tendermint-stack-4k.jpg (new binary file, 625 KiB; not shown)

@@ -79,11 +79,9 @@ make install

Install [LevelDB](https://github.com/google/leveldb) (minimum version is 1.7).

### Ubuntu

Install LevelDB with snappy (optional):

```
sudo apt-get update
```
@@ -112,5 +110,13 @@ db_backend = "cleveldb"

To install Tendermint, run

```
CGO_LDFLAGS="-lsnappy" make install_c
```

or run

```
CGO_LDFLAGS="-lsnappy" make build_c
```

to put the binary in `./build`.

@@ -40,7 +40,11 @@ These files are found in `$HOME/.tendermint`:

```
$ ls $HOME/.tendermint

config data

$ ls $HOME/.tendermint/config/

config.toml genesis.json node_key.json priv_validator.json
```

For a single, local node, no further configuration is required.
@@ -110,7 +114,18 @@ source ~/.profile

This will install `go` and other dependencies, get the Tendermint source code, then compile the `tendermint` binary.

Next, use the `tendermint testnet` command to create four directories of config files (found in `./mytestnet`) and copy each directory to the relevant machine in the cloud, so that each machine has a `$HOME/mytestnet/node[0-3]` directory.

Before you can start the network, you'll need the peers' identifiers (IPs are not enough and can change). We'll refer to them as ID1, ID2, ID3, ID4.

```
tendermint show_node_id --home ./mytestnet/node0
tendermint show_node_id --home ./mytestnet/node1
tendermint show_node_id --home ./mytestnet/node2
tendermint show_node_id --home ./mytestnet/node3
```

Finally, from each machine, run:

```
tendermint node --home ./mytestnet/node0 --proxy_app=kvstore --p2p.persistent_peers="ID1@IP1:26656,ID2@IP2:26656,ID3@IP3:26656,ID4@IP4:26656"
```
@@ -121,6 +136,6 @@ tendermint node --home ./mytestnet/node3 --proxy_app=kvstore --p2p.persistent_pe

Note that after the third node is started, blocks will start to stream in
because >2/3 of validators (defined in the `genesis.json`) have come online.
Persistent peers can also be specified in the `config.toml`. See [here](../tendermint-core/configuration.md) for more information about configuration options.

Transactions can then be sent as covered in the single, local node example above.

@@ -70,10 +70,6 @@ Tendermint is in essence similar software, but with two key differences:
the application logic that's right for them, from key-value store to
cryptocurrency to e-voting platform and beyond.

The layout of this Tendermint website content is also ripped directly
and without shame from [consul.io](https://www.consul.io/) and the other
[Hashicorp sites](https://www.hashicorp.com/#tools).

### Bitcoin, Ethereum, etc.

Tendermint emerged in the tradition of cryptocurrencies like Bitcoin,

@@ -1,20 +1,17 @@
# Docker Compose

With Docker Compose, you can spin up local testnets with a single command.

## Requirements

1. [Install tendermint](/docs/introduction/install.md)
2. [Install docker](https://docs.docker.com/engine/installation/)
3. [Install docker-compose](https://docs.docker.com/compose/install/)

## Build

Build the `tendermint` binary and, optionally, the `tendermint/localnode`
docker image.

Note the binary will be mounted into the container so it can be updated without
rebuilding the image.

@@ -25,11 +22,10 @@ cd $GOPATH/src/github.com/tendermint/tendermint

```
# Build the linux binary in ./build
make build-linux

# (optionally) Build tendermint/localnode image
make build-docker-localnode
```

## Run a testnet

To start a 4 node testnet run:
@@ -38,9 +34,13 @@ To start a 4 node testnet run:

```
make localnet-start
```

The nodes bind their RPC servers to ports 26657, 26660, 26662, and 26664 on the
host.

This file creates a 4-node network using the localnode image.

The nodes of the network expose their P2P and RPC endpoints to the host machine
on ports 26656-26657, 26659-26660, 26661-26662, and 26663-26664 respectively.

To update the binary, just rebuild it and restart the nodes:

@@ -52,34 +52,112 @@ make localnet-start

## Configuration

The `make localnet-start` creates files for a 4-node testnet in `./build` by
calling the `tendermint testnet` command.

The `./build` directory is mounted to the `/tendermint` mount point to attach
the binary and config files to the container.

To change the number of validators / non-validators, change the `localnet-start` Makefile target:

```
localnet-start: localnet-stop
	@if ! [ -f build/node0/config/genesis.json ]; then docker run --rm -v $(CURDIR)/build:/tendermint:Z tendermint/localnode testnet --v 5 --n 3 --o . --populate-persistent-peers --starting-ip-address 192.167.10.2 ; fi
	docker-compose up
```

The command will now generate config files for a network of 5 validators and 3
non-validators.

Before running it, don't forget to clean up the old files:

```
cd $GOPATH/src/github.com/tendermint/tendermint

# Clear the build folder
rm -rf ./build/node*
```

## Configuring abci containers

To use your own abci applications with the 4-node setup, edit the [docker-compose.yaml](https://github.com/tendermint/tendermint/blob/develop/docker-compose.yml) file and add an image for your abci application.

```
  abci0:
    container_name: abci0
    image: "abci-image"
    build:
      context: .
      dockerfile: abci.Dockerfile
    command: <insert command to run your abci application>
    networks:
      localnet:
        ipv4_address: 192.167.10.6

  abci1:
    container_name: abci1
    image: "abci-image"
    build:
      context: .
      dockerfile: abci.Dockerfile
    command: <insert command to run your abci application>
    networks:
      localnet:
        ipv4_address: 192.167.10.7

  abci2:
    container_name: abci2
    image: "abci-image"
    build:
      context: .
      dockerfile: abci.Dockerfile
    command: <insert command to run your abci application>
    networks:
      localnet:
        ipv4_address: 192.167.10.8

  abci3:
    container_name: abci3
    image: "abci-image"
    build:
      context: .
      dockerfile: abci.Dockerfile
    command: <insert command to run your abci application>
    networks:
      localnet:
        ipv4_address: 192.167.10.9
```

Override the [command](https://github.com/tendermint/tendermint/blob/master/networks/local/localnode/Dockerfile#L12) in each node to connect to its abci.

```
  node0:
    container_name: node0
    image: "tendermint/localnode"
    ports:
      - "26656-26657:26656-26657"
    environment:
      - ID=0
      - LOG=$${LOG:-tendermint.log}
    volumes:
      - ./build:/tendermint:Z
    command: node --proxy_app=tcp://abci0:26658
    networks:
      localnet:
        ipv4_address: 192.167.10.2
```

Do the same for node1, node2 and node3, then [run the testnet](https://github.com/tendermint/tendermint/blob/master/docs/networks/docker-compose.md#run-a-testnet).

## Logging

Log is saved under the attached volume, in the `tendermint.log` file. If the
`LOG` environment variable is set to `stdout` at start, the log is not saved,
but printed on the screen.

## Special binaries

If you have multiple binaries with different names, you can specify which one
to run with the `BINARY` environment variable. The path of the binary is relative
to the attached volume.

docs/package-lock.json: generated file (4670 lines changed); diff not shown.