Mirror of https://github.com/fluencelabs/tendermint (synced 2025-07-16 12:51:59 +00:00)

Compare commits: v0.19.3...v0.21.0-rc (236 commits)
SHA1
----
c84be3b8dd
050636d5ce
27bd1deabe
76c82fd433
9481cabd50
46b957929c
512e563d4b
a6a4fc7784
bfcec02423
fcf61b8088
46fb179605
89925501f3
6b8613b3e7
8be27494bb
c661a3ec21
8e45348737
7dfc74a6b6
2edc68c59b
fe4123684d
2897685c57
71556c62eb
54e61468d4
5c7ccbd4a7
e2f5a6fbe4
aa8be33da1
909f66e841
3d2c4fd309
e5bca1df6f
866bcceb35
e1e6878a4d
e4147b6f1a
7606b7595f
485b4a0c6f
575d94dbb9
ebd2fe7a68
f28eae7816
e13c1ab735
0e0461d9bc
057e076ca9
775fef31c2
9cb079dcc6
67180344b7
825fdf2c24
61002ad264
41e847ec97
55bae62d71
d2259696af
32719123d9
2ce8179c8b
b8c076ca79
1b2e34738a
566024b64f
932381effa
2007c66091
97c5533c35
3d33226e80
edb851280a
dd62f06994
19d95b5410
53937a8129
f1c53c7358
3445f1206e
097f778c1e
c85c21d1bc
fd4db8dfdc
1318bd18cd
ea896865a7
aeb91dfc22
5727916c5b
876c8f14e7
67416feb3a
8706ae765c
954a8941ff
1f22f34edf
0562009275
fedd07c522
3fa734ef5a
cd6bfdc42f
98b0c51b5f
c777be256a
d66f8bf829
bf370d36c2
1c643701f5
0b0290bdb2
a4779fdf51
7030d5c2a7
f34d1009c4
0e3dc32b3d
d292fa4541
3255c076e5
978277a4c1
58eb76f34d
a017f2fdd4
aaaa5f23e2
178e357d7f
683b527534
d584e03427
d454b1b25f
432f21452d
f0ce8b3883
3da5198631
252a0a392b
f7106bfb39
2a517ac98c
b542dce2e1
82ded582f2
e0d4fe2dba
83c6f2864d
33ec8cb609
43a652bcbf
094a40084d
eec9f142b5
5796e879b9
846ca5ce56
fd6021876b
2763c8539c
1c0bfe1158
1e87ef7f75
c8be091d4a
ec34c8f9d2
6004587347
f55725ebfa
7f20eb5f8e
eeabb4c06b
4da81aa0b7
67068a34f2
2a0e9f93ce
708f35e5c1
f3f5c7f472
68f6226bea
118b86b1ef
b9afcbe3a2
a885af0826
3a947b0117
caf5afc084
2aa5285c66
b166831fb5
423fef1416
b4d10b5b91
6f1bfb6280
5e7177053c
a0201e7862
126ddca1a6
186d38dd8a
01fd102dba
e11f3167ff
7d98cfd3d6
4848e88737
60d7486de2
229c18f1bd
91b6d3f18c
20e9dd0737
7b02b5b66b
0cd92a4948
747f28f85f
a9d0adbdef
3485edf4f5
c6f612bfc3
bb9aa85d22
c4fef499b6
b77d5344fc
21f5f3faa7
bf6527fc59
ed8d9951c0
97b39f340e
383c255f35
931fb385d7
018e096748
ee4eb59355
082a02e6d1
2c40966e46
0a9dc9f875
87cefb724d
6701dba876
442bbe592f
301aa92f9c
52f27686ef
6f9867cba6
02615c8695
2df137193c
1ef415728d
773e3917ec
26fdfe10fd
d76e2dc3ff
420f925a4d
d7d12c8030
6c4a26f248
2a26c47da5
aabe96f1af
0e1f730fbb
0b68ec4b8e
ca120798e4
595fc24c56
4611cf44f0
d596ed1bc2
0fb33ca91d
35428ceb53
de8d4325de
5a041baa36
202a43a5af
2987158a65
c9001d5a11
90446261f3
ae572b9038
0908e668bd
e0dbc3673c
545990f845
19ccd1842f
b4d6bf7697
1854ce41fc
547e8223b9
8e46df14e7
8d60a5a7bd
5115618550
a6b74b82d1
e5220360c5
b5c4098c53
bc8768cfea
d832bde280
5e3a23df6d
6f7333fd5f
58e3246ffc
bbe1355957
7c14fa820d
0d93424c6a
efc01cf582
754be1887c
775b015173
b698a9febc
c5f45275ec
77f09f5b5e
1fe41be929
68a0b3f95b
b1f3c11948
e1a3f16fa4
34f5d439ee

@@ -77,6 +77,22 @@ jobs:
      paths:
        - "bin/abci*"

  build_slate:
    <<: *defaults
    steps:
      - attach_workspace:
          at: /tmp/workspace
      - restore_cache:
          key: v1-pkg-cache
      - restore_cache:
          key: v1-tree-{{ .Environment.CIRCLE_SHA1 }}
      - run:
          name: slate docs
          command: |
            set -ex
            export PATH="$GOBIN:$PATH"
            make build-slate

  lint:
    <<: *defaults
    steps:
@@ -117,18 +133,21 @@ jobs:
          key: v1-pkg-cache
      - restore_cache:
          key: v1-tree-{{ .Environment.CIRCLE_SHA1 }}
      - run: mkdir -p /tmp/logs
      - run:
          name: Run tests
          command: |
            for pkg in $(go list github.com/tendermint/tendermint/... | grep -v /vendor/ | circleci tests split --split-by=timings); do
              id=$(basename "$pkg")

              go test -timeout 5m -race -coverprofile=/tmp/workspace/profiles/$id.out -covermode=atomic "$pkg"
              GOCACHE=off go test -v -timeout 5m -race -coverprofile=/tmp/workspace/profiles/$id.out -covermode=atomic "$pkg" | tee "/tmp/logs/$id-$RANDOM.log"
            done
      - persist_to_workspace:
          root: /tmp/workspace
          paths:
            - "profiles/*"
      - store_artifacts:
          path: /tmp/logs

  test_persistence:
    <<: *defaults
@@ -153,7 +172,7 @@ jobs:
      - checkout
      - run: mkdir -p $GOPATH/src/github.com/tendermint
      - run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint
      - run: bash test/circleci/p2p.sh
      - run: bash test/p2p/circleci.sh

  upload_coverage:
    <<: *defaults
.github/CODEOWNERS (vendored, 4 changes)

@@ -1,4 +1,4 @@
# CODEOWNERS: https://help.github.com/articles/about-codeowners/

# Everything goes through Bucky and Anton. For now.
* @ebuchman @melekes
# Everything goes through Bucky, Anton, Alex. For now.
* @ebuchman @melekes @xla
.github/ISSUE_TEMPLATE (vendored, 4 changes)

@@ -19,10 +19,6 @@ in a case of bug.

**ABCI app** (name for built-in, URL for self-written if it's publicly available):


**Merkleeyes version** (use `git rev-parse --verify HEAD`, skip if you don't use it):

**Environment**:
- **OS** (e.g. from /etc/os-release):
- **Install tools**:
.gitignore (vendored, 4 changes)

@@ -5,7 +5,6 @@
.DS_Store
build/*
rpc/test/.tendermint
.debora
.tendermint
remote_dump
.revision
@@ -13,7 +12,6 @@ vendor
.vagrant
test/p2p/data/
test/logs
.glide
coverage.txt
docs/_build
docs/tools
@@ -25,3 +23,5 @@ scripts/cutWALUntil/cutWALUntil

.idea/
*.iml

libs/pubsub/query/fuzz_test/output
CHANGELOG.md (133 changes)

@@ -1,5 +1,138 @@
# Changelog

## 0.20.0

*June 6th, 2018*

This is the first in a series of breaking releases coming to Tendermint after
soliciting developer feedback and conducting security audits.

This release does not break any blockchain data structures or
protocols other than the ABCI messages between Tendermint and the application.

Applications that upgrade for ABCI v0.11.0 should be able to continue running Tendermint
v0.20.0 on blockchains created with v0.19.X

BREAKING CHANGES

- [abci] Upgrade to
  [v0.11.0](https://github.com/tendermint/abci/blob/master/CHANGELOG.md#0110)
- [abci] Change Query path for filtering peers by node ID from
  `p2p/filter/pubkey/<id>` to `p2p/filter/id/<id>`
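For illustration, here is a minimal sketch of how an application-side peer filter might parse the new Query path. Only the `p2p/filter/id/<id>` path format comes from the changelog entry above; the function, the whitelist, and the example IDs are hypothetical and not part of any Tendermint or ABCI API.

```go
package main

import (
	"fmt"
	"strings"
)

// allowedPeerID decides whether to accept a peer, given the Query path that
// Tendermint sends when filtering peers. As of ABCI v0.11 / Tendermint v0.20
// the path is "p2p/filter/id/<id>" (it was "p2p/filter/pubkey/<id>" before).
func allowedPeerID(path string, whitelist map[string]bool) bool {
	parts := strings.Split(path, "/")
	if len(parts) != 4 || parts[0] != "p2p" || parts[1] != "filter" || parts[2] != "id" {
		return false // not a peer-filter query
	}
	return whitelist[parts[3]]
}

func main() {
	whitelist := map[string]bool{"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef": true}
	fmt.Println(allowedPeerID("p2p/filter/id/deadbeefdeadbeefdeadbeefdeadbeefdeadbeef", whitelist))
	fmt.Println(allowedPeerID("p2p/filter/pubkey/abc123", whitelist)) // old-style path: rejected
}
```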
## 0.19.9

*June 5th, 2018*

BREAKING CHANGES

- [types/priv_validator] Moved to top level `privval` package

FEATURES

- [config] Collapse PeerConfig into P2PConfig
- [docs] Add quick-install script
- [docs/spec] Add table of Amino prefixes

BUG FIXES

- [rpc] Return 404 for unknown endpoints
- [consensus] Flush WAL on stop
- [evidence] Don't send evidence to peers that are behind
- [p2p] Fix memory leak on peer disconnects
- [rpc] Fix panic when `per_page=0`

## 0.19.8

*June 4th, 2018*

BREAKING:

- [p2p] Remove `auth_enc` config option, peer connections are always auth
  encrypted. Technically a breaking change but seems no one was using it and
  arguably a bug fix :)

BUG FIXES

- [mempool] Fix deadlock under high load when `skip_timeout_commit=true` and
  `create_empty_blocks=false`

## 0.19.7

*May 31st, 2018*

BREAKING:

- [libs/pubsub] TagMap#Get returns a string value
- [libs/pubsub] NewTagMap accepts a map of strings

FEATURES

- [rpc] the RPC documentation is now published to https://tendermint.github.io/slate
- [p2p] AllowDuplicateIP config option to refuse connections from same IP.
  - true by default for now, false by default in next breaking release
- [docs] Add docs for query, tx indexing, events, pubsub
- [docs] Add some notes about running Tendermint in production

IMPROVEMENTS:

- [consensus] Consensus reactor now receives events from a separate synchronous event bus,
  which is not dependent on external RPC load
- [consensus/wal] do not look for height in older files if we've seen height - 1
- [docs] Various cleanup and link fixes

## 0.19.6

*May 29th, 2018*

BUG FIXES

- [blockchain] Fix fast-sync deadlock during high peer turnover

BUG FIX:

- [evidence] Don't send peers evidence from heights they haven't synced to yet
- [p2p] Refuse connections to more than one peer with the same IP
- [docs] Various fixes

## 0.19.5

*May 20th, 2018*

BREAKING CHANGES

- [rpc/client] TxSearch and UnconfirmedTxs have new arguments (see below)
- [rpc/client] TxSearch returns ResultTxSearch
- [version] Breaking changes to Go APIs will not be reflected in breaking
  version change, but will be included in changelog.

FEATURES

- [rpc] `/tx_search` takes `page` (starts at 1) and `per_page` (max 100, default 30) args to paginate results
- [rpc] `/unconfirmed_txs` takes `limit` (max 100, default 30) arg to limit the output
- [config] `mempool.size` and `mempool.cache_size` options

IMPROVEMENTS

- [docs] Lots of updates
- [consensus] Only Fsync() the WAL before executing msgs from ourselves

BUG FIXES

- [mempool] Enforce upper bound on number of transactions
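As a usage sketch of the pagination arguments introduced in 0.19.5 above: the `/tx_search` endpoint and the `page`/`per_page` parameter names come from the changelog, while the RPC address, the example query string, and the surrounding program are assumptions made only to keep the snippet self-contained. Adjust the address to your node's `rpc.laddr`.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"
)

func main() {
	// Assumed local RPC address; the endpoint and parameter names are from the changelog.
	base := "http://localhost:46657/tx_search"
	params := url.Values{}
	params.Set("query", `"tx.height=5"`) // example query, quoted as the RPC expects a string
	params.Set("page", "1")              // pages start at 1
	params.Set("per_page", "30")         // max 100, default 30

	resp, err := http.Get(base + "?" + params.Encode())
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body)) // raw JSON result, including total_count for paging
}
```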
## 0.19.4 (May 17th, 2018)

IMPROVEMENTS

- [state] Improve tx indexing by using batches
- [consensus, state] Improve logging (more consensus logs, fewer tx logs)
- [spec] Moved to `docs/spec` (TODO cleanup the rest of the docs ...)

BUG FIXES

- [consensus] Fix issue #1575 where a late proposer can get stuck

## 0.19.3 (May 14th, 2018)

FEATURES
@@ -17,7 +17,7 @@
# Quick reference

* **Where to get help:**
  https://tendermint.com/community
  https://cosmos.network/community

* **Where to file issues:**
  https://github.com/tendermint/tendermint/issues
@@ -37,25 +37,29 @@ To get started developing applications, see the [application developers guide](h

## Start one instance of the Tendermint core with the `kvstore` app

A very simple example of a built-in app and Tendermint core in one container.
A quick example of a built-in app and Tendermint core in one container.

```
docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint init
docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint node --proxy_app=kvstore
```

## mintnet-kubernetes
# Local cluster

If you want to see many containers talking to each other, consider using [mintnet-kubernetes](https://github.com/tendermint/tools/tree/master/mintnet-kubernetes), which is a tool for running Tendermint-based applications on a Kubernetes cluster.
To run a 4-node network, see the `Makefile` in the root of [the repo](https://github.com/tendermint/tendermint/master/Makefile) and run:

```
make build-linux
make build-docker-localnode
make localnet-start
```

Note that this will build and use a different image than the ones provided here.

# License

View [license information](https://raw.githubusercontent.com/tendermint/tendermint/master/LICENSE) for the software contained in this image.
- Tendermint's license is [Apache 2.0](https://github.com/tendermint/tendermint/master/LICENSE).

# User Feedback
# Contributing

## Contributing

You are invited to contribute new features, fixes, or updates, large or small; we are always thrilled to receive pull requests, and do our best to process them as fast as we can.

Before you start to code, we recommend discussing your plans through a [GitHub](https://github.com/tendermint/tendermint/issues) issue, especially for more ambitious contributions. This gives other contributors a chance to point you in the right direction, give you feedback on your design, and help you find out if someone else is working on the same thing.
Contributions are most welcome! See the [contributing file](https://github.com/tendermint/tendermint/blob/master/CONTRIBUTING.md) for more information.
48
Gopkg.lock
generated
48
Gopkg.lock
generated
@@ -5,7 +5,7 @@
|
||||
branch = "master"
|
||||
name = "github.com/btcsuite/btcd"
|
||||
packages = ["btcec"]
|
||||
revision = "675abc5df3c5531bc741b56a765e35623459da6d"
|
||||
revision = "86fed781132ac890ee03e906e4ecd5d6fa180c64"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/davecgh/go-spew"
|
||||
@@ -82,7 +82,7 @@
|
||||
branch = "master"
|
||||
name = "github.com/golang/snappy"
|
||||
packages = ["."]
|
||||
revision = "553a641470496b2327abcac10b36396bd98e45c9"
|
||||
revision = "2e65f85255dbc3072edf28d6b5b8efc472979f5a"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/gorilla/websocket"
|
||||
@@ -128,20 +128,20 @@
|
||||
[[projects]]
|
||||
name = "github.com/magiconair/properties"
|
||||
packages = ["."]
|
||||
revision = "c3beff4c2358b44d0493c7dda585e7db7ff28ae6"
|
||||
version = "v1.7.6"
|
||||
revision = "c2353362d570a7bfa228149c62842019201cfb71"
|
||||
version = "v1.8.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/mitchellh/mapstructure"
|
||||
packages = ["."]
|
||||
revision = "00c29f56e2386353d58c599509e8dc3801b0d716"
|
||||
revision = "bb74f1db0675b241733089d5a1faa5dd8b0ef57b"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/pelletier/go-toml"
|
||||
packages = ["."]
|
||||
revision = "acdc4509485b587f5e675510c4f2c63e90ff68a8"
|
||||
version = "v1.1.0"
|
||||
revision = "c01d1270ff3e442a8a57cddc1c92dc1138598194"
|
||||
version = "v1.2.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/pkg/errors"
|
||||
@@ -159,7 +159,7 @@
|
||||
branch = "master"
|
||||
name = "github.com/rcrowley/go-metrics"
|
||||
packages = ["."]
|
||||
revision = "d932a24a8ccb8fcadc993e5c6c58f93dac168294"
|
||||
revision = "e2704e165165ec55d062f5919b4b29494e9fa790"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/spf13/afero"
|
||||
@@ -179,8 +179,8 @@
|
||||
[[projects]]
|
||||
name = "github.com/spf13/cobra"
|
||||
packages = ["."]
|
||||
revision = "a1f051bc3eba734da4772d60e2d677f47cf93ef4"
|
||||
version = "v0.0.2"
|
||||
revision = "ef82de70bb3f60c65fb8eebacbb2d122ef517385"
|
||||
version = "v0.0.3"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
@@ -226,7 +226,7 @@
|
||||
"leveldb/table",
|
||||
"leveldb/util"
|
||||
]
|
||||
revision = "714f901b98fdb3aa954b4193d8cbd64a28d80cad"
|
||||
revision = "5d6fca44a948d2be89a9702de7717f0168403d3d"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/tendermint/abci"
|
||||
@@ -238,8 +238,8 @@
|
||||
"server",
|
||||
"types"
|
||||
]
|
||||
revision = "78a8905690ef54f9d57e3b2b0ee7ad3a04ef3f1f"
|
||||
version = "v0.10.3"
|
||||
revision = "ebee2fe114020aa49c70bbbae50b7079fc7e7b90"
|
||||
version = "v0.11.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
@@ -263,12 +263,6 @@
|
||||
revision = "915416979bf70efa4bcbf1c6cd5d64c5fff9fc19"
|
||||
version = "v0.6.2"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/tendermint/go-wire"
|
||||
packages = ["."]
|
||||
revision = "fa721242b042ecd4c6ed1a934ee740db4f74e45c"
|
||||
version = "v0.7.3"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/tendermint/tmlibs"
|
||||
packages = [
|
||||
@@ -281,12 +275,10 @@
|
||||
"flowrate",
|
||||
"log",
|
||||
"merkle",
|
||||
"pubsub",
|
||||
"pubsub/query",
|
||||
"test"
|
||||
]
|
||||
revision = "cc5f287c4798ffe88c04d02df219ecb6932080fd"
|
||||
version = "v0.8.3-rc0"
|
||||
revision = "692f1d86a6e2c0efa698fd1e4541b68c74ffaf38"
|
||||
version = "v0.8.4"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
@@ -301,7 +293,7 @@
|
||||
"ripemd160",
|
||||
"salsa20/salsa"
|
||||
]
|
||||
revision = "b0697eccbea9adec5b7ba8008f4c33d98d733388"
|
||||
revision = "b47b1587369238182299fe4dad77d05b8b461e06"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
@@ -313,16 +305,15 @@
|
||||
"http2/hpack",
|
||||
"idna",
|
||||
"internal/timeseries",
|
||||
"lex/httplex",
|
||||
"trace"
|
||||
]
|
||||
revision = "5f9ae10d9af5b1c89ae6904293b14b064d4ada23"
|
||||
revision = "1e491301e022f8f977054da4c2d852decd59571f"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/sys"
|
||||
packages = ["unix"]
|
||||
revision = "bb9c189858d91f42db229b04d45a4c3d23a7662a"
|
||||
revision = "9527bec2660bd847c050fda93a0f0c6dee0800bb"
|
||||
|
||||
[[projects]]
|
||||
name = "golang.org/x/text"
|
||||
@@ -346,7 +337,6 @@
|
||||
version = "v0.3.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "google.golang.org/genproto"
|
||||
packages = ["googleapis/rpc/status"]
|
||||
revision = "7fd901a49ba6a7f87732eb344f6e3c5b19d1b200"
|
||||
@@ -384,6 +374,6 @@
|
||||
[solve-meta]
|
||||
analyzer-name = "dep"
|
||||
analyzer-version = 1
|
||||
inputs-digest = "52a0dcbebdf8714612444914cfce59a3af8c47c4453a2d43c4ccc5ff1a91d8ea"
|
||||
inputs-digest = "ae6792578b0664708339f4c05e020687d6b799ad6f8282394b919de69e403d1f"
|
||||
solver-name = "gps-cdcl"
|
||||
solver-version = 1
|
||||
|
Gopkg.toml (11 changes)

@@ -71,7 +71,7 @@

[[constraint]]
  name = "github.com/tendermint/abci"
  version = "~0.10.3"
  version = "~0.11.0"

[[constraint]]
  name = "github.com/tendermint/go-crypto"
@@ -79,16 +79,21 @@

[[constraint]]
  name = "github.com/tendermint/go-amino"
  version = "0.9.9"
  version = "=0.9.9"

[[override]]
  name = "github.com/tendermint/tmlibs"
  version = "~0.8.3-rc0"
  version = "~0.8.4"

[[constraint]]
  name = "google.golang.org/grpc"
  version = "~1.7.3"

# this got updated and broke, so locked to an old working commit ...
[[override]]
  name = "google.golang.org/genproto"
  revision = "7fd901a49ba6a7f87732eb344f6e3c5b19d1b200"

[prune]
  go-tests = true
  unused-packages = true
Makefile (15 changes)

@@ -193,8 +193,12 @@ build-docker:
build-linux:
	GOOS=linux GOARCH=amd64 $(MAKE) build

build-docker-localnode:
	cd networks/local
	make

# Run a 4-node testnet locally
localnet-start:
localnet-start: localnet-stop
	@if ! [ -f build/node0/config/genesis.json ]; then docker run --rm -v $(CURDIR)/build:/tendermint:Z tendermint/localnode testnet --v 4 --o . --populate-persistent-peers --starting-ip-address 192.167.10.2 ; fi
	docker-compose up

@@ -212,7 +216,7 @@ sentry-start:
	cd networks/remote/terraform && terraform init && terraform apply -var DO_API_TOKEN="$(DO_API_TOKEN)" -var SSH_KEY_FILE="$(HOME)/.ssh/id_rsa.pub"
	@if ! [ -f $(CURDIR)/build/node0/config/genesis.json ]; then docker run --rm -v $(CURDIR)/build:/tendermint:Z tendermint/localnode testnet --v 0 --n 4 --o . ; fi
	cd networks/remote/ansible && ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook -i inventory/digital_ocean.py -l sentrynet install.yml
	@echo "Next step: Add your validator setup in the genesis.json and config.tml files and run \"make server-config\". (Public key of validator, chain ID, peer IP and node ID.)"
	@echo "Next step: Add your validator setup in the genesis.json and config.tml files and run \"make sentry-config\". (Public key of validator, chain ID, peer IP and node ID.)"

# Configuration management
sentry-config:
@@ -222,8 +226,11 @@ sentry-stop:
	@if [ -z "$(DO_API_TOKEN)" ]; then echo "DO_API_TOKEN environment variable not set." ; false ; fi
	cd networks/remote/terraform && terraform destroy -var DO_API_TOKEN="$(DO_API_TOKEN)" -var SSH_KEY_FILE="$(HOME)/.ssh/id_rsa.pub"

# meant for the CI, inspect script & adapt accordingly
build-slate:
	bash scripts/slate.sh

# To avoid unintended conflicts with file names, always add to .PHONY
# unless there is a reason not to.
# https://www.gnu.org/software/make/manual/html_node/Phony-Targets.html
.PHONY: check build build_race dist install check_tools get_tools update_tools get_vendor_deps draw_deps test_cover test_apps test_persistence test_p2p test test_race test_integrations test_release test100 vagrant_test fmt build-linux localnet-start localnet-stop build-docker sentry-start sentry-config sentry-stop

.PHONY: check build build_race dist install check_tools get_tools update_tools get_vendor_deps draw_deps test_cover test_apps test_persistence test_p2p test test_race test_integrations test_release test100 vagrant_test fmt build-linux localnet-start localnet-stop build-docker build-docker-localnode sentry-start sentry-config sentry-stop build-slate
README.md (28 changes)

@@ -24,9 +24,14 @@ _NOTE: This is alpha software. Please contact us if you intend to run it in prod
Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine - written in any programming language -
and securely replicates it on many machines.

For more information, from introduction to installation and application development, [Read The Docs](https://tendermint.readthedocs.io/en/master/).
For protocol details, see [the specification](/docs/spec).

For protocol details, see [the specification](./docs/specification/new-spec).
## Security

To report a security vulnerability, see our [bug bounty
program](https://tendermint.com/security).

For examples of the kinds of bugs we're looking for, see [SECURITY.md](SECURITY.md)

## Minimum requirements

@@ -36,19 +41,20 @@ Go version | Go1.9 or higher

## Install

To download pre-built binaries, see our [downloads page](https://tendermint.com/downloads).
See the [install instructions](/docs/install.rst)

To install from source, you should be able to:
## Quick Start

`go get -u github.com/tendermint/tendermint/cmd/tendermint`

For more details (or if it fails), [read the docs](https://tendermint.readthedocs.io/en/master/install.html).
- [Single node](/docs/using-tendermint.rst)
- [Local cluster using docker-compose](/networks/local)
- [Remote cluster using terraform and ansible](/docs/terraform-and-ansible.rst)
- [Join the public testnet](https://cosmos.network/testnet)

## Resources

### Tendermint Core

To use Tendermint, build apps on it, or develop it, [Read The Docs](https://tendermint.readthedocs.io/en/master/).
For more, [Read The Docs](https://tendermint.readthedocs.io/en/master/).
Additional information about some - and eventually all - of the sub-projects below, can be found at Read The Docs.

### Sub-projects
@@ -88,7 +94,11 @@ According to SemVer, anything in the public API can change at any time before ve

To provide some stability to Tendermint users in these 0.X.X days, the MINOR version is used
to signal breaking changes across a subset of the total public API. This subset includes all
interfaces exposed to other processes (cli, rpc, p2p, etc.), as well as parts of the following packages:
interfaces exposed to other processes (cli, rpc, p2p, etc.), but does not
include the in-process Go APIs.

That said, breaking changes in the following packages will be documented in the
CHANGELOG even if they don't lead to MINOR version bumps:

- types
- rpc/client
ROADMAP.md (new file, 23 lines)

@@ -0,0 +1,23 @@
# Roadmap

BREAKING CHANGES:
- Better support for injecting randomness
- Upgrade consensus for more real-time use of evidence

FEATURES:
- Use the chain as its own CA for nodes and validators
- Tooling to run multiple blockchains/apps, possibly in a single process
- State syncing (without transaction replay)
- Add authentication and rate-limiting to the RPC

IMPROVEMENTS:
- Improve subtleties around mempool caching and logic
- Consensus optimizations:
  - cache block parts for faster agreement after round changes
  - propagate block parts rarest first
- Better testing of the consensus state machine (ie. use a DSL)
- Auto compiled serialization/deserialization code instead of go-wire reflection

BUG FIXES:
- Graceful handling/recovery for apps that have non-determinism or fail to halt
- Graceful handling/recovery for violations of safety, or liveness
SECURITY.md (new file, 71 lines)

@@ -0,0 +1,71 @@
# Security

As part of our [Coordinated Vulnerability Disclosure
Policy](https://tendermint.com/security), we operate a bug bounty.
See the policy for more details on submissions and rewards.

Here is a list of examples of the kinds of bugs we're most interested in:

## Specification

- Conceptual flaws
- Ambiguities, inconsistencies, or incorrect statements
- Mis-match between specification and implementation of any component

## Consensus

Assuming less than 1/3 of the voting power is Byzantine (malicious):

- Validation of blockchain data structures, including blocks, block parts,
  votes, and so on
- Execution of blocks
- Validator set changes
- Proposer round robin
- Two nodes committing conflicting blocks for the same height (safety failure)
- A correct node signing conflicting votes
- A node halting (liveness failure)
- Syncing new and old nodes

## Networking

- Authenticated encryption (MITM, information leakage)
- Eclipse attacks
- Sybil attacks
- Long-range attacks
- Denial-of-Service

## RPC

- Write-access to anything besides sending transactions
- Denial-of-Service
- Leakage of secrets

## Denial-of-Service

Attacks may come through the P2P network or the RPC:

- Amplification attacks
- Resource abuse
- Deadlocks and race conditions
- Panics and unhandled errors

## Libraries

- Serialization (Amino)
- Reading/Writing files and databases
- Logging and monitoring

## Cryptography

- Elliptic curves for validator signatures
- Hash algorithms and Merkle trees for block validation
- Authenticated encryption for P2P connections

## Light Client

- Validation of blockchain data structures
- Correctly validating an incorrect proof
- Incorrectly validating a correct proof
- Syncing validator set changes
34
Vagrantfile
vendored
34
Vagrantfile
vendored
@@ -10,31 +10,37 @@ Vagrant.configure("2") do |config|
|
||||
end
|
||||
|
||||
config.vm.provision "shell", inline: <<-SHELL
|
||||
# add docker repo
|
||||
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
|
||||
add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu xenial stable"
|
||||
|
||||
# and golang 1.9 support
|
||||
# official repo doesn't have race detection runtime...
|
||||
# add-apt-repository ppa:gophers/archive
|
||||
add-apt-repository ppa:longsleep/golang-backports
|
||||
apt-get update
|
||||
|
||||
# install base requirements
|
||||
apt-get update
|
||||
apt-get install -y --no-install-recommends wget curl jq zip \
|
||||
make shellcheck bsdmainutils psmisc
|
||||
apt-get install -y docker-ce golang-1.9-go
|
||||
apt-get install -y language-pack-en
|
||||
|
||||
# install docker
|
||||
apt-get install -y --no-install-recommends apt-transport-https \
|
||||
ca-certificates curl software-properties-common
|
||||
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
|
||||
add-apt-repository \
|
||||
"deb [arch=amd64] https://download.docker.com/linux/ubuntu \
|
||||
$(lsb_release -cs) \
|
||||
stable"
|
||||
apt-get install -y docker-ce
|
||||
usermod -a -G docker vagrant
|
||||
|
||||
# install go
|
||||
wget -q https://dl.google.com/go/go1.10.1.linux-amd64.tar.gz
|
||||
tar -xvf go1.10.1.linux-amd64.tar.gz
|
||||
mv go /usr/local
|
||||
rm -f go1.10.1.linux-amd64.tar.gz
|
||||
|
||||
# cleanup
|
||||
apt-get autoremove -y
|
||||
|
||||
# needed for docker
|
||||
usermod -a -G docker vagrant
|
||||
|
||||
# set env variables
|
||||
echo 'export PATH=$PATH:/usr/lib/go-1.9/bin:/home/vagrant/go/bin' >> /home/vagrant/.bash_profile
|
||||
echo 'export GOROOT=/usr/local/go' >> /home/vagrant/.bash_profile
|
||||
echo 'export GOPATH=/home/vagrant/go' >> /home/vagrant/.bash_profile
|
||||
echo 'export PATH=$PATH:$GOROOT/bin:$GOPATH/bin' >> /home/vagrant/.bash_profile
|
||||
echo 'export LC_ALL=en_US.UTF-8' >> /home/vagrant/.bash_profile
|
||||
echo 'cd go/src/github.com/tendermint/tendermint' >> /home/vagrant/.bash_profile
|
||||
|
||||
|
@@ -5,6 +5,7 @@ import (
|
||||
"fmt"
|
||||
"math"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
cmn "github.com/tendermint/tmlibs/common"
|
||||
@@ -66,11 +67,13 @@ type BlockPool struct {
|
||||
// block requests
|
||||
requesters map[int64]*bpRequester
|
||||
height int64 // the lowest key in requesters.
|
||||
numPending int32 // number of requests pending assignment or block response
|
||||
// peers
|
||||
peers map[p2p.ID]*bpPeer
|
||||
maxPeerHeight int64
|
||||
|
||||
// atomic
|
||||
numPending int32 // number of requests pending assignment or block response
|
||||
|
||||
requestsCh chan<- BlockRequest
|
||||
errorsCh chan<- peerError
|
||||
}
|
||||
@@ -151,7 +154,7 @@ func (pool *BlockPool) GetStatus() (height int64, numPending int32, lenRequester
	pool.mtx.Lock()
	defer pool.mtx.Unlock()

	return pool.height, pool.numPending, len(pool.requesters)
	return pool.height, atomic.LoadInt32(&pool.numPending), len(pool.requesters)
}
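The hunk above reads `numPending` with `sync/atomic` instead of relying solely on the pool mutex. A standalone sketch of that counter pattern (not the BlockPool itself) follows:

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// counter mirrors the pattern used for BlockPool.numPending above: writers use
// atomic.AddInt32 and readers use atomic.LoadInt32, so the field no longer
// needs to be guarded by the pool's mutex.
type counter struct {
	numPending int32 // accessed atomically
}

func main() {
	c := &counter{}
	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			atomic.AddInt32(&c.numPending, 1)
			atomic.AddInt32(&c.numPending, -1)
		}()
	}
	wg.Wait()
	fmt.Println(atomic.LoadInt32(&c.numPending)) // prints 0
}
```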
// TODO: relax conditions, prevent abuse.
|
||||
@@ -245,7 +248,7 @@ func (pool *BlockPool) AddBlock(peerID p2p.ID, block *types.Block, blockSize int
|
||||
}
|
||||
|
||||
if requester.setBlock(block, peerID) {
|
||||
pool.numPending--
|
||||
atomic.AddInt32(&pool.numPending, -1)
|
||||
peer := pool.peers[peerID]
|
||||
if peer != nil {
|
||||
peer.decrPending(blockSize)
|
||||
@@ -291,10 +294,7 @@ func (pool *BlockPool) RemovePeer(peerID p2p.ID) {
|
||||
func (pool *BlockPool) removePeer(peerID p2p.ID) {
|
||||
for _, requester := range pool.requesters {
|
||||
if requester.getPeerID() == peerID {
|
||||
if requester.getBlock() != nil {
|
||||
pool.numPending++
|
||||
}
|
||||
go requester.redo() // pick another peer and ...
|
||||
requester.redo()
|
||||
}
|
||||
}
|
||||
delete(pool.peers, peerID)
|
||||
@@ -332,7 +332,7 @@ func (pool *BlockPool) makeNextRequester() {
|
||||
// request.SetLogger(pool.Logger.With("height", nextHeight))
|
||||
|
||||
pool.requesters[nextHeight] = request
|
||||
pool.numPending++
|
||||
atomic.AddInt32(&pool.numPending, 1)
|
||||
|
||||
err := request.Start()
|
||||
if err != nil {
|
||||
@@ -360,7 +360,7 @@ func (pool *BlockPool) sendError(err error, peerID p2p.ID) {
|
||||
|
||||
// unused by tendermint; left for debugging purposes
|
||||
func (pool *BlockPool) debug() string {
|
||||
pool.mtx.Lock() // Lock
|
||||
pool.mtx.Lock()
|
||||
defer pool.mtx.Unlock()
|
||||
|
||||
str := ""
|
||||
@@ -466,8 +466,8 @@ func newBPRequester(pool *BlockPool, height int64) *bpRequester {
|
||||
bpr := &bpRequester{
|
||||
pool: pool,
|
||||
height: height,
|
||||
gotBlockCh: make(chan struct{}),
|
||||
redoCh: make(chan struct{}),
|
||||
gotBlockCh: make(chan struct{}, 1),
|
||||
redoCh: make(chan struct{}, 1),
|
||||
|
||||
peerID: "",
|
||||
block: nil,
|
||||
@@ -481,7 +481,7 @@ func (bpr *bpRequester) OnStart() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Returns true if the peer matches
|
||||
// Returns true if the peer matches and block doesn't already exist.
|
||||
func (bpr *bpRequester) setBlock(block *types.Block, peerID p2p.ID) bool {
|
||||
bpr.mtx.Lock()
|
||||
if bpr.block != nil || bpr.peerID != peerID {
|
||||
@@ -491,7 +491,10 @@ func (bpr *bpRequester) setBlock(block *types.Block, peerID p2p.ID) bool {
|
||||
bpr.block = block
|
||||
bpr.mtx.Unlock()
|
||||
|
||||
bpr.gotBlockCh <- struct{}{}
|
||||
select {
|
||||
case bpr.gotBlockCh <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
@@ -507,17 +510,27 @@ func (bpr *bpRequester) getPeerID() p2p.ID {
|
||||
return bpr.peerID
|
||||
}
|
||||
|
||||
// This is called from the requestRoutine, upon redo().
|
||||
func (bpr *bpRequester) reset() {
|
||||
bpr.mtx.Lock()
|
||||
defer bpr.mtx.Unlock()
|
||||
|
||||
if bpr.block != nil {
|
||||
atomic.AddInt32(&bpr.pool.numPending, 1)
|
||||
}
|
||||
|
||||
bpr.peerID = ""
|
||||
bpr.block = nil
|
||||
bpr.mtx.Unlock()
|
||||
}
|
||||
|
||||
// Tells bpRequester to pick another peer and try again.
// NOTE: blocking
// NOTE: Nonblocking, and does nothing if another redo
// was already requested.
func (bpr *bpRequester) redo() {
	bpr.redoCh <- struct{}{}
	select {
	case bpr.redoCh <- struct{}{}:
	default:
	}
}
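`redo()` now performs a non-blocking send into a channel of capacity 1 (the same change applies to `gotBlockCh` and `setBlock` earlier in this diff), so duplicate notifications are coalesced instead of blocking the caller. A self-contained sketch of the pattern:

```go
package main

import "fmt"

// notify coalesces repeated signals: with a buffered channel of capacity 1 and
// a select/default, extra notifications are dropped instead of blocking the
// caller - the same shape as bpRequester.redo() above.
func notify(ch chan struct{}) {
	select {
	case ch <- struct{}{}:
	default: // a signal is already pending; nothing to do
	}
}

func main() {
	redoCh := make(chan struct{}, 1)
	notify(redoCh)
	notify(redoCh) // does not block; the second signal is coalesced
	<-redoCh
	fmt.Println("handled one coalesced redo signal")
}
```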
// Responsible for making more requests as necessary
|
||||
@@ -546,17 +559,8 @@ OUTER_LOOP:
|
||||
|
||||
// Send request and wait.
|
||||
bpr.pool.sendRequest(bpr.height, peer.id)
|
||||
select {
|
||||
case <-bpr.pool.Quit():
|
||||
bpr.Stop()
|
||||
return
|
||||
case <-bpr.Quit():
|
||||
return
|
||||
case <-bpr.redoCh:
|
||||
bpr.reset()
|
||||
continue OUTER_LOOP // When peer is removed
|
||||
case <-bpr.gotBlockCh:
|
||||
// We got the block, now see if it's good.
|
||||
WAIT_LOOP:
|
||||
for {
|
||||
select {
|
||||
case <-bpr.pool.Quit():
|
||||
bpr.Stop()
|
||||
@@ -566,6 +570,10 @@ OUTER_LOOP:
|
||||
case <-bpr.redoCh:
|
||||
bpr.reset()
|
||||
continue OUTER_LOOP
|
||||
case <-bpr.gotBlockCh:
|
||||
// We got a block!
|
||||
// Continue the for-loop and wait til Quit.
|
||||
continue WAIT_LOOP
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -1,6 +1,7 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"net"
|
||||
"testing"
|
||||
|
||||
cmn "github.com/tendermint/tmlibs/common"
|
||||
@@ -35,7 +36,7 @@ func newBlockchainReactor(logger log.Logger, maxBlockHeight int64) *BlockchainRe
|
||||
fastSync := true
|
||||
var nilApp proxy.AppConnConsensus
|
||||
blockExec := sm.NewBlockExecutor(dbm.NewMemDB(), log.TestingLogger(), nilApp,
|
||||
types.MockMempool{}, types.MockEvidencePool{})
|
||||
sm.MockMempool{}, sm.MockEvidencePool{})
|
||||
|
||||
bcReactor := NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync)
|
||||
bcReactor.SetLogger(logger.With("module", "blockchain"))
|
||||
@@ -204,3 +205,4 @@ func (tp *bcrTestPeer) IsOutbound() bool { return false }
|
||||
func (tp *bcrTestPeer) IsPersistent() bool { return true }
|
||||
func (tp *bcrTestPeer) Get(s string) interface{} { return s }
|
||||
func (tp *bcrTestPeer) Set(string, interface{}) {}
|
||||
func (tp *bcrTestPeer) RemoteIP() net.IP { return []byte{127, 0, 0, 1} }
|
||||
|
@@ -97,7 +97,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
|
||||
|
||||
incompletePartSet := types.NewPartSetFromHeader(types.PartSetHeader{Total: 2})
|
||||
uncontiguousPartSet := types.NewPartSetFromHeader(types.PartSetHeader{Total: 0})
|
||||
uncontiguousPartSet.AddPart(part2, false)
|
||||
uncontiguousPartSet.AddPart(part2)
|
||||
|
||||
header1 := types.Header{
|
||||
Height: 1,
|
||||
|
@@ -8,7 +8,7 @@ import (
|
||||
cmn "github.com/tendermint/tmlibs/common"
|
||||
"github.com/tendermint/tmlibs/log"
|
||||
|
||||
priv_val "github.com/tendermint/tendermint/types/priv_validator"
|
||||
"github.com/tendermint/tendermint/privval"
|
||||
)
|
||||
|
||||
func main() {
|
||||
@@ -30,13 +30,13 @@ func main() {
|
||||
"privPath", *privValPath,
|
||||
)
|
||||
|
||||
privVal := priv_val.LoadFilePV(*privValPath)
|
||||
pv := privval.LoadFilePV(*privValPath)
|
||||
|
||||
rs := priv_val.NewRemoteSigner(
|
||||
rs := privval.NewRemoteSigner(
|
||||
logger,
|
||||
*chainID,
|
||||
*addr,
|
||||
privVal,
|
||||
pv,
|
||||
crypto.GenPrivKeyEd25519(),
|
||||
)
|
||||
err := rs.Start()
|
||||
|
@@ -5,7 +5,7 @@ import (
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
pvm "github.com/tendermint/tendermint/types/priv_validator"
|
||||
"github.com/tendermint/tendermint/privval"
|
||||
)
|
||||
|
||||
// GenValidatorCmd allows the generation of a keypair for a
|
||||
@@ -17,7 +17,7 @@ var GenValidatorCmd = &cobra.Command{
|
||||
}
|
||||
|
||||
func genValidator(cmd *cobra.Command, args []string) {
|
||||
pv := pvm.GenFilePV("")
|
||||
pv := privval.GenFilePV("")
|
||||
jsbz, err := cdc.MarshalJSON(pv)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
|
@@ -7,8 +7,8 @@ import (
|
||||
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
"github.com/tendermint/tendermint/privval"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
pvm "github.com/tendermint/tendermint/types/priv_validator"
|
||||
cmn "github.com/tendermint/tmlibs/common"
|
||||
)
|
||||
|
||||
@@ -26,12 +26,12 @@ func initFiles(cmd *cobra.Command, args []string) error {
func initFilesWithConfig(config *cfg.Config) error {
	// private validator
	privValFile := config.PrivValidatorFile()
	var pv *pvm.FilePV
	var pv *privval.FilePV
	if cmn.FileExists(privValFile) {
		pv = pvm.LoadFilePV(privValFile)
		pv = privval.LoadFilePV(privValFile)
		logger.Info("Found private validator", "path", privValFile)
	} else {
		pv = pvm.GenFilePV(privValFile)
		pv = privval.GenFilePV(privValFile)
		pv.Save()
		logger.Info("Generated private validator", "path", privValFile)
	}
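The load-or-generate logic above uses the renamed `privval` package. Below is a minimal standalone sketch of the same flow; the functions shown (`LoadFilePV`, `GenFilePV`, `Save`, `GetPubKey`, `cmn.FileExists`) all appear in this diff, while the file path is only an example.

```go
package main

import (
	"fmt"
	"os"

	"github.com/tendermint/tendermint/privval"
	cmn "github.com/tendermint/tmlibs/common"
)

func main() {
	// Same load-or-generate shape as initFilesWithConfig above; the path here
	// is an example and should come from your node's config in practice.
	privValFile := os.ExpandEnv("$HOME/.tendermint/config/priv_validator.json")

	var pv *privval.FilePV
	if cmn.FileExists(privValFile) {
		pv = privval.LoadFilePV(privValFile)
	} else {
		pv = privval.GenFilePV(privValFile)
		pv.Save()
	}
	fmt.Println("validator pubkey:", pv.GetPubKey())
}
```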
@@ -5,7 +5,7 @@ import (
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
pvm "github.com/tendermint/tendermint/types/priv_validator"
|
||||
"github.com/tendermint/tendermint/privval"
|
||||
"github.com/tendermint/tmlibs/log"
|
||||
)
|
||||
|
||||
@@ -13,14 +13,14 @@ import (
|
||||
// instance.
|
||||
var ResetAllCmd = &cobra.Command{
|
||||
Use: "unsafe_reset_all",
|
||||
Short: "(unsafe) Remove all the data and WAL, reset this node's validator",
|
||||
Short: "(unsafe) Remove all the data and WAL, reset this node's validator to genesis state",
|
||||
Run: resetAll,
|
||||
}
|
||||
|
||||
// ResetPrivValidatorCmd resets the private validator files.
|
||||
var ResetPrivValidatorCmd = &cobra.Command{
|
||||
Use: "unsafe_reset_priv_validator",
|
||||
Short: "(unsafe) Reset this node's validator",
|
||||
Short: "(unsafe) Reset this node's validator to genesis state",
|
||||
Run: resetPrivValidator,
|
||||
}
|
||||
|
||||
@@ -32,7 +32,7 @@ func ResetAll(dbDir, privValFile string, logger log.Logger) {
|
||||
logger.Error("Error removing directory", "err", err)
|
||||
return
|
||||
}
|
||||
logger.Info("Removed all data", "dir", dbDir)
|
||||
logger.Info("Removed all blockchain history", "dir", dbDir)
|
||||
}
|
||||
|
||||
// XXX: this is totally unsafe.
|
||||
@@ -50,11 +50,11 @@ func resetPrivValidator(cmd *cobra.Command, args []string) {
|
||||
func resetFilePV(privValFile string, logger log.Logger) {
|
||||
// Get PrivValidator
|
||||
if _, err := os.Stat(privValFile); err == nil {
|
||||
pv := pvm.LoadFilePV(privValFile)
|
||||
pv := privval.LoadFilePV(privValFile)
|
||||
pv.Reset()
|
||||
logger.Info("Reset PrivValidator", "file", privValFile)
|
||||
logger.Info("Reset PrivValidator to genesis state", "file", privValFile)
|
||||
} else {
|
||||
pv := pvm.GenFilePV(privValFile)
|
||||
pv := privval.GenFilePV(privValFile)
|
||||
pv.Save()
|
||||
logger.Info("Generated PrivValidator", "file", privValFile)
|
||||
}
|
||||
|
@@ -5,7 +5,7 @@ import (
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
privval "github.com/tendermint/tendermint/types/priv_validator"
|
||||
"github.com/tendermint/tendermint/privval"
|
||||
)
|
||||
|
||||
// ShowValidatorCmd adds capabilities for showing the validator info.
|
||||
|
@@ -12,8 +12,8 @@ import (
|
||||
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
"github.com/tendermint/tendermint/privval"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
pvm "github.com/tendermint/tendermint/types/priv_validator"
|
||||
cmn "github.com/tendermint/tmlibs/common"
|
||||
)
|
||||
|
||||
@@ -89,7 +89,7 @@ func testnetFiles(cmd *cobra.Command, args []string) error {
|
||||
initFilesWithConfig(config)
|
||||
|
||||
pvFile := filepath.Join(nodeDir, config.BaseConfig.PrivValidator)
|
||||
pv := pvm.LoadFilePV(pvFile)
|
||||
pv := privval.LoadFilePV(pvFile)
|
||||
genVals[i] = types.GenesisValidator{
|
||||
PubKey: pv.GetPubKey(),
|
||||
Power: 1,
|
||||
|
@@ -7,6 +7,13 @@ import (
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
// FuzzModeDrop is a mode in which we randomly drop reads/writes, connections or sleep
|
||||
FuzzModeDrop = iota
|
||||
// FuzzModeDelay is a mode in which we randomly sleep
|
||||
FuzzModeDelay
|
||||
)
|
||||
|
||||
// NOTE: Most of the structs & relevant comments + the
|
||||
// default configuration options were used to manually
|
||||
// generate the config.toml. Please reflect any changes
|
||||
@@ -287,11 +294,23 @@ type P2PConfig struct {
|
||||
// Does not work if the peer-exchange reactor is disabled.
|
||||
SeedMode bool `mapstructure:"seed_mode"`
|
||||
|
||||
// Authenticated encryption
|
||||
AuthEnc bool `mapstructure:"auth_enc"`
|
||||
|
||||
// Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
|
||||
// Comma separated list of peer IDs to keep private (will not be gossiped to
|
||||
// other peers)
|
||||
PrivatePeerIDs string `mapstructure:"private_peer_ids"`
|
||||
|
||||
// Toggle to disable guard against peers connecting from the same ip.
|
||||
AllowDuplicateIP bool `mapstructure:"allow_duplicate_ip"`
|
||||
|
||||
// Peer connection configuration.
|
||||
HandshakeTimeout time.Duration `mapstructure:"handshake_timeout"`
|
||||
DialTimeout time.Duration `mapstructure:"dial_timeout"`
|
||||
|
||||
// Testing params.
|
||||
// Force dial to fail
|
||||
TestDialFail bool `mapstructure:"test_dial_fail"`
|
||||
// Fuzz connection
|
||||
TestFuzz bool `mapstructure:"test_fuzz"`
|
||||
TestFuzzConfig *FuzzConnConfig `mapstructure:"test_fuzz_config"`
|
||||
}
|
||||
|
||||
// DefaultP2PConfig returns a default configuration for the peer-to-peer layer
|
||||
@@ -307,7 +326,12 @@ func DefaultP2PConfig() *P2PConfig {
|
||||
RecvRate: 512000, // 500 kB/s
|
||||
PexReactor: true,
|
||||
SeedMode: false,
|
||||
AuthEnc: true,
|
||||
AllowDuplicateIP: true, // so non-breaking yet
|
||||
HandshakeTimeout: 20 * time.Second,
|
||||
DialTimeout: 3 * time.Second,
|
||||
TestDialFail: false,
|
||||
TestFuzz: false,
|
||||
TestFuzzConfig: DefaultFuzzConnConfig(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -317,6 +341,7 @@ func TestP2PConfig() *P2PConfig {
|
||||
cfg.ListenAddress = "tcp://0.0.0.0:36656"
|
||||
cfg.SkipUPNP = true
|
||||
cfg.FlushThrottleTimeout = 10
|
||||
cfg.AllowDuplicateIP = true
|
||||
return cfg
|
||||
}
|
||||
|
||||
@@ -325,6 +350,26 @@ func (cfg *P2PConfig) AddrBookFile() string {
	return rootify(cfg.AddrBook, cfg.RootDir)
}

// FuzzConnConfig is a FuzzedConnection configuration.
type FuzzConnConfig struct {
	Mode         int
	MaxDelay     time.Duration
	ProbDropRW   float64
	ProbDropConn float64
	ProbSleep    float64
}

// DefaultFuzzConnConfig returns the default config.
func DefaultFuzzConnConfig() *FuzzConnConfig {
	return &FuzzConnConfig{
		Mode:         FuzzModeDrop,
		MaxDelay:     3 * time.Second,
		ProbDropRW:   0.2,
		ProbDropConn: 0.00,
		ProbSleep:    0.00,
	}
}
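A sketch of wiring the new fuzzing options into a P2P config for tests; the field and constructor names come from the hunks above and earlier in this file, while the chosen values are arbitrary. Enabling fuzzing outside of tests is not advised.

```go
package main

import (
	"fmt"
	"time"

	cfg "github.com/tendermint/tendermint/config"
)

func main() {
	// Build a P2P config that fuzzes connections in tests, using the options
	// added in the diff above.
	p2p := cfg.DefaultP2PConfig()
	p2p.TestFuzz = true

	fz := cfg.DefaultFuzzConnConfig() // Mode is FuzzModeDrop by default
	fz.Mode = cfg.FuzzModeDelay       // randomly sleep instead of dropping reads/writes
	fz.MaxDelay = 500 * time.Millisecond
	p2p.TestFuzzConfig = fz

	fmt.Printf("fuzz enabled: %v, mode: %d\n", p2p.TestFuzz, p2p.TestFuzzConfig.Mode)
}
```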
//-----------------------------------------------------------------------------
// MempoolConfig

@@ -335,6 +380,7 @@ type MempoolConfig struct {
	RecheckEmpty bool   `mapstructure:"recheck_empty"`
	Broadcast    bool   `mapstructure:"broadcast"`
	WalPath      string `mapstructure:"wal_dir"`
	Size         int    `mapstructure:"size"`
	CacheSize    int    `mapstructure:"cache_size"`
}

@@ -345,6 +391,7 @@ func DefaultMempoolConfig() *MempoolConfig {
	RecheckEmpty: true,
	Broadcast:    true,
	WalPath:      filepath.Join(defaultDataDir, "mempool.wal"),
	Size:         100000,
	CacheSize:    100000,
	}
}
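Similarly, a short sketch using the new mempool bounds; `DefaultMempoolConfig`, `Size`, and `CacheSize` come from the hunk above, and the values below are arbitrary examples (both default to 100000).

```go
package main

import (
	"fmt"

	cfg "github.com/tendermint/tendermint/config"
)

func main() {
	mem := cfg.DefaultMempoolConfig()
	mem.Size = 5000       // max number of txs kept in the mempool
	mem.CacheSize = 10000 // txs remembered in order to filter duplicates

	fmt.Printf("mempool size=%d cache=%d wal=%s\n", mem.Size, mem.CacheSize, mem.WalPath)
}
```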
|
@@ -165,9 +165,6 @@ pex = {{ .P2P.PexReactor }}
|
||||
# Does not work if the peer-exchange reactor is disabled.
|
||||
seed_mode = {{ .P2P.SeedMode }}
|
||||
|
||||
# Authenticated encryption
|
||||
auth_enc = {{ .P2P.AuthEnc }}
|
||||
|
||||
# Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
|
||||
private_peer_ids = "{{ .P2P.PrivatePeerIDs }}"
|
||||
|
||||
@@ -179,6 +176,12 @@ recheck_empty = {{ .Mempool.RecheckEmpty }}
|
||||
broadcast = {{ .Mempool.Broadcast }}
|
||||
wal_dir = "{{ .Mempool.WalPath }}"
|
||||
|
||||
# size of the mempool
|
||||
size = {{ .Mempool.Size }}
|
||||
|
||||
# size of the cache (used to filter transactions we saw earlier)
|
||||
cache_size = {{ .Mempool.CacheSize }}
|
||||
|
||||
##### consensus configuration options #####
|
||||
[consensus]
|
||||
|
||||
|
@@ -1,18 +1 @@
# The core consensus algorithm.

* state.go - The state machine as detailed in the whitepaper
* reactor.go - A reactor that connects the state machine to the gossip network

# Go-routine summary

The reactor runs 2 go-routines for each added peer: gossipDataRoutine and gossipVotesRoutine.

The consensus state runs two persistent go-routines: timeoutRoutine and receiveRoutine.
Go-routines are also started to trigger timeouts and to avoid blocking when the internalMsgQueue is really backed up.

# Replay/WAL

A write-ahead log is used to record all messages processed by the receiveRoutine,
which amounts to all inputs to the consensus state machine:
messages from peers, messages from ourselves, and timeouts.
They can be played back deterministically at startup or using the replay console.
See the [consensus spec](https://github.com/tendermint/tendermint/tree/master/docs/spec/consensus) and the [reactor consensus spec](https://github.com/tendermint/tendermint/tree/master/docs/spec/reactors/consensus) for more information.
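To make the replay idea concrete, here is a toy append-and-replay log in plain Go. It only illustrates the concept described above (record every input before processing it, then replay in order on restart); it is not the consensus WAL implementation or its API.

```go
package main

import (
	"bufio"
	"fmt"
	"os"
)

// appendWAL records an input on disk before it is processed.
func appendWAL(path, msg string) error {
	f, err := os.OpenFile(path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0600)
	if err != nil {
		return err
	}
	defer f.Close()
	_, err = fmt.Fprintln(f, msg)
	return err
}

// replayWAL re-applies every recorded input in the order it was written.
func replayWAL(path string, apply func(string)) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()
	sc := bufio.NewScanner(f)
	for sc.Scan() {
		apply(sc.Text())
	}
	return sc.Err()
}

func main() {
	const wal = "toy.wal"
	appendWAL(wal, "proposal@height=1")
	appendWAL(wal, "vote@height=1")
	replayWAL(wal, func(msg string) { fmt.Println("replaying:", msg) })
}
```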
@@ -27,7 +27,7 @@ func init() {
// Heal partition and ensure A sees the commit
func TestByzantine(t *testing.T) {
	N := 4
	logger := consensusLogger()
	logger := consensusLogger().With("test", "byzantine")
	css := randConsensusNet(N, "consensus_byzantine_test", newMockTickerFunc(false), newCounter)

	// give the byzantine validator a normal ticker
@@ -58,14 +58,11 @@ func TestByzantine(t *testing.T) {
		css[i].doPrevote = func(height int64, round int) {}
	}

	eventBus := types.NewEventBus()
	eventBus := css[i].eventBus
	eventBus.SetLogger(logger.With("module", "events", "validator", i))
	err := eventBus.Start()
	require.NoError(t, err)
	defer eventBus.Stop()

	eventChans[i] = make(chan interface{}, 1)
	err = eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock, eventChans[i])
	err := eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock, eventChans[i])
	require.NoError(t, err)
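The subscription pattern used by the test above, extracted into a minimal standalone program. `NewEventBus`, `SetLogger`, `Start`, `Stop`, `Subscribe`, and `EventQueryNewBlock` appear in this diff; the subscriber name, the timeout, and the logger choice are arbitrary, and nothing publishes events in this sketch.

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/tendermint/tendermint/types"
	"github.com/tendermint/tmlibs/log"
)

func main() {
	eventBus := types.NewEventBus()
	eventBus.SetLogger(log.TestingLogger())
	if err := eventBus.Start(); err != nil {
		panic(err)
	}
	defer eventBus.Stop()

	blocksCh := make(chan interface{}, 1)
	err := eventBus.Subscribe(context.Background(), "example-subscriber", types.EventQueryNewBlock, blocksCh)
	if err != nil {
		panic(err)
	}

	select {
	case ev := <-blocksCh:
		fmt.Printf("got new block event: %#v\n", ev)
	case <-time.After(time.Second):
		fmt.Println("no block event within 1s (nothing is publishing in this sketch)")
	}
}
```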
conR := NewConsensusReactor(css[i], true) // so we don't start the consensus states
|
||||
@@ -105,15 +102,18 @@ func TestByzantine(t *testing.T) {
|
||||
p2p.Connect2Switches(sws, i, j)
|
||||
})
|
||||
|
||||
// start the state machines
|
||||
byzR := reactors[0].(*ByzantineReactor)
|
||||
s := byzR.reactor.conS.GetState()
|
||||
byzR.reactor.SwitchToConsensus(s, 0)
|
||||
// start the non-byz state machines.
|
||||
// note these must be started before the byz
|
||||
for i := 1; i < N; i++ {
|
||||
cr := reactors[i].(*ConsensusReactor)
|
||||
cr.SwitchToConsensus(cr.conS.GetState(), 0)
|
||||
}
|
||||
|
||||
// start the byzantine state machine
|
||||
byzR := reactors[0].(*ByzantineReactor)
|
||||
s := byzR.reactor.conS.GetState()
|
||||
byzR.reactor.SwitchToConsensus(s, 0)
|
||||
|
||||
// byz proposer sends one block to peers[0]
|
||||
// and the other block to peers[1] and peers[2].
|
||||
// note peers and switches order don't match.
|
||||
|
@@ -19,9 +19,9 @@ import (
|
||||
cstypes "github.com/tendermint/tendermint/consensus/types"
|
||||
mempl "github.com/tendermint/tendermint/mempool"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
"github.com/tendermint/tendermint/privval"
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
pvm "github.com/tendermint/tendermint/types/priv_validator"
|
||||
cmn "github.com/tendermint/tmlibs/common"
|
||||
dbm "github.com/tendermint/tmlibs/db"
|
||||
"github.com/tendermint/tmlibs/log"
|
||||
@@ -262,9 +262,9 @@ func newConsensusStateWithConfigAndBlockStore(thisConfig *cfg.Config, state sm.S
|
||||
}
|
||||
|
||||
// mock the evidence pool
|
||||
evpool := types.MockEvidencePool{}
|
||||
evpool := sm.MockEvidencePool{}
|
||||
|
||||
// Make ConsensusReactor
|
||||
// Make ConsensusState
|
||||
stateDB := dbm.NewMemDB()
|
||||
blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyAppConnCon, mempool, evpool)
|
||||
cs := NewConsensusState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool)
|
||||
@@ -278,10 +278,10 @@ func newConsensusStateWithConfigAndBlockStore(thisConfig *cfg.Config, state sm.S
|
||||
return cs
|
||||
}
|
||||
|
||||
func loadPrivValidator(config *cfg.Config) *pvm.FilePV {
|
||||
func loadPrivValidator(config *cfg.Config) *privval.FilePV {
|
||||
privValidatorFile := config.PrivValidatorFile()
|
||||
ensureDir(path.Dir(privValidatorFile), 0700)
|
||||
privValidator := pvm.LoadOrGenFilePV(privValidatorFile)
|
||||
privValidator := privval.LoadOrGenFilePV(privValidatorFile)
|
||||
privValidator.Reset()
|
||||
return privValidator
|
||||
}
|
||||
@@ -379,7 +379,7 @@ func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerF
|
||||
privVal = privVals[i]
|
||||
} else {
|
||||
_, tempFilePath := cmn.Tempfile("priv_validator_")
|
||||
privVal = pvm.GenFilePV(tempFilePath)
|
||||
privVal = privval.GenFilePV(tempFilePath)
|
||||
}
|
||||
|
||||
app := appFunc()
|
||||
|
@@ -1,7 +1,6 @@
|
||||
package consensus
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sync"
|
||||
@@ -14,6 +13,7 @@ import (
|
||||
"github.com/tendermint/tmlibs/log"
|
||||
|
||||
cstypes "github.com/tendermint/tendermint/consensus/types"
|
||||
tmevents "github.com/tendermint/tendermint/libs/events"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
@@ -43,7 +43,8 @@ type ConsensusReactor struct {
|
||||
eventBus *types.EventBus
|
||||
}
|
||||
|
||||
// NewConsensusReactor returns a new ConsensusReactor with the given consensusState.
|
||||
// NewConsensusReactor returns a new ConsensusReactor with the given
|
||||
// consensusState.
|
||||
func NewConsensusReactor(consensusState *ConsensusState, fastSync bool) *ConsensusReactor {
|
||||
conR := &ConsensusReactor{
|
||||
conS: consensusState,
|
||||
@@ -53,17 +54,15 @@ func NewConsensusReactor(consensusState *ConsensusState, fastSync bool) *Consens
|
||||
return conR
|
||||
}
|
||||
|
||||
// OnStart implements BaseService.
|
||||
// OnStart implements BaseService by subscribing to events, which later will be
|
||||
// broadcasted to other peers and starting state if we're not in fast sync.
|
||||
func (conR *ConsensusReactor) OnStart() error {
|
||||
conR.Logger.Info("ConsensusReactor ", "fastSync", conR.FastSync())
|
||||
if err := conR.BaseReactor.OnStart(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err := conR.startBroadcastRoutine()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
conR.subscribeToBroadcastEvents()
|
||||
|
||||
if !conR.FastSync() {
|
||||
err := conR.conS.Start()
|
||||
@@ -75,9 +74,11 @@ func (conR *ConsensusReactor) OnStart() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// OnStop implements BaseService
|
||||
// OnStop implements BaseService by unsubscribing from events and stopping
|
||||
// state.
|
||||
func (conR *ConsensusReactor) OnStop() {
|
||||
conR.BaseReactor.OnStop()
|
||||
conR.unsubscribeFromBroadcastEvents()
|
||||
conR.conS.Stop()
|
||||
}
|
||||
|
||||
@@ -101,6 +102,7 @@ func (conR *ConsensusReactor) SwitchToConsensus(state sm.State, blocksSynced int
|
||||
err := conR.conS.Start()
|
||||
if err != nil {
|
||||
conR.Logger.Error("Error starting conS", "err", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
@@ -345,77 +347,40 @@ func (conR *ConsensusReactor) FastSync() bool {

//--------------------------------------

-// startBroadcastRoutine subscribes for new round steps, votes and proposal
-// heartbeats using the event bus and starts a go routine to broadcasts events
-// to peers upon receiving them.
-func (conR *ConsensusReactor) startBroadcastRoutine() error {
+// subscribeToBroadcastEvents subscribes for new round steps, votes and
+// proposal heartbeats using internal pubsub defined on state to broadcast
+// them to peers upon receiving.
+func (conR *ConsensusReactor) subscribeToBroadcastEvents() {
	const subscriber = "consensus-reactor"
-	ctx := context.Background()
+	conR.conS.evsw.AddListenerForEvent(subscriber, types.EventNewRoundStep,
+		func(data tmevents.EventData) {
+			conR.broadcastNewRoundStepMessages(data.(*cstypes.RoundState))
+		})

-	// new round steps
-	stepsCh := make(chan interface{})
-	err := conR.eventBus.Subscribe(ctx, subscriber, types.EventQueryNewRoundStep, stepsCh)
-	if err != nil {
-		return errors.Wrapf(err, "failed to subscribe %s to %s", subscriber, types.EventQueryNewRoundStep)
-	}
+	conR.conS.evsw.AddListenerForEvent(subscriber, types.EventVote,
+		func(data tmevents.EventData) {
+			conR.broadcastHasVoteMessage(data.(*types.Vote))
+		})

-	// votes
-	votesCh := make(chan interface{})
-	err = conR.eventBus.Subscribe(ctx, subscriber, types.EventQueryVote, votesCh)
-	if err != nil {
-		return errors.Wrapf(err, "failed to subscribe %s to %s", subscriber, types.EventQueryVote)
-	}

-	// proposal heartbeats
-	heartbeatsCh := make(chan interface{})
-	err = conR.eventBus.Subscribe(ctx, subscriber, types.EventQueryProposalHeartbeat, heartbeatsCh)
-	if err != nil {
-		return errors.Wrapf(err, "failed to subscribe %s to %s", subscriber, types.EventQueryProposalHeartbeat)
-	}

-	go func() {
-		var data interface{}
-		var ok bool
-		for {
-			select {
-			case data, ok = <-stepsCh:
-				if ok { // a receive from a closed channel returns the zero value immediately
-					edrs := data.(types.EventDataRoundState)
-					conR.broadcastNewRoundStep(edrs.RoundState.(*cstypes.RoundState))
-				}
-			case data, ok = <-votesCh:
-				if ok {
-					edv := data.(types.EventDataVote)
-					conR.broadcastHasVoteMessage(edv.Vote)
-				}
-			case data, ok = <-heartbeatsCh:
-				if ok {
-					edph := data.(types.EventDataProposalHeartbeat)
-					conR.broadcastProposalHeartbeatMessage(edph)
-				}
-			case <-conR.Quit():
-				conR.eventBus.UnsubscribeAll(ctx, subscriber)
-				return
-			}
-			if !ok {
-				conR.eventBus.UnsubscribeAll(ctx, subscriber)
-				return
-			}
-		}
-	}()

-	return nil
+	conR.conS.evsw.AddListenerForEvent(subscriber, types.EventProposalHeartbeat,
+		func(data tmevents.EventData) {
+			conR.broadcastProposalHeartbeatMessage(data.(*types.Heartbeat))
+		})
}

-func (conR *ConsensusReactor) broadcastProposalHeartbeatMessage(heartbeat types.EventDataProposalHeartbeat) {
-	hb := heartbeat.Heartbeat
+func (conR *ConsensusReactor) unsubscribeFromBroadcastEvents() {
+	const subscriber = "consensus-reactor"
+	conR.conS.evsw.RemoveListener(subscriber)
+}

+func (conR *ConsensusReactor) broadcastProposalHeartbeatMessage(hb *types.Heartbeat) {
	conR.Logger.Debug("Broadcasting proposal heartbeat message",
		"height", hb.Height, "round", hb.Round, "sequence", hb.Sequence)
	msg := &ProposalHeartbeatMessage{hb}
	conR.Switch.Broadcast(StateChannel, cdc.MustMarshalBinaryBare(msg))
}

-func (conR *ConsensusReactor) broadcastNewRoundStep(rs *cstypes.RoundState) {
+func (conR *ConsensusReactor) broadcastNewRoundStepMessages(rs *cstypes.RoundState) {
	nrsMsg, csMsg := makeRoundStepMessages(rs)
	if nrsMsg != nil {
		conR.Switch.Broadcast(StateChannel, cdc.MustMarshalBinaryBare(nrsMsg))
|
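For readers skimming the hunk above: the reactor stops subscribing to the event bus and instead registers callbacks on the consensus state's internal event switch, which fires them synchronously. A minimal, self-contained sketch of that listener pattern follows; it is a toy event switch for illustration only, not the tmlibs/events implementation, and all names in it are invented for the example.

package main

import "fmt"

// EventData is whatever payload a listener receives; the real code passes
// *cstypes.RoundState, *types.Vote or *types.Heartbeat.
type EventData interface{}

// eventSwitch is a toy, synchronous pubsub keyed by (listener ID, event name).
type eventSwitch struct {
	listeners map[string]map[string]func(EventData)
}

func newEventSwitch() *eventSwitch {
	return &eventSwitch{listeners: make(map[string]map[string]func(EventData))}
}

// AddListenerForEvent registers a callback for one event under a listener ID.
func (evsw *eventSwitch) AddListenerForEvent(listenerID, event string, cb func(EventData)) {
	if evsw.listeners[listenerID] == nil {
		evsw.listeners[listenerID] = make(map[string]func(EventData))
	}
	evsw.listeners[listenerID][event] = cb
}

// RemoveListener drops every callback registered under the listener ID.
func (evsw *eventSwitch) RemoveListener(listenerID string) {
	delete(evsw.listeners, listenerID)
}

// FireEvent invokes all callbacks registered for the event, synchronously.
func (evsw *eventSwitch) FireEvent(event string, data EventData) {
	for _, byEvent := range evsw.listeners {
		if cb, ok := byEvent[event]; ok {
			cb(data)
		}
	}
}

func main() {
	evsw := newEventSwitch()
	const subscriber = "consensus-reactor"
	// The reactor registers one broadcast callback per event type...
	evsw.AddListenerForEvent(subscriber, "NewRoundStep", func(data EventData) {
		fmt.Println("broadcast new round step:", data)
	})
	// ...and the consensus state fires events as steps happen.
	evsw.FireEvent("NewRoundStep", "height=1/round=0")
	// On OnStop the reactor unregisters everything at once.
	evsw.RemoveListener(subscriber)
}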
@@ -254,7 +254,8 @@ func TestReactorVotingPowerChange(t *testing.T) {
|
||||
logger.Debug("---------------------------- Testing changing the voting power of one validator a few times")
|
||||
|
||||
val1PubKey := css[0].privValidator.GetPubKey()
|
||||
updateValidatorTx := kvstore.MakeValSetChangeTx(val1PubKey.Bytes(), 25)
|
||||
val1PubKeyABCI := types.TM2PB.PubKey(val1PubKey)
|
||||
updateValidatorTx := kvstore.MakeValSetChangeTx(val1PubKeyABCI, 25)
|
||||
previousTotalVotingPower := css[0].GetRoundState().LastValidators.TotalVotingPower()
|
||||
|
||||
waitForAndValidateBlock(t, nVals, activeVals, eventChans, css, updateValidatorTx)
|
||||
@@ -266,7 +267,7 @@ func TestReactorVotingPowerChange(t *testing.T) {
|
||||
t.Fatalf("expected voting power to change (before: %d, after: %d)", previousTotalVotingPower, css[0].GetRoundState().LastValidators.TotalVotingPower())
|
||||
}
|
||||
|
||||
updateValidatorTx = kvstore.MakeValSetChangeTx(val1PubKey.Bytes(), 2)
|
||||
updateValidatorTx = kvstore.MakeValSetChangeTx(val1PubKeyABCI, 2)
|
||||
previousTotalVotingPower = css[0].GetRoundState().LastValidators.TotalVotingPower()
|
||||
|
||||
waitForAndValidateBlock(t, nVals, activeVals, eventChans, css, updateValidatorTx)
|
||||
@@ -278,7 +279,7 @@ func TestReactorVotingPowerChange(t *testing.T) {
|
||||
t.Fatalf("expected voting power to change (before: %d, after: %d)", previousTotalVotingPower, css[0].GetRoundState().LastValidators.TotalVotingPower())
|
||||
}
|
||||
|
||||
updateValidatorTx = kvstore.MakeValSetChangeTx(val1PubKey.Bytes(), 26)
|
||||
updateValidatorTx = kvstore.MakeValSetChangeTx(val1PubKeyABCI, 26)
|
||||
previousTotalVotingPower = css[0].GetRoundState().LastValidators.TotalVotingPower()
|
||||
|
||||
waitForAndValidateBlock(t, nVals, activeVals, eventChans, css, updateValidatorTx)
|
||||
@@ -316,7 +317,8 @@ func TestReactorValidatorSetChanges(t *testing.T) {
|
||||
logger.Info("---------------------------- Testing adding one validator")
|
||||
|
||||
newValidatorPubKey1 := css[nVals].privValidator.GetPubKey()
|
||||
newValidatorTx1 := kvstore.MakeValSetChangeTx(newValidatorPubKey1.Bytes(), testMinPower)
|
||||
valPubKey1ABCI := types.TM2PB.PubKey(newValidatorPubKey1)
|
||||
newValidatorTx1 := kvstore.MakeValSetChangeTx(valPubKey1ABCI, testMinPower)
|
||||
|
||||
// wait till everyone makes block 2
|
||||
// ensure the commit includes all validators
|
||||
@@ -342,7 +344,8 @@ func TestReactorValidatorSetChanges(t *testing.T) {
|
||||
logger.Info("---------------------------- Testing changing the voting power of one validator")
|
||||
|
||||
updateValidatorPubKey1 := css[nVals].privValidator.GetPubKey()
|
||||
updateValidatorTx1 := kvstore.MakeValSetChangeTx(updateValidatorPubKey1.Bytes(), 25)
|
||||
updatePubKey1ABCI := types.TM2PB.PubKey(updateValidatorPubKey1)
|
||||
updateValidatorTx1 := kvstore.MakeValSetChangeTx(updatePubKey1ABCI, 25)
|
||||
previousTotalVotingPower := css[nVals].GetRoundState().LastValidators.TotalVotingPower()
|
||||
|
||||
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css, updateValidatorTx1)
|
||||
@@ -358,10 +361,12 @@ func TestReactorValidatorSetChanges(t *testing.T) {
|
||||
logger.Info("---------------------------- Testing adding two validators at once")
|
||||
|
||||
newValidatorPubKey2 := css[nVals+1].privValidator.GetPubKey()
|
||||
newValidatorTx2 := kvstore.MakeValSetChangeTx(newValidatorPubKey2.Bytes(), testMinPower)
|
||||
newVal2ABCI := types.TM2PB.PubKey(newValidatorPubKey2)
|
||||
newValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, testMinPower)
|
||||
|
||||
newValidatorPubKey3 := css[nVals+2].privValidator.GetPubKey()
|
||||
newValidatorTx3 := kvstore.MakeValSetChangeTx(newValidatorPubKey3.Bytes(), testMinPower)
|
||||
newVal3ABCI := types.TM2PB.PubKey(newValidatorPubKey3)
|
||||
newValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, testMinPower)
|
||||
|
||||
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css, newValidatorTx2, newValidatorTx3)
|
||||
waitForAndValidateBlockWithTx(t, nPeers, activeVals, eventChans, css, newValidatorTx2, newValidatorTx3)
|
||||
@@ -373,8 +378,8 @@ func TestReactorValidatorSetChanges(t *testing.T) {
|
||||
//---------------------------------------------------------------------------
|
||||
logger.Info("---------------------------- Testing removing two validators at once")
|
||||
|
||||
removeValidatorTx2 := kvstore.MakeValSetChangeTx(newValidatorPubKey2.Bytes(), 0)
|
||||
removeValidatorTx3 := kvstore.MakeValSetChangeTx(newValidatorPubKey3.Bytes(), 0)
|
||||
removeValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, 0)
|
||||
removeValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, 0)
|
||||
|
||||
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css, removeValidatorTx2, removeValidatorTx3)
|
||||
waitForAndValidateBlockWithTx(t, nPeers, activeVals, eventChans, css, removeValidatorTx2, removeValidatorTx3)
|
||||
|
@@ -2,7 +2,6 @@ package consensus
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"hash/crc32"
|
||||
"io"
|
||||
@@ -26,20 +25,24 @@ import (
|
||||
var crc32c = crc32.MakeTable(crc32.Castagnoli)
|
||||
|
||||
// Functionality to replay blocks and messages on recovery from a crash.
|
||||
// There are two general failure scenarios: failure during consensus, and failure while applying the block.
|
||||
// The former is handled by the WAL, the latter by the proxyApp Handshake on restart,
|
||||
// which ultimately hands off the work to the WAL.
|
||||
// There are two general failure scenarios:
|
||||
//
|
||||
// 1. failure during consensus
|
||||
// 2. failure while applying the block
|
||||
//
|
||||
// The former is handled by the WAL, the latter by the proxyApp Handshake on
|
||||
// restart, which ultimately hands off the work to the WAL.
|
||||
|
||||
//-----------------------------------------
|
||||
// recover from failure during consensus
|
||||
// by replaying messages from the WAL
|
||||
// 1. Recover from failure during consensus
|
||||
// (by replaying messages from the WAL)
|
||||
//-----------------------------------------
|
||||
|
||||
// Unmarshal and apply a single message to the consensus state
|
||||
// as if it were received in receiveRoutine
|
||||
// Lines that start with "#" are ignored.
|
||||
// NOTE: receiveRoutine should not be running
|
||||
// Unmarshal and apply a single message to the consensus state as if it were
|
||||
// received in receiveRoutine. Lines that start with "#" are ignored.
|
||||
// NOTE: receiveRoutine should not be running.
|
||||
func (cs *ConsensusState) readReplayMessage(msg *TimedWALMessage, newStepCh chan interface{}) error {
|
||||
// skip meta messages
|
||||
// Skip meta messages which exist for demarcating boundaries.
|
||||
if _, ok := msg.Msg.(EndHeightMessage); ok {
|
||||
return nil
|
||||
}
|
||||
@@ -89,17 +92,18 @@ func (cs *ConsensusState) readReplayMessage(msg *TimedWALMessage, newStepCh chan
|
||||
return nil
|
||||
}
|
||||
|
||||
// replay only those messages since the last block.
|
||||
// timeoutRoutine should run concurrently to read off tickChan
|
||||
// Replay only those messages since the last block. `timeoutRoutine` should
|
||||
// run concurrently to read off tickChan.
|
||||
func (cs *ConsensusState) catchupReplay(csHeight int64) error {
|
||||
// set replayMode
|
||||
|
||||
// Set replayMode to true so we don't log signing errors.
|
||||
cs.replayMode = true
|
||||
defer func() { cs.replayMode = false }()
|
||||
|
||||
// Ensure that ENDHEIGHT for this height doesn't exist.
|
||||
// Ensure that #ENDHEIGHT for this height doesn't exist.
|
||||
// NOTE: This is just a sanity check. As far as we know things work fine
|
||||
// without it, and Handshake could reuse ConsensusState if it weren't for
|
||||
// this check (since we can crash after writing ENDHEIGHT).
|
||||
// this check (since we can crash after writing #ENDHEIGHT).
|
||||
//
|
||||
// Ignore data corruption errors since this is a sanity check.
|
||||
gr, found, err := cs.wal.SearchForEndHeight(csHeight, &WALSearchOptions{IgnoreDataCorruptionErrors: true})
|
||||
@@ -115,7 +119,7 @@ func (cs *ConsensusState) catchupReplay(csHeight int64) error {
|
||||
return fmt.Errorf("WAL should not contain #ENDHEIGHT %d", csHeight)
|
||||
}
|
||||
|
||||
// Search for last height marker
|
||||
// Search for last height marker.
|
||||
//
|
||||
// Ignore data corruption errors in previous heights because we only care about last height
|
||||
gr, found, err = cs.wal.SearchForEndHeight(csHeight-1, &WALSearchOptions{IgnoreDataCorruptionErrors: true})
|
||||
@@ -182,29 +186,30 @@ func makeHeightSearchFunc(height int64) auto.SearchFunc {
	}
}*/

-//----------------------------------------------
-// Recover from failure during block processing
-// by handshaking with the app to figure out where
-// we were last and using the WAL to recover there
+//---------------------------------------------------
+// 2. Recover from failure while applying the block.
+// (by handshaking with the app to figure out where
+// we were last, and using the WAL to recover there.)
+//---------------------------------------------------

type Handshaker struct {
	stateDB      dbm.DB
	initialState sm.State
-	store        types.BlockStore
-	appState     json.RawMessage
+	store        sm.BlockStore
+	genDoc       *types.GenesisDoc
	logger       log.Logger

	nBlocks int // number of blocks applied to the state
}

func NewHandshaker(stateDB dbm.DB, state sm.State,
-	store types.BlockStore, appState json.RawMessage) *Handshaker {
+	store sm.BlockStore, genDoc *types.GenesisDoc) *Handshaker {

	return &Handshaker{
		stateDB:      stateDB,
		initialState: state,
		store:        store,
-		appState:     appState,
+		genDoc:       genDoc,
		logger:       log.NewNopLogger(),
		nBlocks:      0,
	}
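A sketch of how the new constructor is wired up, based only on calls that appear elsewhere in this diff (sm.MakeGenesisDocFromFile, NewHandshaker, proxy.NewAppConns); the wrapper function name and error handling are illustrative, not part of the change:

// startProxyAppWithHandshake wires the pieces the way the updated tests do.
// The function name itself is hypothetical; the calls come from this diff.
func startProxyAppWithHandshake(config *cfg.Config, stateDB dbm.DB, state sm.State,
	blockStore sm.BlockStore, clientCreator proxy.ClientCreator) (proxy.AppConns, error) {

	genDoc, err := sm.MakeGenesisDocFromFile(config.GenesisFile())
	if err != nil {
		return nil, err
	}
	// The Handshaker now receives the whole genesis doc instead of raw
	// appState JSON, so InitChain can also carry ChainId, GenesisTime and
	// ConsensusParams.
	handshaker := NewHandshaker(stateDB, state, blockStore, genDoc)
	proxyApp := proxy.NewAppConns(clientCreator, handshaker)
	if err := proxyApp.Start(); err != nil {
		return nil, fmt.Errorf("error starting proxy app conns: %v", err)
	}
	return proxyApp, nil
}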
@@ -220,7 +225,8 @@ func (h *Handshaker) NBlocks() int {
|
||||
|
||||
// TODO: retry the handshake/replay if it fails ?
|
||||
func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error {
|
||||
// handshake is done via info request on the query conn
|
||||
|
||||
// Handshake is done via ABCI Info on the query conn.
|
||||
res, err := proxyApp.Query().InfoSync(abci.RequestInfo{version.Version})
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error calling Info: %v", err)
|
||||
@@ -234,15 +240,16 @@ func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error {
|
||||
|
||||
h.logger.Info("ABCI Handshake", "appHeight", blockHeight, "appHash", fmt.Sprintf("%X", appHash))
|
||||
|
||||
// TODO: check version
|
||||
// TODO: check app version.
|
||||
|
||||
// replay blocks up to the latest in the blockstore
|
||||
// Replay blocks up to the latest in the blockstore.
|
||||
_, err = h.ReplayBlocks(h.initialState, appHash, blockHeight, proxyApp)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error on replay: %v", err)
|
||||
}
|
||||
|
||||
h.logger.Info("Completed ABCI Handshake - Tendermint and App are synced", "appHeight", blockHeight, "appHash", fmt.Sprintf("%X", appHash))
|
||||
h.logger.Info("Completed ABCI Handshake - Tendermint and App are synced",
|
||||
"appHeight", blockHeight, "appHash", fmt.Sprintf("%X", appHash))
|
||||
|
||||
// TODO: (on restart) replay mempool
|
||||
|
||||
@@ -250,7 +257,7 @@ func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error {
|
||||
}
|
||||
|
||||
// Replay all blocks since appBlockHeight and ensure the result matches the current state.
|
||||
// Returns the final AppHash or an error
|
||||
// Returns the final AppHash or an error.
|
||||
func (h *Handshaker) ReplayBlocks(state sm.State, appHash []byte, appBlockHeight int64, proxyApp proxy.AppConns) ([]byte, error) {
|
||||
|
||||
storeBlockHeight := h.store.Height()
|
||||
@@ -260,14 +267,33 @@ func (h *Handshaker) ReplayBlocks(state sm.State, appHash []byte, appBlockHeight
|
||||
// If appBlockHeight == 0 it means that we are at genesis and hence should send InitChain
|
||||
if appBlockHeight == 0 {
|
||||
validators := types.TM2PB.Validators(state.Validators)
|
||||
csParams := types.TM2PB.ConsensusParams(h.genDoc.ConsensusParams)
|
||||
req := abci.RequestInitChain{
|
||||
Validators: validators,
|
||||
AppStateBytes: h.appState,
|
||||
Time: h.genDoc.GenesisTime.Unix(), // TODO
|
||||
ChainId: h.genDoc.ChainID,
|
||||
ConsensusParams: csParams,
|
||||
Validators: validators,
|
||||
AppStateBytes: h.genDoc.AppStateJSON,
|
||||
}
|
||||
_, err := proxyApp.Consensus().InitChainSync(req)
|
||||
res, err := proxyApp.Consensus().InitChainSync(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// if the app returned validators
|
||||
// or consensus params, update the state
|
||||
// with the them
|
||||
if len(res.Validators) > 0 {
|
||||
vals, err := types.PB2TM.Validators(res.Validators)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
state.Validators = types.NewValidatorSet(vals)
|
||||
}
|
||||
if res.ConsensusParams != nil {
|
||||
state.ConsensusParams = types.PB2TM.ConsensusParams(res.ConsensusParams)
|
||||
}
|
||||
sm.SaveState(h.stateDB, state)
|
||||
}
|
||||
|
||||
// First handle edge cases and constraints on the storeBlockHeight
|
||||
@@ -314,7 +340,7 @@ func (h *Handshaker) ReplayBlocks(state sm.State, appHash []byte, appBlockHeight
|
||||
// We haven't run Commit (both the state and app are one block behind),
|
||||
// so replayBlock with the real app.
|
||||
// NOTE: We could instead use the cs.WAL on cs.Start,
|
||||
// but we'd have to allow the WAL to replay a block that wrote it's ENDHEIGHT
|
||||
// but we'd have to allow the WAL to replay a block that wrote it's #ENDHEIGHT
|
||||
h.logger.Info("Replay last block using real app")
|
||||
state, err = h.replayBlock(state, storeBlockHeight, proxyApp.Consensus())
|
||||
return state.AppHash, err
|
||||
@@ -357,7 +383,7 @@ func (h *Handshaker) replayBlocks(state sm.State, proxyApp proxy.AppConns, appBl
|
||||
for i := appBlockHeight + 1; i <= finalBlock; i++ {
|
||||
h.logger.Info("Applying block", "height", i)
|
||||
block := h.store.LoadBlock(i)
|
||||
appHash, err = sm.ExecCommitBlock(proxyApp.Consensus(), block, h.logger)
|
||||
appHash, err = sm.ExecCommitBlock(proxyApp.Consensus(), block, h.logger, state.LastValidators, h.stateDB)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -382,7 +408,7 @@ func (h *Handshaker) replayBlock(state sm.State, height int64, proxyApp proxy.Ap
|
||||
block := h.store.LoadBlock(height)
|
||||
meta := h.store.LoadBlockMeta(height)
|
||||
|
||||
blockExec := sm.NewBlockExecutor(h.stateDB, h.logger, proxyApp, types.MockMempool{}, types.MockEvidencePool{})
|
||||
blockExec := sm.NewBlockExecutor(h.stateDB, h.logger, proxyApp, sm.MockMempool{}, sm.MockEvidencePool{})
|
||||
|
||||
var err error
|
||||
state, err = blockExec.ApplyBlock(state, meta.BlockID, block)
|
||||
|
@@ -299,7 +299,7 @@ func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusCo
|
||||
// Create proxyAppConn connection (consensus, mempool, query)
|
||||
clientCreator := proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir())
|
||||
proxyApp := proxy.NewAppConns(clientCreator,
|
||||
NewHandshaker(stateDB, state, blockStore, gdoc.AppState()))
|
||||
NewHandshaker(stateDB, state, blockStore, gdoc))
|
||||
err = proxyApp.Start()
|
||||
if err != nil {
|
||||
cmn.Exit(cmn.Fmt("Error starting proxy app conns: %v", err))
|
||||
@@ -310,7 +310,7 @@ func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusCo
|
||||
cmn.Exit(cmn.Fmt("Failed to start event bus: %v", err))
|
||||
}
|
||||
|
||||
mempool, evpool := types.MockMempool{}, types.MockEvidencePool{}
|
||||
mempool, evpool := sm.MockMempool{}, sm.MockEvidencePool{}
|
||||
blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool)
|
||||
|
||||
consensusState := NewConsensusState(csConfig, state.Copy(), blockExec,
|
||||
|
@@ -13,6 +13,7 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/tendermint/abci/example/kvstore"
|
||||
@@ -23,10 +24,10 @@ import (
|
||||
dbm "github.com/tendermint/tmlibs/db"
|
||||
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
"github.com/tendermint/tendermint/privval"
|
||||
"github.com/tendermint/tendermint/proxy"
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
pvm "github.com/tendermint/tendermint/types/priv_validator"
|
||||
"github.com/tendermint/tmlibs/log"
|
||||
)
|
||||
|
||||
@@ -218,15 +219,15 @@ func (e ReachedHeightToStopError) Error() string {
|
||||
return fmt.Sprintf("reached height to stop %d", e.height)
|
||||
}
|
||||
|
||||
// Save simulate WAL's crashing by sending an error to the panicCh and then
|
||||
// Write simulate WAL's crashing by sending an error to the panicCh and then
|
||||
// exiting the cs.receiveRoutine.
|
||||
func (w *crashingWAL) Save(m WALMessage) {
|
||||
func (w *crashingWAL) Write(m WALMessage) {
|
||||
if endMsg, ok := m.(EndHeightMessage); ok {
|
||||
if endMsg.Height == w.heightToStop {
|
||||
w.panicCh <- ReachedHeightToStopError{endMsg.Height}
|
||||
runtime.Goexit()
|
||||
} else {
|
||||
w.next.Save(m)
|
||||
w.next.Write(m)
|
||||
}
|
||||
return
|
||||
}
|
||||
@@ -238,10 +239,14 @@ func (w *crashingWAL) Save(m WALMessage) {
|
||||
runtime.Goexit()
|
||||
} else {
|
||||
w.msgIndex++
|
||||
w.next.Save(m)
|
||||
w.next.Write(m)
|
||||
}
|
||||
}
|
||||
|
||||
func (w *crashingWAL) WriteSync(m WALMessage) {
|
||||
w.Write(m)
|
||||
}
|
||||
|
||||
func (w *crashingWAL) Group() *auto.Group { return w.next.Group() }
|
||||
func (w *crashingWAL) SearchForEndHeight(height int64, options *WALSearchOptions) (gr *auto.GroupReader, found bool, err error) {
|
||||
return w.next.SearchForEndHeight(height, options)
|
||||
@@ -259,8 +264,8 @@ const (
|
||||
)
|
||||
|
||||
var (
|
||||
mempool = types.MockMempool{}
|
||||
evpool = types.MockEvidencePool{}
|
||||
mempool = sm.MockMempool{}
|
||||
evpool = sm.MockEvidencePool{}
|
||||
)
|
||||
|
||||
//---------------------------------------
|
||||
@@ -325,7 +330,7 @@ func testHandshakeReplay(t *testing.T, nBlocks int, mode uint) {
|
||||
walFile := tempWALWithData(walBody)
|
||||
config.Consensus.SetWalFile(walFile)
|
||||
|
||||
privVal := pvm.LoadFilePV(config.PrivValidatorFile())
|
||||
privVal := privval.LoadFilePV(config.PrivValidatorFile())
|
||||
|
||||
wal, err := NewWAL(walFile)
|
||||
if err != nil {
|
||||
@@ -362,7 +367,8 @@ func testHandshakeReplay(t *testing.T, nBlocks int, mode uint) {
|
||||
}
|
||||
|
||||
// now start the app using the handshake - it should sync
|
||||
handshaker := NewHandshaker(stateDB, state, store, nil)
|
||||
genDoc, _ := sm.MakeGenesisDocFromFile(config.GenesisFile())
|
||||
handshaker := NewHandshaker(stateDB, state, store, genDoc)
|
||||
proxyApp := proxy.NewAppConns(clientCreator2, handshaker)
|
||||
if err := proxyApp.Start(); err != nil {
|
||||
t.Fatalf("Error starting proxy app connections: %v", err)
|
||||
@@ -412,10 +418,10 @@ func buildAppStateFromChain(proxyApp proxy.AppConns, stateDB dbm.DB,
|
||||
}
|
||||
defer proxyApp.Stop()
|
||||
|
||||
// TODO: get the genesis bytes (https://github.com/tendermint/tendermint/issues/1224)
|
||||
var genesisBytes []byte
|
||||
validators := types.TM2PB.Validators(state.Validators)
|
||||
if _, err := proxyApp.Consensus().InitChainSync(abci.RequestInitChain{validators, genesisBytes}); err != nil {
|
||||
if _, err := proxyApp.Consensus().InitChainSync(abci.RequestInitChain{
|
||||
Validators: validators,
|
||||
}); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
@@ -449,10 +455,10 @@ func buildTMStateFromChain(config *cfg.Config, stateDB dbm.DB, state sm.State, c
|
||||
}
|
||||
defer proxyApp.Stop()
|
||||
|
||||
// TODO: get the genesis bytes (https://github.com/tendermint/tendermint/issues/1224)
|
||||
var genesisBytes []byte
|
||||
validators := types.TM2PB.Validators(state.Validators)
|
||||
if _, err := proxyApp.Consensus().InitChainSync(abci.RequestInitChain{validators, genesisBytes}); err != nil {
|
||||
if _, err := proxyApp.Consensus().InitChainSync(abci.RequestInitChain{
|
||||
Validators: validators,
|
||||
}); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
@@ -538,7 +544,7 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) {
|
||||
case *types.PartSetHeader:
|
||||
thisBlockParts = types.NewPartSetFromHeader(*p)
|
||||
case *types.Part:
|
||||
_, err := thisBlockParts.AddPart(p, false)
|
||||
_, err := thisBlockParts.AddPart(p)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
@@ -629,3 +635,53 @@ func (bs *mockBlockStore) LoadBlockCommit(height int64) *types.Commit {
|
||||
func (bs *mockBlockStore) LoadSeenCommit(height int64) *types.Commit {
|
||||
return bs.commits[height-1]
|
||||
}
|
||||
|
||||
//----------------------------------------
|
||||
|
||||
func TestInitChainUpdateValidators(t *testing.T) {
|
||||
val, _ := types.RandValidator(true, 10)
|
||||
vals := types.NewValidatorSet([]*types.Validator{val})
|
||||
app := &initChainApp{vals: types.TM2PB.Validators(vals)}
|
||||
clientCreator := proxy.NewLocalClientCreator(app)
|
||||
|
||||
config := ResetConfig("proxy_test_")
|
||||
privVal := privval.LoadFilePV(config.PrivValidatorFile())
|
||||
stateDB, state, store := stateAndStore(config, privVal.GetPubKey())
|
||||
|
||||
oldValAddr := state.Validators.Validators[0].Address
|
||||
|
||||
// now start the app using the handshake - it should sync
|
||||
genDoc, _ := sm.MakeGenesisDocFromFile(config.GenesisFile())
|
||||
handshaker := NewHandshaker(stateDB, state, store, genDoc)
|
||||
proxyApp := proxy.NewAppConns(clientCreator, handshaker)
|
||||
if err := proxyApp.Start(); err != nil {
|
||||
t.Fatalf("Error starting proxy app connections: %v", err)
|
||||
}
|
||||
defer proxyApp.Stop()
|
||||
|
||||
// reload the state, check the validator set was updated
|
||||
state = sm.LoadState(stateDB)
|
||||
|
||||
newValAddr := state.Validators.Validators[0].Address
|
||||
expectValAddr := val.Address
|
||||
assert.NotEqual(t, oldValAddr, newValAddr)
|
||||
assert.Equal(t, newValAddr, expectValAddr)
|
||||
}
|
||||
|
||||
func newInitChainApp(vals []abci.Validator) *initChainApp {
|
||||
return &initChainApp{
|
||||
vals: vals,
|
||||
}
|
||||
}
|
||||
|
||||
// returns the vals on InitChain
|
||||
type initChainApp struct {
|
||||
abci.BaseApplication
|
||||
vals []abci.Validator
|
||||
}
|
||||
|
||||
func (ica *initChainApp) InitChain(req abci.RequestInitChain) abci.ResponseInitChain {
|
||||
return abci.ResponseInitChain{
|
||||
Validators: ica.vals,
|
||||
}
|
||||
}
|
||||
|
@@ -15,6 +15,7 @@ import (
|
||||
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
cstypes "github.com/tendermint/tendermint/consensus/types"
|
||||
tmevents "github.com/tendermint/tendermint/libs/events"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
@@ -75,9 +76,9 @@ type ConsensusState struct {
|
||||
// services for creating and executing blocks
|
||||
// TODO: encapsulate all of this in one "BlockManager"
|
||||
blockExec *sm.BlockExecutor
|
||||
blockStore types.BlockStore
|
||||
mempool types.Mempool
|
||||
evpool types.EvidencePool
|
||||
blockStore sm.BlockStore
|
||||
mempool sm.Mempool
|
||||
evpool sm.EvidencePool
|
||||
|
||||
// internal state
|
||||
mtx sync.Mutex
|
||||
@@ -110,10 +111,14 @@ type ConsensusState struct {
|
||||
|
||||
// closed when we finish shutting down
|
||||
done chan struct{}
|
||||
|
||||
// synchronous pubsub between consensus state and reactor.
|
||||
// state only emits EventNewRoundStep, EventVote and EventProposalHeartbeat
|
||||
evsw tmevents.EventSwitch
|
||||
}
|
||||
|
||||
// NewConsensusState returns a new ConsensusState.
|
||||
func NewConsensusState(config *cfg.ConsensusConfig, state sm.State, blockExec *sm.BlockExecutor, blockStore types.BlockStore, mempool types.Mempool, evpool types.EvidencePool) *ConsensusState {
|
||||
func NewConsensusState(config *cfg.ConsensusConfig, state sm.State, blockExec *sm.BlockExecutor, blockStore sm.BlockStore, mempool sm.Mempool, evpool sm.EvidencePool) *ConsensusState {
|
||||
cs := &ConsensusState{
|
||||
config: config,
|
||||
blockExec: blockExec,
|
||||
@@ -126,6 +131,7 @@ func NewConsensusState(config *cfg.ConsensusConfig, state sm.State, blockExec *s
|
||||
doWALCatchup: true,
|
||||
wal: nilWAL{},
|
||||
evpool: evpool,
|
||||
evsw: tmevents.NewEventSwitch(),
|
||||
}
|
||||
// set function defaults (may be overwritten before calling Start)
|
||||
cs.decideProposal = cs.defaultDecideProposal
|
||||
@@ -227,6 +233,10 @@ func (cs *ConsensusState) LoadCommit(height int64) *types.Commit {
|
||||
// OnStart implements cmn.Service.
|
||||
// It loads the latest state via the WAL, and starts the timeout and receive routines.
|
||||
func (cs *ConsensusState) OnStart() error {
|
||||
if err := cs.evsw.Start(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// we may set the WAL in testing before calling Start,
|
||||
// so only OpenWAL if its still the nilWAL
|
||||
if _, ok := cs.wal.(nilWAL); ok {
|
||||
@@ -244,8 +254,7 @@ func (cs *ConsensusState) OnStart() error {
|
||||
// NOTE: we will get a build up of garbage go routines
|
||||
// firing on the tockChan until the receiveRoutine is started
|
||||
// to deal with them (by that point, at most one will be valid)
|
||||
err := cs.timeoutTicker.Start()
|
||||
if err != nil {
|
||||
if err := cs.timeoutTicker.Start(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -284,6 +293,8 @@ func (cs *ConsensusState) startRoutines(maxSteps int) {
|
||||
func (cs *ConsensusState) OnStop() {
|
||||
cs.BaseService.OnStop()
|
||||
|
||||
cs.evsw.Stop()
|
||||
|
||||
cs.timeoutTicker.Stop()
|
||||
|
||||
// Make BaseService.Wait() wait until cs.wal.Wait()
|
||||
@@ -504,11 +515,12 @@ func (cs *ConsensusState) updateToState(state sm.State) {

func (cs *ConsensusState) newStep() {
	rs := cs.RoundStateEvent()
-	cs.wal.Save(rs)
+	cs.wal.Write(rs)
	cs.nSteps++
	// newStep is called by updateToStep in NewConsensusState before the eventBus is set!
	if cs.eventBus != nil {
		cs.eventBus.PublishEventNewRoundStep(rs)
+		cs.evsw.FireEvent(types.EventNewRoundStep, &cs.RoundState)
	}
}

@@ -542,16 +554,16 @@ func (cs *ConsensusState) receiveRoutine(maxSteps int) {
		case height := <-cs.mempool.TxsAvailable():
			cs.handleTxsAvailable(height)
		case mi = <-cs.peerMsgQueue:
-			cs.wal.Save(mi)
+			cs.wal.Write(mi)
			// handles proposals, block parts, votes
			// may generate internal events (votes, complete proposals, 2/3 majorities)
			cs.handleMsg(mi)
		case mi = <-cs.internalMsgQueue:
-			cs.wal.Save(mi)
+			cs.wal.WriteSync(mi) // NOTE: fsync
			// handles proposals, block parts, votes
			cs.handleMsg(mi)
		case ti := <-cs.timeoutTicker.Chan(): // tockChan:
-			cs.wal.Save(ti)
+			cs.wal.Write(ti)
			// if the timeout is relevant to the rs
			// go to the next step
			cs.handleTimeout(ti, rs)
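The Save/Write/WriteSync split above is essentially "buffered append" versus "append plus fsync": peer messages and timeouts are merely buffered, while the node's own messages are forced to disk before they can be signed and sent. A self-contained toy WAL showing that distinction (names and file format are illustrative, not the tendermint implementation):

package main

import (
	"bufio"
	"log"
	"os"
)

// toyWAL buffers writes and only forces them to disk on WriteSync,
// mirroring the Write (no fsync) / WriteSync (fsync) split above.
type toyWAL struct {
	f  *os.File
	bw *bufio.Writer
}

func openToyWAL(path string) (*toyWAL, error) {
	f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
	if err != nil {
		return nil, err
	}
	return &toyWAL{f: f, bw: bufio.NewWriter(f)}, nil
}

// Write appends to the in-memory buffer; cheap, but may be lost on a crash.
func (w *toyWAL) Write(msg string) error {
	_, err := w.bw.WriteString(msg + "\n")
	return err
}

// WriteSync flushes the buffer and fsyncs, so the message survives a crash.
func (w *toyWAL) WriteSync(msg string) error {
	if err := w.Write(msg); err != nil {
		return err
	}
	if err := w.bw.Flush(); err != nil {
		return err
	}
	return w.f.Sync()
}

func main() {
	wal, err := openToyWAL("toy.wal")
	if err != nil {
		log.Fatal(err)
	}
	defer wal.f.Close()
	wal.Write("peer vote")            // buffered only
	wal.WriteSync("our own proposal") // flushed + fsynced
}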
@@ -584,8 +596,9 @@ func (cs *ConsensusState) handleMsg(mi msgInfo) {
|
||||
err = cs.setProposal(msg.Proposal)
|
||||
case *BlockPartMessage:
|
||||
// if the proposal is complete, we'll enterPrevote or tryFinalizeCommit
|
||||
_, err = cs.addProposalBlockPart(msg.Height, msg.Part, peerID != "")
|
||||
_, err = cs.addProposalBlockPart(msg.Height, msg.Part)
|
||||
if err != nil && msg.Round != cs.Round {
|
||||
cs.Logger.Debug("Received block part from wrong round", "height", cs.Height, "csRound", cs.Round, "blockRound", msg.Round)
|
||||
err = nil
|
||||
}
|
||||
case *VoteMessage:
|
||||
@@ -610,7 +623,7 @@ func (cs *ConsensusState) handleMsg(mi msgInfo) {
|
||||
cs.Logger.Error("Unknown msg type", reflect.TypeOf(msg))
|
||||
}
|
||||
if err != nil {
|
||||
cs.Logger.Error("Error with msg", "type", reflect.TypeOf(msg), "peer", peerID, "err", err, "msg", msg)
|
||||
cs.Logger.Error("Error with msg", "height", cs.Height, "round", cs.Round, "type", reflect.TypeOf(msg), "peer", peerID, "err", err, "msg", msg)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -667,16 +680,18 @@ func (cs *ConsensusState) handleTxsAvailable(height int64) {
|
||||
// Enter: +2/3 prevotes any or +2/3 precommits for block or any from (height, round)
|
||||
// NOTE: cs.StartTime was already set for height.
|
||||
func (cs *ConsensusState) enterNewRound(height int64, round int) {
|
||||
logger := cs.Logger.With("height", height, "round", round)
|
||||
|
||||
if cs.Height != height || round < cs.Round || (cs.Round == round && cs.Step != cstypes.RoundStepNewHeight) {
|
||||
cs.Logger.Debug(cmn.Fmt("enterNewRound(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
|
||||
logger.Debug(cmn.Fmt("enterNewRound(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
|
||||
return
|
||||
}
|
||||
|
||||
if now := time.Now(); cs.StartTime.After(now) {
|
||||
cs.Logger.Info("Need to set a buffer and log message here for sanity.", "startTime", cs.StartTime, "now", now)
|
||||
logger.Info("Need to set a buffer and log message here for sanity.", "startTime", cs.StartTime, "now", now)
|
||||
}
|
||||
|
||||
cs.Logger.Info(cmn.Fmt("enterNewRound(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
|
||||
logger.Info(cmn.Fmt("enterNewRound(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
|
||||
|
||||
// Increment validators if necessary
|
||||
validators := cs.Validators
|
||||
@@ -695,6 +710,7 @@ func (cs *ConsensusState) enterNewRound(height int64, round int) {
|
||||
// and meanwhile we might have received a proposal
|
||||
// for round 0.
|
||||
} else {
|
||||
logger.Info("Resetting Proposal info")
|
||||
cs.Proposal = nil
|
||||
cs.ProposalBlock = nil
|
||||
cs.ProposalBlockParts = nil
|
||||
@@ -748,6 +764,7 @@ func (cs *ConsensusState) proposalHeartbeat(height int64, round int) {
|
||||
}
|
||||
cs.privValidator.SignHeartbeat(chainID, heartbeat)
|
||||
cs.eventBus.PublishEventProposalHeartbeat(types.EventDataProposalHeartbeat{heartbeat})
|
||||
cs.evsw.FireEvent(types.EventProposalHeartbeat, heartbeat)
|
||||
counter++
|
||||
time.Sleep(proposalHeartbeatIntervalSeconds * time.Second)
|
||||
}
|
||||
@@ -757,11 +774,13 @@ func (cs *ConsensusState) proposalHeartbeat(height int64, round int) {
|
||||
// Enter (CreateEmptyBlocks, CreateEmptyBlocksInterval > 0 ): after enterNewRound(height,round), after timeout of CreateEmptyBlocksInterval
|
||||
// Enter (!CreateEmptyBlocks) : after enterNewRound(height,round), once txs are in the mempool
|
||||
func (cs *ConsensusState) enterPropose(height int64, round int) {
|
||||
logger := cs.Logger.With("height", height, "round", round)
|
||||
|
||||
if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPropose <= cs.Step) {
|
||||
cs.Logger.Debug(cmn.Fmt("enterPropose(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
|
||||
logger.Debug(cmn.Fmt("enterPropose(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
|
||||
return
|
||||
}
|
||||
cs.Logger.Info(cmn.Fmt("enterPropose(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
|
||||
logger.Info(cmn.Fmt("enterPropose(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
|
||||
|
||||
defer func() {
|
||||
// Done enterPropose:
|
||||
@@ -781,22 +800,22 @@ func (cs *ConsensusState) enterPropose(height int64, round int) {
|
||||
|
||||
// Nothing more to do if we're not a validator
|
||||
if cs.privValidator == nil {
|
||||
cs.Logger.Debug("This node is not a validator")
|
||||
logger.Debug("This node is not a validator")
|
||||
return
|
||||
}
|
||||
|
||||
// if not a validator, we're done
|
||||
if !cs.Validators.HasAddress(cs.privValidator.GetAddress()) {
|
||||
cs.Logger.Debug("This node is not a validator", "addr", cs.privValidator.GetAddress(), "vals", cs.Validators)
|
||||
logger.Debug("This node is not a validator", "addr", cs.privValidator.GetAddress(), "vals", cs.Validators)
|
||||
return
|
||||
}
|
||||
cs.Logger.Debug("This node is a validator")
|
||||
logger.Debug("This node is a validator")
|
||||
|
||||
if cs.isProposer() {
|
||||
cs.Logger.Info("enterPropose: Our turn to propose", "proposer", cs.Validators.GetProposer().Address, "privValidator", cs.privValidator)
|
||||
logger.Info("enterPropose: Our turn to propose", "proposer", cs.Validators.GetProposer().Address, "privValidator", cs.privValidator)
|
||||
cs.decideProposal(height, round)
|
||||
} else {
|
||||
cs.Logger.Info("enterPropose: Not our turn to propose", "proposer", cs.Validators.GetProposer().Address, "privValidator", cs.privValidator)
|
||||
logger.Info("enterPropose: Not our turn to propose", "proposer", cs.Validators.GetProposer().Address, "privValidator", cs.privValidator)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -959,14 +978,16 @@ func (cs *ConsensusState) defaultDoPrevote(height int64, round int) {
|
||||
|
||||
// Enter: any +2/3 prevotes at next round.
|
||||
func (cs *ConsensusState) enterPrevoteWait(height int64, round int) {
|
||||
logger := cs.Logger.With("height", height, "round", round)
|
||||
|
||||
if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrevoteWait <= cs.Step) {
|
||||
cs.Logger.Debug(cmn.Fmt("enterPrevoteWait(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
|
||||
logger.Debug(cmn.Fmt("enterPrevoteWait(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
|
||||
return
|
||||
}
|
||||
if !cs.Votes.Prevotes(round).HasTwoThirdsAny() {
|
||||
cmn.PanicSanity(cmn.Fmt("enterPrevoteWait(%v/%v), but Prevotes does not have any +2/3 votes", height, round))
|
||||
}
|
||||
cs.Logger.Info(cmn.Fmt("enterPrevoteWait(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
|
||||
logger.Info(cmn.Fmt("enterPrevoteWait(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
|
||||
|
||||
defer func() {
|
||||
// Done enterPrevoteWait:
|
||||
@@ -985,12 +1006,14 @@ func (cs *ConsensusState) enterPrevoteWait(height int64, round int) {
|
||||
// else, unlock an existing lock and precommit nil if +2/3 of prevotes were nil,
|
||||
// else, precommit nil otherwise.
|
||||
func (cs *ConsensusState) enterPrecommit(height int64, round int) {
|
||||
logger := cs.Logger.With("height", height, "round", round)
|
||||
|
||||
if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrecommit <= cs.Step) {
|
||||
cs.Logger.Debug(cmn.Fmt("enterPrecommit(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
|
||||
logger.Debug(cmn.Fmt("enterPrecommit(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
|
||||
return
|
||||
}
|
||||
|
||||
cs.Logger.Info(cmn.Fmt("enterPrecommit(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
|
||||
logger.Info(cmn.Fmt("enterPrecommit(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
|
||||
|
||||
defer func() {
|
||||
// Done enterPrecommit:
|
||||
@@ -1004,9 +1027,9 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) {
|
||||
// If we don't have a polka, we must precommit nil.
|
||||
if !ok {
|
||||
if cs.LockedBlock != nil {
|
||||
cs.Logger.Info("enterPrecommit: No +2/3 prevotes during enterPrecommit while we're locked. Precommitting nil")
|
||||
logger.Info("enterPrecommit: No +2/3 prevotes during enterPrecommit while we're locked. Precommitting nil")
|
||||
} else {
|
||||
cs.Logger.Info("enterPrecommit: No +2/3 prevotes during enterPrecommit. Precommitting nil.")
|
||||
logger.Info("enterPrecommit: No +2/3 prevotes during enterPrecommit. Precommitting nil.")
|
||||
}
|
||||
cs.signAddVote(types.VoteTypePrecommit, nil, types.PartSetHeader{})
|
||||
return
|
||||
@@ -1024,9 +1047,9 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) {
|
||||
// +2/3 prevoted nil. Unlock and precommit nil.
|
||||
if len(blockID.Hash) == 0 {
|
||||
if cs.LockedBlock == nil {
|
||||
cs.Logger.Info("enterPrecommit: +2/3 prevoted for nil.")
|
||||
logger.Info("enterPrecommit: +2/3 prevoted for nil.")
|
||||
} else {
|
||||
cs.Logger.Info("enterPrecommit: +2/3 prevoted for nil. Unlocking")
|
||||
logger.Info("enterPrecommit: +2/3 prevoted for nil. Unlocking")
|
||||
cs.LockedRound = 0
|
||||
cs.LockedBlock = nil
|
||||
cs.LockedBlockParts = nil
|
||||
@@ -1040,7 +1063,7 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) {
|
||||
|
||||
// If we're already locked on that block, precommit it, and update the LockedRound
|
||||
if cs.LockedBlock.HashesTo(blockID.Hash) {
|
||||
cs.Logger.Info("enterPrecommit: +2/3 prevoted locked block. Relocking")
|
||||
logger.Info("enterPrecommit: +2/3 prevoted locked block. Relocking")
|
||||
cs.LockedRound = round
|
||||
cs.eventBus.PublishEventRelock(cs.RoundStateEvent())
|
||||
cs.signAddVote(types.VoteTypePrecommit, blockID.Hash, blockID.PartsHeader)
|
||||
@@ -1049,7 +1072,7 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) {
|
||||
|
||||
// If +2/3 prevoted for proposal block, stage and precommit it
|
||||
if cs.ProposalBlock.HashesTo(blockID.Hash) {
|
||||
cs.Logger.Info("enterPrecommit: +2/3 prevoted proposal block. Locking", "hash", blockID.Hash)
|
||||
logger.Info("enterPrecommit: +2/3 prevoted proposal block. Locking", "hash", blockID.Hash)
|
||||
// Validate the block.
|
||||
if err := cs.blockExec.ValidateBlock(cs.state, cs.ProposalBlock); err != nil {
|
||||
cmn.PanicConsensus(cmn.Fmt("enterPrecommit: +2/3 prevoted for an invalid block: %v", err))
|
||||
@@ -1079,14 +1102,16 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) {
|
||||
|
||||
// Enter: any +2/3 precommits for next round.
|
||||
func (cs *ConsensusState) enterPrecommitWait(height int64, round int) {
|
||||
logger := cs.Logger.With("height", height, "round", round)
|
||||
|
||||
if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrecommitWait <= cs.Step) {
|
||||
cs.Logger.Debug(cmn.Fmt("enterPrecommitWait(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
|
||||
logger.Debug(cmn.Fmt("enterPrecommitWait(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
|
||||
return
|
||||
}
|
||||
if !cs.Votes.Precommits(round).HasTwoThirdsAny() {
|
||||
cmn.PanicSanity(cmn.Fmt("enterPrecommitWait(%v/%v), but Precommits does not have any +2/3 votes", height, round))
|
||||
}
|
||||
cs.Logger.Info(cmn.Fmt("enterPrecommitWait(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
|
||||
logger.Info(cmn.Fmt("enterPrecommitWait(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
|
||||
|
||||
defer func() {
|
||||
// Done enterPrecommitWait:
|
||||
@@ -1101,11 +1126,13 @@ func (cs *ConsensusState) enterPrecommitWait(height int64, round int) {
|
||||
|
||||
// Enter: +2/3 precommits for block
|
||||
func (cs *ConsensusState) enterCommit(height int64, commitRound int) {
|
||||
logger := cs.Logger.With("height", height, "commitRound", commitRound)
|
||||
|
||||
if cs.Height != height || cstypes.RoundStepCommit <= cs.Step {
|
||||
cs.Logger.Debug(cmn.Fmt("enterCommit(%v/%v): Invalid args. Current step: %v/%v/%v", height, commitRound, cs.Height, cs.Round, cs.Step))
|
||||
logger.Debug(cmn.Fmt("enterCommit(%v/%v): Invalid args. Current step: %v/%v/%v", height, commitRound, cs.Height, cs.Round, cs.Step))
|
||||
return
|
||||
}
|
||||
cs.Logger.Info(cmn.Fmt("enterCommit(%v/%v). Current: %v/%v/%v", height, commitRound, cs.Height, cs.Round, cs.Step))
|
||||
logger.Info(cmn.Fmt("enterCommit(%v/%v). Current: %v/%v/%v", height, commitRound, cs.Height, cs.Round, cs.Step))
|
||||
|
||||
defer func() {
|
||||
// Done enterCommit:
|
||||
@@ -1128,6 +1155,7 @@ func (cs *ConsensusState) enterCommit(height int64, commitRound int) {
|
||||
// Move them over to ProposalBlock if they match the commit hash,
|
||||
// otherwise they'll be cleared in updateToState.
|
||||
if cs.LockedBlock.HashesTo(blockID.Hash) {
|
||||
logger.Info("Commit is for locked block. Set ProposalBlock=LockedBlock", "blockHash", blockID.Hash)
|
||||
cs.ProposalBlock = cs.LockedBlock
|
||||
cs.ProposalBlockParts = cs.LockedBlockParts
|
||||
}
|
||||
@@ -1135,6 +1163,7 @@ func (cs *ConsensusState) enterCommit(height int64, commitRound int) {
|
||||
// If we don't have the block being committed, set up to get it.
|
||||
if !cs.ProposalBlock.HashesTo(blockID.Hash) {
|
||||
if !cs.ProposalBlockParts.HasHeader(blockID.PartsHeader) {
|
||||
logger.Info("Commit is for a block we don't know about. Set ProposalBlock=nil", "proposal", cs.ProposalBlock.Hash(), "commit", blockID.Hash)
|
||||
// We're getting the wrong block.
|
||||
// Set up ProposalBlockParts and keep waiting.
|
||||
cs.ProposalBlock = nil
|
||||
@@ -1147,19 +1176,21 @@ func (cs *ConsensusState) enterCommit(height int64, commitRound int) {
|
||||
|
||||
// If we have the block AND +2/3 commits for it, finalize.
|
||||
func (cs *ConsensusState) tryFinalizeCommit(height int64) {
|
||||
logger := cs.Logger.With("height", height)
|
||||
|
||||
if cs.Height != height {
|
||||
cmn.PanicSanity(cmn.Fmt("tryFinalizeCommit() cs.Height: %v vs height: %v", cs.Height, height))
|
||||
}
|
||||
|
||||
blockID, ok := cs.Votes.Precommits(cs.CommitRound).TwoThirdsMajority()
|
||||
if !ok || len(blockID.Hash) == 0 {
|
||||
cs.Logger.Error("Attempt to finalize failed. There was no +2/3 majority, or +2/3 was for <nil>.", "height", height)
|
||||
logger.Error("Attempt to finalize failed. There was no +2/3 majority, or +2/3 was for <nil>.")
|
||||
return
|
||||
}
|
||||
if !cs.ProposalBlock.HashesTo(blockID.Hash) {
|
||||
// TODO: this happens every time if we're not a validator (ugly logs)
|
||||
// TODO: ^^ wait, why does it matter that we're a validator?
|
||||
cs.Logger.Info("Attempt to finalize failed. We don't have the commit block.", "height", height, "proposal-block", cs.ProposalBlock.Hash(), "commit-block", blockID.Hash)
|
||||
logger.Info("Attempt to finalize failed. We don't have the commit block.", "proposal-block", cs.ProposalBlock.Hash(), "commit-block", blockID.Hash)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1210,23 +1241,28 @@ func (cs *ConsensusState) finalizeCommit(height int64) {
|
||||
|
||||
fail.Fail() // XXX
|
||||
|
||||
// Finish writing to the WAL for this height.
|
||||
// NOTE: If we fail before writing this, we'll never write it,
|
||||
// and just recover by running ApplyBlock in the Handshake.
|
||||
// If we moved it before persisting the block, we'd have to allow
|
||||
// WAL replay for blocks with an #ENDHEIGHT
|
||||
// As is, ConsensusState should not be started again
|
||||
// until we successfully call ApplyBlock (ie. here or in Handshake after restart)
|
||||
cs.wal.Save(EndHeightMessage{height})
|
||||
// Write EndHeightMessage{} for this height, implying that the blockstore
|
||||
// has saved the block.
|
||||
//
|
||||
// If we crash before writing this EndHeightMessage{}, we will recover by
|
||||
// running ApplyBlock during the ABCI handshake when we restart. If we
|
||||
// didn't save the block to the blockstore before writing
|
||||
// EndHeightMessage{}, we'd have to change WAL replay -- currently it
|
||||
// complains about replaying for heights where an #ENDHEIGHT entry already
|
||||
// exists.
|
||||
//
|
||||
// Either way, the ConsensusState should not be resumed until we
|
||||
// successfully call ApplyBlock (ie. later here, or in Handshake after
|
||||
// restart).
|
||||
cs.wal.WriteSync(EndHeightMessage{height}) // NOTE: fsync
|
||||
|
||||
fail.Fail() // XXX
|
||||
|
||||
// Create a copy of the state for staging
|
||||
// and an event cache for txs
|
||||
// Create a copy of the state for staging and an event cache for txs.
|
||||
stateCopy := cs.state.Copy()
|
||||
|
||||
// Execute and commit the block, update and save the state, and update the mempool.
|
||||
// NOTE: the block.AppHash wont reflect these txs until the next block
|
||||
// NOTE The block.AppHash wont reflect these txs until the next block.
|
||||
var err error
|
||||
stateCopy, err = cs.blockExec.ApplyBlock(stateCopy, types.BlockID{block.Hash(), blockParts.Header()}, block)
|
||||
if err != nil {
|
||||
@@ -1293,18 +1329,20 @@ func (cs *ConsensusState) defaultSetProposal(proposal *types.Proposal) error {
|
||||
|
||||
// NOTE: block is not necessarily valid.
|
||||
// Asynchronously triggers either enterPrevote (before we timeout of propose) or tryFinalizeCommit, once we have the full block.
|
||||
func (cs *ConsensusState) addProposalBlockPart(height int64, part *types.Part, verify bool) (added bool, err error) {
|
||||
func (cs *ConsensusState) addProposalBlockPart(height int64, part *types.Part) (added bool, err error) {
|
||||
// Blocks might be reused, so round mismatch is OK
|
||||
if cs.Height != height {
|
||||
cs.Logger.Debug("Received block part from wrong height", "height", height)
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// We're not expecting a block part.
|
||||
if cs.ProposalBlockParts == nil {
|
||||
cs.Logger.Info("Received a block part when we're not expecting any", "height", height)
|
||||
return false, nil // TODO: bad peer? Return error?
|
||||
}
|
||||
|
||||
added, err = cs.ProposalBlockParts.AddPart(part, verify)
|
||||
added, err = cs.ProposalBlockParts.AddPart(part)
|
||||
if err != nil {
|
||||
return added, err
|
||||
}
|
||||
@@ -1322,6 +1360,8 @@ func (cs *ConsensusState) addProposalBlockPart(height int64, part *types.Part, v
|
||||
blockID, hasTwoThirds := prevotes.TwoThirdsMajority()
|
||||
if hasTwoThirds && !blockID.IsZero() && (cs.ValidRound < cs.Round) {
|
||||
if cs.ProposalBlock.HashesTo(blockID.Hash) {
|
||||
cs.Logger.Info("Updating valid block to new proposal block",
|
||||
"valid-round", cs.Round, "valid-block-hash", cs.ProposalBlock.Hash())
|
||||
cs.ValidRound = cs.Round
|
||||
cs.ValidBlock = cs.ProposalBlock
|
||||
cs.ValidBlockParts = cs.ProposalBlockParts
|
||||
@@ -1391,6 +1431,7 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool,
|
||||
|
||||
cs.Logger.Info(cmn.Fmt("Added to lastPrecommits: %v", cs.LastCommit.StringShort()))
|
||||
cs.eventBus.PublishEventVote(types.EventDataVote{vote})
|
||||
cs.evsw.FireEvent(types.EventVote, vote)
|
||||
|
||||
// if we can skip timeoutCommit and have all the votes now,
|
||||
if cs.config.SkipTimeoutCommit && cs.LastCommit.HasAll() {
|
||||
@@ -1418,6 +1459,7 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool,
|
||||
}
|
||||
|
||||
cs.eventBus.PublishEventVote(types.EventDataVote{vote})
|
||||
cs.evsw.FireEvent(types.EventVote, vote)
|
||||
|
||||
switch vote.Type {
|
||||
case types.VoteTypePrevote:
|
||||
@@ -1453,6 +1495,7 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool,
|
||||
(vote.Round <= cs.Round) &&
|
||||
cs.ProposalBlock.HashesTo(blockID.Hash) {
|
||||
|
||||
cs.Logger.Info("Updating ValidBlock because of POL.", "validRound", cs.ValidRound, "POLRound", vote.Round)
|
||||
cs.ValidRound = vote.Round
|
||||
cs.ValidBlock = cs.ProposalBlock
|
||||
cs.ValidBlockParts = cs.ProposalBlockParts
|
||||
|
@@ -11,7 +11,7 @@ import (
	"github.com/tendermint/tendermint/types"
	cmn "github.com/tendermint/tmlibs/common"
	"github.com/tendermint/tmlibs/log"
-	tmpubsub "github.com/tendermint/tmlibs/pubsub"
+	tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
)

func init() {

@@ -50,7 +50,8 @@ func RegisterWALMessages(cdc *amino.Codec) {

// WAL is an interface for any write-ahead logger.
type WAL interface {
-	Save(WALMessage)
+	Write(WALMessage)
+	WriteSync(WALMessage)
	Group() *auto.Group
	SearchForEndHeight(height int64, options *WALSearchOptions) (gr *auto.GroupReader, found bool, err error)

@@ -98,31 +99,42 @@ func (wal *baseWAL) OnStart() error {
|
||||
if err != nil {
|
||||
return err
|
||||
} else if size == 0 {
|
||||
wal.Save(EndHeightMessage{0})
|
||||
wal.WriteSync(EndHeightMessage{0})
|
||||
}
|
||||
err = wal.group.Start()
|
||||
return err
|
||||
}
|
||||
|
||||
func (wal *baseWAL) OnStop() {
|
||||
wal.BaseService.OnStop()
|
||||
wal.group.Stop()
|
||||
wal.group.Close()
|
||||
}
|
||||
|
||||
// called in newStep and for each pass in receiveRoutine
|
||||
func (wal *baseWAL) Save(msg WALMessage) {
|
||||
// Write is called in newStep and for each receive on the
|
||||
// peerMsgQueue and the timeoutTicker.
|
||||
// NOTE: does not call fsync()
|
||||
func (wal *baseWAL) Write(msg WALMessage) {
|
||||
if wal == nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Write the wal message
|
||||
if err := wal.enc.Encode(&TimedWALMessage{time.Now(), msg}); err != nil {
|
||||
cmn.PanicQ(cmn.Fmt("Error writing msg to consensus wal: %v \n\nMessage: %v", err, msg))
|
||||
panic(cmn.Fmt("Error writing msg to consensus wal: %v \n\nMessage: %v", err, msg))
|
||||
}
|
||||
}
|
||||
|
||||
// WriteSync is called when we receive a msg from ourselves
|
||||
// so that we write to disk before sending signed messages.
|
||||
// NOTE: calls fsync()
|
||||
func (wal *baseWAL) WriteSync(msg WALMessage) {
|
||||
if wal == nil {
|
||||
return
|
||||
}
|
||||
|
||||
// TODO: only flush when necessary
|
||||
wal.Write(msg)
|
||||
if err := wal.group.Flush(); err != nil {
|
||||
cmn.PanicQ(cmn.Fmt("Error flushing consensus wal buf to file. Error: %v \n", err))
|
||||
panic(cmn.Fmt("Error flushing consensus wal buf to file. Error: %v \n", err))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -132,13 +144,14 @@ type WALSearchOptions struct {
|
||||
IgnoreDataCorruptionErrors bool
|
||||
}
|
||||
|
||||
// SearchForEndHeight searches for the EndHeightMessage with the height and
|
||||
// returns an auto.GroupReader, whenever it was found or not and an error.
|
||||
// SearchForEndHeight searches for the EndHeightMessage with the given height
|
||||
// and returns an auto.GroupReader, whenever it was found or not and an error.
|
||||
// Group reader will be nil if found equals false.
|
||||
//
|
||||
// CONTRACT: caller must close group reader.
|
||||
func (wal *baseWAL) SearchForEndHeight(height int64, options *WALSearchOptions) (gr *auto.GroupReader, found bool, err error) {
|
||||
var msg *TimedWALMessage
|
||||
lastHeightFound := int64(-1)
|
||||
|
||||
// NOTE: starting from the last file in the group because we're usually
|
||||
// searching for the last height. See replay.go
|
||||
@@ -154,17 +167,25 @@ func (wal *baseWAL) SearchForEndHeight(height int64, options *WALSearchOptions)
|
||||
for {
|
||||
msg, err = dec.Decode()
|
||||
if err == io.EOF {
|
||||
// OPTIMISATION: no need to look for height in older files if we've seen h < height
|
||||
if lastHeightFound > 0 && lastHeightFound < height {
|
||||
gr.Close()
|
||||
return nil, false, nil
|
||||
}
|
||||
// check next file
|
||||
break
|
||||
}
|
||||
if options.IgnoreDataCorruptionErrors && IsDataCorruptionError(err) {
|
||||
wal.Logger.Debug("Corrupted entry. Skipping...", "err", err)
|
||||
// do nothing
|
||||
continue
|
||||
} else if err != nil {
|
||||
gr.Close()
|
||||
return nil, false, err
|
||||
}
|
||||
|
||||
if m, ok := msg.Msg.(EndHeightMessage); ok {
|
||||
lastHeightFound = m.Height
|
||||
if m.Height == height { // found
|
||||
wal.Logger.Debug("Found", "height", height, "index", index)
|
||||
return gr, true, nil
|
||||
@@ -259,23 +280,17 @@ func (dec *WALDecoder) Decode() (*TimedWALMessage, error) {
|
||||
|
||||
b = make([]byte, 4)
|
||||
_, err = dec.rd.Read(b)
|
||||
if err == io.EOF {
|
||||
return nil, err
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read length: %v", err)
|
||||
}
|
||||
length := binary.BigEndian.Uint32(b)
|
||||
|
||||
if length > maxMsgSizeBytes {
|
||||
return nil, DataCorruptionError{fmt.Errorf("length %d exceeded maximum possible value of %d bytes", length, maxMsgSizeBytes)}
|
||||
return nil, fmt.Errorf("length %d exceeded maximum possible value of %d bytes", length, maxMsgSizeBytes)
|
||||
}
|
||||
|
||||
data := make([]byte, length)
|
||||
_, err = dec.rd.Read(data)
|
||||
if err == io.EOF {
|
||||
return nil, err
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read data: %v", err)
|
||||
}
|
||||
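The decoder change above drops the separate EOF checks and stops wrapping an over-long length prefix in DataCorruptionError, so it now fails outright instead of being skippable during corruption-tolerant scans. A self-contained sketch of that kind of length-prefixed read with a size cap (simplified; the real decoder also verifies a CRC and decodes the payload with amino, and the constant below is only a placeholder value):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

const maxMsgSizeBytes = 1024 * 1024 // cap on a single record, as in the WAL

// readRecord reads a 4-byte big-endian length followed by that many bytes.
func readRecord(rd io.Reader) ([]byte, error) {
	var lenBuf [4]byte
	if _, err := io.ReadFull(rd, lenBuf[:]); err != nil {
		return nil, err // io.EOF here simply means "no more records"
	}
	length := binary.BigEndian.Uint32(lenBuf[:])
	if length > maxMsgSizeBytes {
		// Fail outright rather than flagging this as skippable corruption.
		return nil, fmt.Errorf("length %d exceeded maximum possible value of %d bytes", length, maxMsgSizeBytes)
	}
	data := make([]byte, length)
	if _, err := io.ReadFull(rd, data); err != nil {
		return nil, fmt.Errorf("failed to read data: %v", err)
	}
	return data, nil
}

func main() {
	var buf bytes.Buffer
	payload := []byte("hello wal")
	binary.Write(&buf, binary.BigEndian, uint32(len(payload)))
	buf.Write(payload)

	rec, err := readRecord(&buf)
	fmt.Printf("record=%q err=%v\n", rec, err)
}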
@@ -297,8 +312,9 @@ func (dec *WALDecoder) Decode() (*TimedWALMessage, error) {
|
||||
|
||||
type nilWAL struct{}
|
||||
|
||||
func (nilWAL) Save(m WALMessage) {}
|
||||
func (nilWAL) Group() *auto.Group { return nil }
|
||||
func (nilWAL) Write(m WALMessage) {}
|
||||
func (nilWAL) WriteSync(m WALMessage) {}
|
||||
func (nilWAL) Group() *auto.Group { return nil }
|
||||
func (nilWAL) SearchForEndHeight(height int64, options *WALSearchOptions) (gr *auto.GroupReader, found bool, err error) {
|
||||
return nil, false, nil
|
||||
}
|
||||
|
@@ -13,10 +13,10 @@ import (
|
||||
"github.com/tendermint/abci/example/kvstore"
|
||||
bc "github.com/tendermint/tendermint/blockchain"
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
"github.com/tendermint/tendermint/privval"
|
||||
"github.com/tendermint/tendermint/proxy"
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
pvm "github.com/tendermint/tendermint/types/priv_validator"
|
||||
auto "github.com/tendermint/tmlibs/autofile"
|
||||
cmn "github.com/tendermint/tmlibs/common"
|
||||
"github.com/tendermint/tmlibs/db"
|
||||
@@ -40,7 +40,7 @@ func WALWithNBlocks(numBlocks int) (data []byte, err error) {
|
||||
// COPY PASTE FROM node.go WITH A FEW MODIFICATIONS
|
||||
// NOTE: we can't import node package because of circular dependency
|
||||
privValidatorFile := config.PrivValidatorFile()
|
||||
privValidator := pvm.LoadOrGenFilePV(privValidatorFile)
|
||||
privValidator := privval.LoadOrGenFilePV(privValidatorFile)
|
||||
genDoc, err := types.GenesisDocFromFile(config.GenesisFile())
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to read genesis file")
|
||||
@@ -52,7 +52,7 @@ func WALWithNBlocks(numBlocks int) (data []byte, err error) {
|
||||
return nil, errors.Wrap(err, "failed to make genesis state")
|
||||
}
|
||||
blockStore := bc.NewBlockStore(blockStoreDB)
|
||||
handshaker := NewHandshaker(stateDB, state, blockStore, genDoc.AppState())
|
||||
handshaker := NewHandshaker(stateDB, state, blockStore, genDoc)
|
||||
proxyApp := proxy.NewAppConns(proxy.NewLocalClientCreator(app), handshaker)
|
||||
proxyApp.SetLogger(logger.With("module", "proxy"))
|
||||
if err := proxyApp.Start(); err != nil {
|
||||
@@ -65,8 +65,8 @@ func WALWithNBlocks(numBlocks int) (data []byte, err error) {
|
||||
return nil, errors.Wrap(err, "failed to start event bus")
|
||||
}
|
||||
defer eventBus.Stop()
|
||||
mempool := types.MockMempool{}
|
||||
evpool := types.MockEvidencePool{}
|
||||
mempool := sm.MockMempool{}
|
||||
evpool := sm.MockEvidencePool{}
|
||||
blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool)
|
||||
consensusState := NewConsensusState(config.Consensus, state.Copy(), blockExec, blockStore, mempool, evpool)
|
||||
consensusState.SetLogger(logger)
|
||||
@@ -83,7 +83,7 @@ func WALWithNBlocks(numBlocks int) (data []byte, err error) {
|
||||
numBlocksWritten := make(chan struct{})
|
||||
wal := newByteBufferWAL(logger, NewWALEncoder(wr), int64(numBlocks), numBlocksWritten)
|
||||
// see wal.go#103
|
||||
wal.Save(EndHeightMessage{0})
|
||||
wal.Write(EndHeightMessage{0})
|
||||
consensusState.wal = wal
|
||||
|
||||
if err := consensusState.Start(); err != nil {
|
||||
@@ -166,7 +166,7 @@ func newByteBufferWAL(logger log.Logger, enc *WALEncoder, nBlocks int64, signalS
|
||||
// Write writes the message to the internal buffer except when heightToStop is
|
||||
// reached, in which case it will signal the caller via signalWhenStopsTo and
|
||||
// skip writing.
|
||||
func (w *byteBufferWAL) Save(m WALMessage) {
|
||||
func (w *byteBufferWAL) Write(m WALMessage) {
|
||||
if w.stopped {
|
||||
w.logger.Debug("WAL already stopped. Not writing message", "msg", m)
|
||||
return
|
||||
@@ -189,6 +189,10 @@ func (w *byteBufferWAL) Save(m WALMessage) {
|
||||
}
|
||||
}
|
||||
|
||||
func (w *byteBufferWAL) WriteSync(m WALMessage) {
|
||||
w.Write(m)
|
||||
}
|
||||
|
||||
func (w *byteBufferWAL) Group() *auto.Group {
|
||||
panic("not implemented")
|
||||
}
|
||||
|
@@ -8,9 +8,9 @@ services:
|
||||
- "46656-46657:46656-46657"
|
||||
environment:
|
||||
- ID=0
|
||||
- LOG=${LOG:-tendermint.log}
|
||||
- LOG=$${LOG:-tendermint.log}
|
||||
volumes:
|
||||
- ${FOLDER:-./build}:/tendermint:Z
|
||||
- ./build:/tendermint:Z
|
||||
networks:
|
||||
localnet:
|
||||
ipv4_address: 192.167.10.2
|
||||
@@ -22,9 +22,9 @@ services:
|
||||
- "46659-46660:46656-46657"
|
||||
environment:
|
||||
- ID=1
|
||||
- LOG=${LOG:-tendermint.log}
|
||||
- LOG=$${LOG:-tendermint.log}
|
||||
volumes:
|
||||
- ${FOLDER:-./build}:/tendermint:Z
|
||||
- ./build:/tendermint:Z
|
||||
networks:
|
||||
localnet:
|
||||
ipv4_address: 192.167.10.3
|
||||
@@ -34,11 +34,11 @@ services:
|
||||
image: "tendermint/localnode"
|
||||
environment:
|
||||
- ID=2
|
||||
- LOG=${LOG:-tendermint.log}
|
||||
- LOG=$${LOG:-tendermint.log}
|
||||
ports:
|
||||
- "46661-46662:46656-46657"
|
||||
volumes:
|
||||
- ${FOLDER:-./build}:/tendermint:Z
|
||||
- ./build:/tendermint:Z
|
||||
networks:
|
||||
localnet:
|
||||
ipv4_address: 192.167.10.4
|
||||
@@ -48,11 +48,11 @@ services:
|
||||
image: "tendermint/localnode"
|
||||
environment:
|
||||
- ID=3
|
||||
- LOG=${LOG:-tendermint.log}
|
||||
- LOG=$${LOG:-tendermint.log}
|
||||
ports:
|
||||
- "46663-46664:46656-46657"
|
||||
volumes:
|
||||
- ${FOLDER:-./build}:/tendermint:Z
|
||||
- ./build:/tendermint:Z
|
||||
networks:
|
||||
localnet:
|
||||
ipv4_address: 192.167.10.5
|
||||
|
@@ -183,6 +183,7 @@ Try running these commands:
|
||||
|
||||
> commit
|
||||
-> code: OK
|
||||
-> data.hex: 0x0000000000000000
|
||||
|
||||
> deliver_tx "abc"
|
||||
-> code: OK
|
||||
@@ -194,7 +195,7 @@ Try running these commands:
|
||||
|
||||
> commit
|
||||
-> code: OK
|
||||
-> data.hex: 0x49DFD15CCDACDEAE9728CB01FBB5E8688CA58B91
|
||||
-> data.hex: 0x0200000000000000
|
||||
|
||||
> query "abc"
|
||||
-> code: OK
|
||||
@@ -208,7 +209,7 @@ Try running these commands:
|
||||
|
||||
> commit
|
||||
-> code: OK
|
||||
-> data.hex: 0x70102DB32280373FBF3F9F89DA2A20CE2CD62B0B
|
||||
-> data.hex: 0x0400000000000000
|
||||
|
||||
> query "def"
|
||||
-> code: OK
|
||||
@@ -301,6 +302,7 @@ In another window, start the ``abci-cli console``:
|
||||
|
||||
> set_option serial on
|
||||
-> code: OK
|
||||
-> log: OK (SetOption doesn't return anything.)
|
||||
|
||||
> check_tx 0x00
|
||||
-> code: OK
|
||||
|
@@ -1,125 +1,42 @@
|
||||
Application Architecture Guide
|
||||
==============================
|
||||
|
||||
Overview
|
||||
--------
|
||||
Here we provide a brief guide on the recommended architecture of a Tendermint blockchain
|
||||
application.
|
||||
|
||||
A blockchain application is more than the consensus engine and the
|
||||
transaction logic (eg. smart contracts, business logic) as implemented
|
||||
in the ABCI app. There are also (mobile, web, desktop) clients that will
|
||||
need to connect and make use of the app. We will assume for now that you
|
||||
have a well designed transactions and database model, but maybe this
|
||||
will be the topic of another article. This article is more interested in
|
||||
various ways of setting up the "plumbing" and connecting these pieces,
|
||||
and demonstrating some evolving best practices.
|
||||
The following diagram provides a superb example:
|
||||
|
||||
Security
|
||||
--------
|
||||
https://drive.google.com/open?id=1yR2XpRi9YCY9H9uMfcw8-RMJpvDyvjz9
|
||||
|
||||
A very important aspect when constructing a blockchain is security. The
|
||||
consensus model can be DoSed (no consensus possible) by corrupting 1/3
|
||||
of the validators and exploited (writing arbitrary blocks) by corrupting
|
||||
2/3 of the validators. So, while the security is not that of the
|
||||
"weakest link", you should take care that the "average link" is
|
||||
sufficiently hardened.
|
||||
The end-user application here is the Cosmos Voyager, at the bottom left.
|
||||
Voyager communicates with a REST API exposed by a local Light-Client Daemon.
|
||||
The Light-Client Daemon is an application specific program that communicates with
|
||||
Tendermint nodes and verifies Tendermint light-client proofs through the Tendermint Core RPC.
|
||||
The Tendermint Core process communicates with a local ABCI application, where the
|
||||
user query or transaction is actually processed.
|
||||
|
||||
One big attack surface on the validators is the communication between
|
||||
the ABCI app and the tendermint core. This should be highly protected.
|
||||
Ideally, the app and the core are running on the same machine, so no
|
||||
external agent can target the communication channel. You can use unix
|
||||
sockets (with permissions preventing access from other users), or even
|
||||
compile the two apps into one binary if the ABCI app is also written in
|
||||
go. If you are unable to do that due to language support, then the ABCI
|
||||
app should bind a TCP connection to localhost (127.0.0.1), which is less
|
||||
efficient and secure, but still not reachable from outside. If you must
|
||||
run the ABCI app and tendermint core on separate machines, make sure you
|
||||
have a secure communication channel (ssh tunnel?)
|
||||
The ABCI application must be a deterministic result of the Tendermint consensus - any external influence
|
||||
on the application state that didn't come through Tendermint could cause a
|
||||
consensus failure. Thus *nothing* should communicate with the application except Tendermint via ABCI.
|
||||
|
||||
Now assuming, you have linked together your app and the core securely,
|
||||
you must also make sure no one can get on the machine it is hosted on.
|
||||
At this point it is basic network security. Run on a secure operating
|
||||
system (SELinux?). Limit who has access to the machine (user accounts,
|
||||
but also where the physical machine is hosted). Turn off all services
|
||||
except for ssh, which should only be accessible by some well-guarded
|
||||
public/private key pairs (no password). And maybe even firewall off
|
||||
access to the ports used by the validators, so only known validators can
|
||||
connect.
|
||||
If the application is written in Go, it can be compiled into the Tendermint binary.
|
||||
Otherwise, it should use a unix socket to communicate with Tendermint.
|
||||
If it's necessary to use TCP, extra care must be taken to encrypt and authenticate the connection.
|
||||
|
||||
There was also a suggestion on slack from @jhon about compiling
|
||||
everything together with a unikernel for more security, such as
|
||||
`Mirage <https://mirage.io>`__ or
|
||||
`UNIK <https://github.com/emc-advanced-dev/unik>`__.
|
||||
All reads from the app happen through the Tendermint `/abci_query` endpoint.
|
||||
All writes to the app happen through the Tendermint `/broadcast_tx_*` endpoints.
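To make the read/write split concrete, here is a hedged sketch of a client driving both paths through Tendermint Core's RPC. It assumes the `rpc/client` HTTP client of this era (`NewHTTP`, `BroadcastTxCommit`, `ABCIQuery`); the address, the `key=value` transaction format and the query path are illustrative for a kvstore-like app and are not part of the guide itself:

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/rpc/client"
)

func main() {
	// All traffic goes through Tendermint Core; the ABCI app itself is
	// never dialed directly.
	c := client.NewHTTP("tcp://localhost:46657", "/websocket")

	// Write path: /broadcast_tx_commit (one of the three broadcast variants).
	if _, err := c.BroadcastTxCommit([]byte("abc=xyz")); err != nil {
		panic(err)
	}

	// Read path: /abci_query against the application state.
	// How the path and data are interpreted is up to the ABCI app.
	res, err := c.ABCIQuery("/key", []byte("abc"))
	if err != nil {
		panic(err)
	}
	fmt.Printf("value: %s\n", res.Response.Value)
}
```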
|
||||
|
||||
Connecting your client to the blockchain
|
||||
----------------------------------------
|
||||
The Light-Client Daemon is what provides light clients (end users) with nearly all the security of a full node.
|
||||
It formats and broadcasts transactions, and verifies proofs of queries and transaction results.
|
||||
Note that it need not be a daemon - the Light-Client logic could instead be implemented in the same process as the end-user application.
|
||||
|
||||
Tendermint Core RPC
|
||||
~~~~~~~~~~~~~~~~~~~
|
||||
Note that for ABCI applications with weaker security requirements, the functionality of the Light-Client Daemon can be moved
|
||||
into the ABCI application process itself. That said, exposing the application process to anything besides Tendermint over ABCI
|
||||
requires extreme caution, as all transactions, and possibly all queries, should still pass through Tendermint.
|
||||
|
||||
The concept is that the ABCI app is completely hidden from the outside
|
||||
world and only communicated through a tested and secured `interface
|
||||
exposed by the tendermint core <./specification/rpc.html>`__. This interface
|
||||
exposes a lot of data on the block header and consensus process, which
|
||||
is quite useful for externally verifying the system. It also includes
|
||||
3(!) methods to broadcast a transaction (propose it for the blockchain,
|
||||
and possibly await a response). And one method to query app-specific
|
||||
data from the ABCI application.
|
||||
|
||||
Pros:
|
||||
* Server code already written
|
||||
* Access to block headers to validate merkle proofs (nice for light clients)
|
||||
* Basic read/write functionality is supported
|
||||
|
||||
Cons:
|
||||
* Limited interface to app. All queries must be serialized into
|
||||
[]byte (less expressive than JSON over HTTP) and there is no way to push
|
||||
data from ABCI app to the client (eg. notify me if account X receives a
|
||||
transaction)
|
||||
|
||||
Custom ABCI server
|
||||
~~~~~~~~~~~~~~~~~~
|
||||
|
||||
This was proposed by @wolfposd on slack and demonstrated by
|
||||
`TMChat <https://github.com/wolfposd/TMChat>`__, a sample app. The
|
||||
concept is to write a custom server for your app (with typical REST
|
||||
API/websockets/etc for easy use by a mobile app). This custom server is
|
||||
in the same binary as the ABCI app and data store, so it can easily react
|
||||
to complex events there that involve understanding the data format (send
|
||||
a message if my balance drops below 500). All "writes" sent to this
|
||||
server are proxied via websocket/JSON-RPC to tendermint core. When they
|
||||
come back as deliver\_tx over ABCI, they will be written to the data
|
||||
store. For "reads", we can do any queries we wish that are supported by
|
||||
our architecture, using any web technology that is useful. The general
|
||||
architecture is shown in the following diagram:
|
||||
|
||||
Pros:

* Separates application logic from blockchain logic
* Allows much richer, more flexible client-facing API
* Allows pub-sub, watching certain fields, etc.
|
||||
|
||||
Cons:

* Access to the ABCI app can be dangerous (be VERY careful not to write unless it comes from the validator node)
* No direct access to the blockchain headers to verify tx
* You must write your own API (but maybe that's a pro...)
|
||||
|
||||
Hybrid solutions
|
||||
~~~~~~~~~~~~~~~~
|
||||
|
||||
Likely the least secure but most versatile. The client can access both
|
||||
the tendermint node for all blockchain info, as well as a custom app
|
||||
server, for complex queries and pub-sub on the abci app.
|
||||
|
||||
Pros: All from both above solutions
|
||||
|
||||
Cons: Even more complexity; even more attack vectors (less
|
||||
security)
|
||||
|
||||
Scalability
|
||||
-----------
|
||||
|
||||
One option is read replicas on non-validating nodes. They could forward transactions
|
||||
to the validators (fewer connections, more security), and locally allow
|
||||
all queries in any of the above configurations. Thus, while
|
||||
transaction-processing speed is limited by the speed of the abci app and
|
||||
the number of validators, one should be able to scale our read
|
||||
performance to quite an extent (until the replication process drains too
|
||||
many resources from the validator nodes).
|
||||
See the following for more extensive documentation:
|
||||
- [Interchain Standard for the Light-Client REST API](https://github.com/cosmos/cosmos-sdk/pull/1028)
|
||||
- [Tendermint RPC Docs](https://tendermint.github.io/slate/)
|
||||
- [Tendermint in Production](https://github.com/tendermint/tendermint/pull/1618)
|
||||
- [Tendermint Basics](https://tendermint.readthedocs.io/en/master/using-tendermint.html)
|
||||
- [ABCI spec](https://github.com/tendermint/abci/blob/master/specification.rst)
|
||||
|
@@ -178,21 +178,22 @@ connection, to query the local state of the app.
|
||||
Mempool Connection
|
||||
~~~~~~~~~~~~~~~~~~
|
||||
|
||||
The mempool connection is used *only* for CheckTx requests. Transactions
|
||||
are run using CheckTx in the same order they were received by the
|
||||
validator. If the CheckTx returns ``OK``, the transaction is kept in
|
||||
memory and relayed to other peers in the same order it was received.
|
||||
Otherwise, it is discarded.
|
||||
The mempool connection is used *only* for CheckTx requests.
|
||||
Transactions are run using CheckTx in the same order they were
|
||||
received by the validator. If the CheckTx returns ``OK``, the
|
||||
transaction is kept in memory and relayed to other peers in the same
|
||||
order it was received. Otherwise, it is discarded.
|
||||
|
||||
CheckTx requests run concurrently with block processing; so they should
|
||||
run against a copy of the main application state which is reset after
|
||||
every block. This copy is necessary to track transitions made by a
|
||||
sequence of CheckTx requests before they are included in a block. When a
|
||||
block is committed, the application must ensure to reset the mempool
|
||||
state to the latest committed state. Tendermint Core will then filter
|
||||
through all transactions in the mempool, removing any that were included
|
||||
in the block, and re-run the rest using CheckTx against the post-Commit
|
||||
mempool state.
|
||||
CheckTx requests run concurrently with block processing; so they
|
||||
should run against a copy of the main application state which is reset
|
||||
after every block. This copy is necessary to track transitions made by
|
||||
a sequence of CheckTx requests before they are included in a block.
|
||||
When a block is committed, the application must ensure that it resets the
|
||||
mempool state to the latest committed state. Tendermint Core will then
|
||||
filter through all transactions in the mempool, removing any that were
|
||||
included in the block, and re-run the rest using CheckTx against the
|
||||
post-Commit mempool state (this behaviour can be turned off with
|
||||
``[mempool] recheck = false``).
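A minimal, hedged sketch of what the paragraph above describes: the application keeps a separate mempool ("check") copy of its state, mutates it in CheckTx, and resets it to the committed state in Commit. The `kvState` type is invented for illustration, and the ABCI method signatures are assumptions based on the `github.com/tendermint/abci` package of this era rather than the guide's own example code:

```go
package kvapp

import (
	"bytes"

	"github.com/tendermint/abci/types"
)

// kvState is an illustrative in-memory application state.
type kvState struct{ kv map[string]string }

func (s *kvState) copy() *kvState {
	out := &kvState{kv: make(map[string]string, len(s.kv))}
	for k, v := range s.kv {
		out.kv[k] = v
	}
	return out
}

// apply parses "key=value" transactions; anything else is invalid.
func (s *kvState) apply(tx []byte) bool {
	parts := bytes.SplitN(tx, []byte("="), 2)
	if len(parts) != 2 {
		return false
	}
	s.kv[string(parts[0])] = string(parts[1])
	return true
}

// KVApp keeps the mempool ("check") state separate from the block
// ("deliver") state and resets the former on Commit, as described above.
type KVApp struct {
	types.BaseApplication
	deliverState *kvState // mutated by DeliverTx, committed in Commit
	checkState   *kvState // mutated by CheckTx only
}

func NewKVApp() *KVApp {
	s := &kvState{kv: map[string]string{}}
	return &KVApp{deliverState: s, checkState: s.copy()}
}

func (app *KVApp) CheckTx(tx []byte) types.ResponseCheckTx {
	if !app.checkState.apply(tx) {
		return types.ResponseCheckTx{Code: 1, Log: "invalid tx"}
	}
	return types.ResponseCheckTx{Code: types.CodeTypeOK}
}

func (app *KVApp) DeliverTx(tx []byte) types.ResponseDeliverTx {
	if !app.deliverState.apply(tx) {
		return types.ResponseDeliverTx{Code: 1, Log: "invalid tx"}
	}
	return types.ResponseDeliverTx{Code: types.CodeTypeOK}
}

func (app *KVApp) Commit() types.ResponseCommit {
	// Reset the mempool state to the latest committed state; Tendermint then
	// reruns the remaining mempool txs against it (unless recheck = false).
	app.checkState = app.deliverState.copy()
	// A real application would return its app hash in Data here.
	return types.ResponseCommit{}
}
```

The important line is the reset of `checkState` in Commit, which is exactly the step the text above calls for.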
|
||||
|
||||
.. container:: toggle
|
||||
|
||||
@@ -226,6 +227,23 @@ mempool state.
|
||||
}
|
||||
}
|
||||
|
||||
Replay Protection
|
||||
^^^^^^^^^^^^^^^^^
|
||||
To prevent old transactions from being replayed, CheckTx must
|
||||
implement replay protection.
|
||||
|
||||
Tendermint provides the first layer of defence by keeping a lightweight
in-memory cache of the last 100k transactions (``[mempool] cache_size``) in
the mempool. If Tendermint has just started, or the clients have sent more
than 100k transactions, old transactions may be sent to the
application. So it is important that CheckTx implement some logic to
handle them.
|
||||
|
||||
There are cases where a transaction will (or may) become valid in some
|
||||
future state, in which case you probably want to disable Tendermint's
|
||||
cache. You can do that by setting ``[mempool] cache_size = 0`` in the
|
||||
config.
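As a concrete, hedged illustration of application-side replay protection: the sketch below rejects any transaction whose hash has already been delivered, on top of whatever Tendermint's own mempool cache filters out. The in-memory `committedTxs` set is an assumption for illustration only (a real application would persist it with its state, or use account sequence numbers instead), and the ABCI signatures are assumed from the abci package of this era:

```go
package replayprot

import (
	"crypto/sha256"

	"github.com/tendermint/abci/types"
)

// App sketch: CheckTx refuses transactions that were already committed.
// committedTxs would normally be persisted alongside the application state.
type App struct {
	types.BaseApplication
	committedTxs map[[sha256.Size]byte]struct{}
}

func NewApp() *App {
	return &App{committedTxs: make(map[[sha256.Size]byte]struct{})}
}

func (app *App) CheckTx(tx []byte) types.ResponseCheckTx {
	if _, seen := app.committedTxs[sha256.Sum256(tx)]; seen {
		// An old transaction replayed past Tendermint's 100k-entry cache.
		return types.ResponseCheckTx{Code: 2, Log: "tx already committed"}
	}
	return types.ResponseCheckTx{Code: types.CodeTypeOK}
}

func (app *App) DeliverTx(tx []byte) types.ResponseDeliverTx {
	app.committedTxs[sha256.Sum256(tx)] = struct{}{}
	return types.ResponseDeliverTx{Code: types.CodeTypeOK}
}
```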
|
||||
|
||||
Consensus Connection
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
|
Before Width: | Height: | Size: 14 KiB After Width: | Height: | Size: 14 KiB |
BIN
docs/assets/tm-application-example.png
Normal file
BIN
docs/assets/tm-application-example.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 26 KiB |
14
docs/conf.py
@@ -71,7 +71,7 @@ language = None
|
||||
# List of patterns, relative to source directory, that match files and
|
||||
# directories to ignore when looking for source files.
|
||||
# This patterns also effect to html_static_path and html_extra_path
|
||||
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'architecture', 'specification/new-spec', 'examples']
|
||||
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'architecture', 'spec', 'examples']
|
||||
|
||||
# The name of the Pygments (syntax highlighting) style to use.
|
||||
pygments_style = 'sphinx'
|
||||
@@ -184,20 +184,10 @@ if os.path.isdir(tools_dir) != True:
|
||||
if os.path.isdir(assets_dir) != True:
|
||||
os.mkdir(assets_dir)
|
||||
|
||||
urllib.urlretrieve(tools_repo+tools_branch+'/ansible/README.rst', filename=tools_dir+'/ansible.rst')
|
||||
urllib.urlretrieve(tools_repo+tools_branch+'/ansible/assets/a_plus_t.png', filename=assets_dir+'/a_plus_t.png')
|
||||
|
||||
urllib.urlretrieve(tools_repo+tools_branch+'/docker/README.rst', filename=tools_dir+'/docker.rst')
|
||||
|
||||
urllib.urlretrieve(tools_repo+tools_branch+'/mintnet-kubernetes/README.rst', filename=tools_dir+'/mintnet-kubernetes.rst')
|
||||
urllib.urlretrieve(tools_repo+tools_branch+'/mintnet-kubernetes/assets/gce1.png', filename=assets_dir+'/gce1.png')
|
||||
urllib.urlretrieve(tools_repo+tools_branch+'/mintnet-kubernetes/assets/gce2.png', filename=assets_dir+'/gce2.png')
|
||||
urllib.urlretrieve(tools_repo+tools_branch+'/mintnet-kubernetes/assets/statefulset.png', filename=assets_dir+'/statefulset.png')
|
||||
urllib.urlretrieve(tools_repo+tools_branch+'/mintnet-kubernetes/assets/t_plus_k.png', filename=assets_dir+'/t_plus_k.png')
|
||||
|
||||
urllib.urlretrieve(tools_repo+tools_branch+'/terraform-digitalocean/README.rst', filename=tools_dir+'/terraform-digitalocean.rst')
|
||||
urllib.urlretrieve(tools_repo+tools_branch+'/tm-bench/README.rst', filename=tools_dir+'/benchmarking.rst')
|
||||
urllib.urlretrieve('https://raw.githubusercontent.com/tendermint/tools/master/tm-monitor/README.rst', filename='tools/monitoring.rst')
|
||||
urllib.urlretrieve(tools_repo+tools_branch+'/tm-monitor/README.rst', filename='tools/monitoring.rst')
|
||||
|
||||
#### abci spec #################################
|
||||
|
||||
|
@@ -3,8 +3,7 @@ Deploy a Testnet
|
||||
|
||||
Now that we've seen how ABCI works, and even played with a few
|
||||
applications on a single validator node, it's time to deploy a test
|
||||
network to four validator nodes. For this deployment, we'll use the
|
||||
``basecoin`` application.
|
||||
network to four validator nodes.
|
||||
|
||||
Manual Deployments
|
||||
------------------
|
||||
@@ -24,67 +23,42 @@ Here are the steps to setting up a testnet manually:
|
||||
``tendermint init``
|
||||
4) Compile a list of public keys for each validator into a
|
||||
``genesis.json`` file and replace the existing file with it.
|
||||
5) Run ``tendermint node --p2p.persistent_peers=< peer addresses >`` on each node,
|
||||
5) Run ``tendermint node --proxy_app=kvstore --p2p.persistent_peers=< peer addresses >`` on each node,
|
||||
where ``< peer addresses >`` is a comma separated list of the IP:PORT
|
||||
combination for each node. The default port for Tendermint is
|
||||
``46656``. Thus, if the IP addresses of your nodes were
|
||||
``192.168.0.1, 192.168.0.2, 192.168.0.3, 192.168.0.4``, the command
|
||||
would look like:
|
||||
``tendermint node --p2p.persistent_peers=96663a3dd0d7b9d17d4c8211b191af259621c693@192.168.0.1:46656, 429fcf25974313b95673f58d77eacdd434402665@192.168.0.2:46656, 0491d373a8e0fcf1023aaf18c51d6a1d0d4f31bd@192.168.0.3:46656, f9baeaa15fedf5e1ef7448dd60f46c01f1a9e9c4@192.168.0.4:46656``.
|
||||
|
||||
::
|
||||
|
||||
tendermint node --proxy_app=kvstore --p2p.persistent_peers=96663a3dd0d7b9d17d4c8211b191af259621c693@192.168.0.1:46656, 429fcf25974313b95673f58d77eacdd434402665@192.168.0.2:46656, 0491d373a8e0fcf1023aaf18c51d6a1d0d4f31bd@192.168.0.3:46656, f9baeaa15fedf5e1ef7448dd60f46c01f1a9e9c4@192.168.0.4:46656
|
||||
|
||||
After a few seconds, all the nodes should connect to each other and start
|
||||
making blocks! For more information, see the Tendermint Networks section
|
||||
of `the guide to using Tendermint <using-tendermint.html>`__.
|
||||
|
||||
But wait! Steps 3 and 4 are quite manual. Instead, use `this script <https://github.com/tendermint/tendermint/blob/develop/docs/examples/init_testnet.sh>`__, which does the heavy lifting for you. And it gets better.
|
||||
|
||||
Instead of the previously linked script to initialize the files required for a testnet, we have the ``tendermint testnet`` command. By default, running ``tendermint testnet`` will create all the required files, just like the script. Of course, you'll still need to manually edit some fields in the ``config.toml``. Alternatively, see the available flags to auto-populate the ``config.toml`` with the fields that would otherwise be passed in via flags when running ``tendermint node``. As you might imagine, this command is useful for manual or automated deployments.
|
||||
|
||||
Automated Deployments
|
||||
---------------------
|
||||
|
||||
While the manual deployment is easy enough, an automated deployment is
|
||||
usually quicker. The below examples show different tools that can be used
|
||||
for automated deployments.
|
||||
Below is the easiest and fastest way to get a testnet up, in less than 5 minutes.
|
||||
|
||||
Automated Deployment using Kubernetes
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
Local
|
||||
^^^^^
|
||||
|
||||
The `mintnet-kubernetes tool <https://github.com/tendermint/tools/tree/master/mintnet-kubernetes>`__
|
||||
allows automating the deployment of a Tendermint network on an already
|
||||
provisioned Kubernetes cluster. For simple provisioning of a Kubernetes
|
||||
cluster, check out the `Google Cloud Platform <https://cloud.google.com/>`__.
|
||||
|
||||
Automated Deployment using Terraform and Ansible
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
The `terraform-digitalocean tool <https://github.com/tendermint/tools/tree/master/terraform-digitalocean>`__
|
||||
allows creating a set of servers on the DigitalOcean cloud.
|
||||
|
||||
The `ansible playbooks <https://github.com/tendermint/tools/tree/master/ansible>`__
|
||||
allow creating and managing a ``basecoin`` or ``ethermint`` testnet on provisioned servers.
|
||||
|
||||
Package Deployment on Linux for developers
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
The ``tendermint`` and ``basecoin`` applications can be installed from RPM or DEB packages on
|
||||
Linux machines for development purposes. The packages are configured to be validators on the
|
||||
one-node network that the machine represents. The services are not started after installation,
|
||||
this way giving an opportunity to reconfigure the applications before starting.
|
||||
|
||||
The Ansible playbooks in the previous section use this repository to install ``basecoin``.
|
||||
After installation, additional steps are executed to make sure that the multi-node testnet has
|
||||
the right configuration before start.
|
||||
|
||||
Install from the CentOS/RedHat repository:
|
||||
With ``docker`` and ``docker-compose`` installed, run the command:
|
||||
|
||||
::
|
||||
|
||||
rpm --import https://tendermint-packages.interblock.io/centos/7/os/x86_64/RPM-GPG-KEY-Tendermint
|
||||
wget -O /etc/yum.repos.d/tendermint.repo https://tendermint-packages.interblock.io/centos/7/os/x86_64/tendermint.repo
|
||||
yum install basecoin
|
||||
make localnet-start
|
||||
|
||||
Install from the Debian/Ubuntu repository:
|
||||
from the root of the tendermint repository. This will spin up a 4-node local testnet.
|
||||
|
||||
::
|
||||
|
||||
wget -O - https://tendermint-packages.interblock.io/centos/7/os/x86_64/RPM-GPG-KEY-Tendermint | apt-key add -
|
||||
wget -O /etc/apt/sources.list.d/tendermint.list https://tendermint-packages.interblock.io/debian/tendermint.list
|
||||
apt-get update && apt-get install basecoin
|
||||
Cloud
|
||||
^^^^^
|
||||
|
||||
See the `next section <./terraform-and-ansible.html>`__ for details.
|
||||
|
@@ -10,10 +10,10 @@ documentation](http://tendermint.readthedocs.io/en/master/).
|
||||
|
||||
### Quick Install
|
||||
|
||||
On a fresh Ubuntu 16.04 machine can be done with [this script](https://git.io/vNLfY), like so:
|
||||
On a fresh Ubuntu 16.04 machine, installation can be done with [this script](https://git.io/vpgEI), like so:
|
||||
|
||||
```
|
||||
curl -L https://git.io/vxWlX | bash
|
||||
curl -L https://git.io/vpgEI | bash
|
||||
source ~/.profile
|
||||
```
|
||||
|
||||
@@ -24,7 +24,7 @@ The script is also used to facilitate cluster deployment below.
|
||||
### Manual Install
|
||||
|
||||
Requires:
|
||||
- `go` minimum version 1.9
|
||||
- `go` minimum version 1.10
|
||||
- `$GOPATH` environment variable must be set
|
||||
- `$GOPATH/bin` must be on your `$PATH` (see https://github.com/tendermint/tendermint/wiki/Setting-GOPATH)
|
||||
|
||||
@@ -125,7 +125,7 @@ addresses below as IP1, IP2, IP3, IP4.
|
||||
Then, `ssh` into each machine, and execute [this script](https://git.io/vNLfY):
|
||||
|
||||
```
|
||||
curl -L https://git.io/vNLfY | bash
|
||||
curl -L https://git.io/vpgEI | bash
|
||||
source ~/.profile
|
||||
```
|
||||
|
||||
@@ -134,10 +134,10 @@ This will install `go` and other dependencies, get the Tendermint source code, t
|
||||
Next, `cd` into `docs/examples`. Each command below should be run from each node, in sequence:
|
||||
|
||||
```
|
||||
tendermint node --home ./node1 --proxy_app=kvstore --p2p.persistent_peers="3a558bd6f8c97453aa6c2372bb800e8b6ed8e6db@IP1:46656,ccf30d873fddda10a495f42687c8f33472a6569f@IP2:46656,9a4c3de5d6788a76c6ee3cd9ff41e3b45b4cfd14@IP3:46656,58e6f2ab297b3ceae107ba4c8c2898da5c009ff4@IP4:46656"
|
||||
tendermint node --home ./node2 --proxy_app=kvstore --p2p.persistent_peers="3a558bd6f8c97453aa6c2372bb800e8b6ed8e6db@IP1:46656,ccf30d873fddda10a495f42687c8f33472a6569f@IP2:46656,9a4c3de5d6788a76c6ee3cd9ff41e3b45b4cfd14@IP3:46656,58e6f2ab297b3ceae107ba4c8c2898da5c009ff4@IP4:46656"
|
||||
tendermint node --home ./node3 --proxy_app=kvstore --p2p.persistent_peers="3a558bd6f8c97453aa6c2372bb800e8b6ed8e6db@IP1:46656,ccf30d873fddda10a495f42687c8f33472a6569f@IP2:46656,9a4c3de5d6788a76c6ee3cd9ff41e3b45b4cfd14@IP3:46656,58e6f2ab297b3ceae107ba4c8c2898da5c009ff4@IP4:46656"
|
||||
tendermint node --home ./node4 --proxy_app=kvstore --p2p.persistent_peers="3a558bd6f8c97453aa6c2372bb800e8b6ed8e6db@IP1:46656,ccf30d873fddda10a495f42687c8f33472a6569f@IP2:46656,9a4c3de5d6788a76c6ee3cd9ff41e3b45b4cfd14@IP3:46656,58e6f2ab297b3ceae107ba4c8c2898da5c009ff4@IP4:46656"
|
||||
tendermint node --home ./node0 --proxy_app=kvstore --p2p.persistent_peers="167b80242c300bf0ccfb3ced3dec60dc2a81776e@IP1:46656,3c7a5920811550c04bf7a0b2f1e02ab52317b5e6@IP2:46656,303a1a4312c30525c99ba66522dd81cca56a361a@IP3:46656,b686c2a7f4b1b46dca96af3a0f31a6a7beae0be4@IP4:46656"
|
||||
tendermint node --home ./node1 --proxy_app=kvstore --p2p.persistent_peers="167b80242c300bf0ccfb3ced3dec60dc2a81776e@IP1:46656,3c7a5920811550c04bf7a0b2f1e02ab52317b5e6@IP2:46656,303a1a4312c30525c99ba66522dd81cca56a361a@IP3:46656,b686c2a7f4b1b46dca96af3a0f31a6a7beae0be4@IP4:46656"
|
||||
tendermint node --home ./node2 --proxy_app=kvstore --p2p.persistent_peers="167b80242c300bf0ccfb3ced3dec60dc2a81776e@IP1:46656,3c7a5920811550c04bf7a0b2f1e02ab52317b5e6@IP2:46656,303a1a4312c30525c99ba66522dd81cca56a361a@IP3:46656,b686c2a7f4b1b46dca96af3a0f31a6a7beae0be4@IP4:46656"
|
||||
tendermint node --home ./node3 --proxy_app=kvstore --p2p.persistent_peers="167b80242c300bf0ccfb3ced3dec60dc2a81776e@IP1:46656,3c7a5920811550c04bf7a0b2f1e02ab52317b5e6@IP2:46656,303a1a4312c30525c99ba66522dd81cca56a361a@IP3:46656,b686c2a7f4b1b46dca96af3a0f31a6a7beae0be4@IP4:46656"
|
||||
```
|
||||
|
||||
Note that after the third node is started, blocks will start to stream in
|
||||
|
69
docs/examples/init_testnet.sh
Normal file
@@ -0,0 +1,69 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# make all the files
|
||||
tendermint init --home ./tester/node0
|
||||
tendermint init --home ./tester/node1
|
||||
tendermint init --home ./tester/node2
|
||||
tendermint init --home ./tester/node3
|
||||
|
||||
file0=./tester/node0/config/genesis.json
|
||||
file1=./tester/node1/config/genesis.json
|
||||
file2=./tester/node2/config/genesis.json
|
||||
file3=./tester/node3/config/genesis.json
|
||||
|
||||
genesis_time=`cat $file0 | jq '.genesis_time'`
|
||||
chain_id=`cat $file0 | jq '.chain_id'`
|
||||
|
||||
value0=`cat $file0 | jq '.validators[0].pub_key.value'`
|
||||
value1=`cat $file1 | jq '.validators[0].pub_key.value'`
|
||||
value2=`cat $file2 | jq '.validators[0].pub_key.value'`
|
||||
value3=`cat $file3 | jq '.validators[0].pub_key.value'`
|
||||
|
||||
rm $file0
|
||||
rm $file1
|
||||
rm $file2
|
||||
rm $file3
|
||||
|
||||
echo "{
|
||||
\"genesis_time\": $genesis_time,
|
||||
\"chain_id\": $chain_id,
|
||||
\"validators\": [
|
||||
{
|
||||
\"pub_key\": {
|
||||
\"type\": \"AC26791624DE60\",
|
||||
\"value\": $value0
|
||||
},
|
||||
\"power:\": 10,
|
||||
\"name\":, \"\"
|
||||
},
|
||||
{
|
||||
\"pub_key\": {
|
||||
\"type\": \"AC26791624DE60\",
|
||||
\"value\": $value1
|
||||
},
|
||||
\"power:\": 10,
|
||||
\"name\":, \"\"
|
||||
},
|
||||
{
|
||||
\"pub_key\": {
|
||||
\"type\": \"AC26791624DE60\",
|
||||
\"value\": $value2
|
||||
},
|
||||
\"power:\": 10,
|
||||
\"name\":, \"\"
|
||||
},
|
||||
{
|
||||
\"pub_key\": {
|
||||
\"type\": \"AC26791624DE60\",
|
||||
\"value\": $value3
|
||||
},
|
||||
\"power:\": 10,
|
||||
\"name\":, \"\"
|
||||
}
|
||||
],
|
||||
\"app_hash\": \"\"
|
||||
}" >> $file0
|
||||
|
||||
cp $file0 $file1
|
||||
cp $file0 $file2
|
||||
cp $file2 $file3
|
@@ -26,7 +26,7 @@ go get $REPO
|
||||
cd $GOPATH/src/$REPO
|
||||
|
||||
## build
|
||||
git checkout v0.18.0
|
||||
git checkout master
|
||||
make get_tools
|
||||
make get_vendor_deps
|
||||
make install
|
||||
|
166
docs/examples/node0/config/config.toml
Normal file
@@ -0,0 +1,166 @@
|
||||
# This is a TOML config file.
|
||||
# For more information, see https://github.com/toml-lang/toml
|
||||
|
||||
##### main base config options #####
|
||||
|
||||
# TCP or UNIX socket address of the ABCI application,
|
||||
# or the name of an ABCI application compiled in with the Tendermint binary
|
||||
proxy_app = "tcp://127.0.0.1:46658"
|
||||
|
||||
# A custom human readable name for this node
|
||||
moniker = "alpha"
|
||||
|
||||
# If this node is many blocks behind the tip of the chain, FastSync
|
||||
# allows it to catch up quickly by downloading blocks in parallel
|
||||
# and verifying their commits
|
||||
fast_sync = true
|
||||
|
||||
# Database backend: leveldb | memdb
|
||||
db_backend = "leveldb"
|
||||
|
||||
# Database directory
|
||||
db_path = "data"
|
||||
|
||||
# Output level for logging, including package level options
|
||||
log_level = "main:info,state:info,*:error"
|
||||
|
||||
##### additional base config options #####
|
||||
|
||||
# Path to the JSON file containing the initial validator set and other meta data
|
||||
genesis_file = "config/genesis.json"
|
||||
|
||||
# Path to the JSON file containing the private key to use as a validator in the consensus protocol
|
||||
priv_validator_file = "config/priv_validator.json"
|
||||
|
||||
# Path to the JSON file containing the private key to use for node authentication in the p2p protocol
|
||||
node_key_file = "config/node_key.json"
|
||||
|
||||
# Mechanism to connect to the ABCI application: socket | grpc
|
||||
abci = "socket"
|
||||
|
||||
# TCP or UNIX socket address for the profiling server to listen on
|
||||
prof_laddr = ""
|
||||
|
||||
# If true, query the ABCI app on connecting to a new peer
|
||||
# so the app can decide if we should keep the connection or not
|
||||
filter_peers = false
|
||||
|
||||
##### advanced configuration options #####
|
||||
|
||||
##### rpc server configuration options #####
|
||||
[rpc]
|
||||
|
||||
# TCP or UNIX socket address for the RPC server to listen on
|
||||
laddr = "tcp://0.0.0.0:46657"
|
||||
|
||||
# TCP or UNIX socket address for the gRPC server to listen on
|
||||
# NOTE: This server only supports /broadcast_tx_commit
|
||||
grpc_laddr = ""
|
||||
|
||||
# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool
|
||||
unsafe = false
|
||||
|
||||
##### peer to peer configuration options #####
|
||||
[p2p]
|
||||
|
||||
# Address to listen for incoming connections
|
||||
laddr = "tcp://0.0.0.0:46656"
|
||||
|
||||
# Comma separated list of seed nodes to connect to
|
||||
seeds = ""
|
||||
|
||||
# Comma separated list of nodes to keep persistent connections to
|
||||
# Do not add private peers to this list if you don't want them advertised
|
||||
persistent_peers = ""
|
||||
|
||||
# Path to address book
|
||||
addr_book_file = "config/addrbook.json"
|
||||
|
||||
# Set true for strict address routability rules
|
||||
addr_book_strict = true
|
||||
|
||||
# Time to wait before flushing messages out on the connection, in ms
|
||||
flush_throttle_timeout = 100
|
||||
|
||||
# Maximum number of peers to connect to
|
||||
max_num_peers = 50
|
||||
|
||||
# Maximum size of a message packet payload, in bytes
|
||||
max_packet_msg_payload_size = 1024
|
||||
|
||||
# Rate at which packets can be sent, in bytes/second
|
||||
send_rate = 512000
|
||||
|
||||
# Rate at which packets can be received, in bytes/second
|
||||
recv_rate = 512000
|
||||
|
||||
# Set true to enable the peer-exchange reactor
|
||||
pex = true
|
||||
|
||||
# Seed mode, in which node constantly crawls the network and looks for
|
||||
# peers. If another node asks it for addresses, it responds and disconnects.
|
||||
#
|
||||
# Does not work if the peer-exchange reactor is disabled.
|
||||
seed_mode = false
|
||||
|
||||
# Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
|
||||
private_peer_ids = ""
|
||||
|
||||
##### mempool configuration options #####
|
||||
[mempool]
|
||||
|
||||
recheck = true
|
||||
recheck_empty = true
|
||||
broadcast = true
|
||||
wal_dir = "data/mempool.wal"
|
||||
|
||||
##### consensus configuration options #####
|
||||
[consensus]
|
||||
|
||||
wal_file = "data/cs.wal/wal"
|
||||
|
||||
# All timeouts are in milliseconds
|
||||
timeout_propose = 3000
|
||||
timeout_propose_delta = 500
|
||||
timeout_prevote = 1000
|
||||
timeout_prevote_delta = 500
|
||||
timeout_precommit = 1000
|
||||
timeout_precommit_delta = 500
|
||||
timeout_commit = 1000
|
||||
|
||||
# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
|
||||
skip_timeout_commit = false
|
||||
|
||||
# BlockSize
|
||||
max_block_size_txs = 10000
|
||||
max_block_size_bytes = 1
|
||||
|
||||
# EmptyBlocks mode and possible interval between empty blocks in seconds
|
||||
create_empty_blocks = true
|
||||
create_empty_blocks_interval = 0
|
||||
|
||||
# Reactor sleep duration parameters are in milliseconds
|
||||
peer_gossip_sleep_duration = 100
|
||||
peer_query_maj23_sleep_duration = 2000
|
||||
|
||||
##### transactions indexer configuration options #####
|
||||
[tx_index]
|
||||
|
||||
# What indexer to use for transactions
|
||||
#
|
||||
# Options:
|
||||
# 1) "null" (default)
|
||||
# 2) "kv" - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
|
||||
indexer = "kv"
|
||||
|
||||
# Comma-separated list of tags to index (by default the only tag is tx hash)
|
||||
#
|
||||
# It's recommended to index only a subset of tags due to possible memory
|
||||
# bloat. This, of course, depends on the indexer's DB and the volume of
|
||||
# transactions.
|
||||
index_tags = ""
|
||||
|
||||
# When set to true, tells indexer to index all tags. Note this may not be
|
||||
# desirable (see the comment above). IndexTags has a precedence over
|
||||
# IndexAllTags (i.e. when given both, IndexTags will be indexed).
|
||||
index_all_tags = false
|
39
docs/examples/node0/config/genesis.json
Normal file
@@ -0,0 +1,39 @@
|
||||
{
|
||||
"genesis_time": "0001-01-01T00:00:00Z",
|
||||
"chain_id": "test-chain-A2i3OZ",
|
||||
"validators": [
|
||||
{
|
||||
"pub_key": {
|
||||
"type": "AC26791624DE60",
|
||||
"value": "D+k4AdjnYPWbB9wmad137Bdpo/kAulOoTRQrLy/Qc4k="
|
||||
},
|
||||
"power": 10,
|
||||
"name": ""
|
||||
},
|
||||
{
|
||||
"pub_key": {
|
||||
"type": "AC26791624DE60",
|
||||
"value": "b56N5GCR1adcVRuENjfKw/mrm2dkhT7wNZXV/SDsKsU="
|
||||
},
|
||||
"power": 10,
|
||||
"name": ""
|
||||
},
|
||||
{
|
||||
"pub_key": {
|
||||
"type": "AC26791624DE60",
|
||||
"value": "IgZDpJvGA0TAamicA8ircy+RX/BkUlj6DXwM791ywIU="
|
||||
},
|
||||
"power": 10,
|
||||
"name": ""
|
||||
},
|
||||
{
|
||||
"pub_key": {
|
||||
"type": "AC26791624DE60",
|
||||
"value": "KGAZfxZvIZ7abbeIQ85U1ECG6+I62KSdaH8ulc0+OiU="
|
||||
},
|
||||
"power": 10,
|
||||
"name": ""
|
||||
}
|
||||
],
|
||||
"app_hash": ""
|
||||
}
|
1
docs/examples/node0/config/node_key.json
Normal file
@@ -0,0 +1 @@
|
||||
{"priv_key":{"type":"954568A3288910","value":"7lY+k6EDllG8Q9gVbF5313t/ag2YGkBVKdVa0YHJ9xO5k0w3Q/hke0Z7UFT1KgVDGRUEKzwAwwjwFQUvgF0ZWg=="}}
|
14
docs/examples/node0/config/priv_validator.json
Normal file
@@ -0,0 +1,14 @@
|
||||
{
|
||||
"address": "122A9414774A2FCAD026201DA477EF3F41970EF0",
|
||||
"pub_key": {
|
||||
"type": "AC26791624DE60",
|
||||
"value": "D+k4AdjnYPWbB9wmad137Bdpo/kAulOoTRQrLy/Qc4k="
|
||||
},
|
||||
"last_height": 0,
|
||||
"last_round": 0,
|
||||
"last_step": 0,
|
||||
"priv_key": {
|
||||
"type": "954568A3288910",
|
||||
"value": "YLxp3ho+kySgAnzjBptbxDzSGw2ntGZLsIHQsaVxY/cP6TgB2Odg9ZsH3CZp3XfsF2mj+QC6U6hNFCsvL9BziQ=="
|
||||
}
|
||||
}
|
@@ -1,15 +0,0 @@
|
||||
# This is a TOML config file.
|
||||
# For more information, see https://github.com/toml-lang/toml
|
||||
|
||||
proxy_app = "tcp://127.0.0.1:46658"
|
||||
moniker = "penguin"
|
||||
fast_sync = true
|
||||
db_backend = "leveldb"
|
||||
log_level = "state:info,*:error"
|
||||
|
||||
[rpc]
|
||||
laddr = "tcp://0.0.0.0:46657"
|
||||
|
||||
[p2p]
|
||||
laddr = "tcp://0.0.0.0:46656"
|
||||
seeds = ""
|
166
docs/examples/node1/config/config.toml
Normal file
@@ -0,0 +1,166 @@
|
||||
# This is a TOML config file.
|
||||
# For more information, see https://github.com/toml-lang/toml
|
||||
|
||||
##### main base config options #####
|
||||
|
||||
# TCP or UNIX socket address of the ABCI application,
|
||||
# or the name of an ABCI application compiled in with the Tendermint binary
|
||||
proxy_app = "tcp://127.0.0.1:46658"
|
||||
|
||||
# A custom human readable name for this node
|
||||
moniker = "bravo"
|
||||
|
||||
# If this node is many blocks behind the tip of the chain, FastSync
|
||||
# allows it to catch up quickly by downloading blocks in parallel
|
||||
# and verifying their commits
|
||||
fast_sync = true
|
||||
|
||||
# Database backend: leveldb | memdb
|
||||
db_backend = "leveldb"
|
||||
|
||||
# Database directory
|
||||
db_path = "data"
|
||||
|
||||
# Output level for logging, including package level options
|
||||
log_level = "main:info,state:info,*:error"
|
||||
|
||||
##### additional base config options #####
|
||||
|
||||
# Path to the JSON file containing the initial validator set and other meta data
|
||||
genesis_file = "config/genesis.json"
|
||||
|
||||
# Path to the JSON file containing the private key to use as a validator in the consensus protocol
|
||||
priv_validator_file = "config/priv_validator.json"
|
||||
|
||||
# Path to the JSON file containing the private key to use for node authentication in the p2p protocol
|
||||
node_key_file = "config/node_key.json"
|
||||
|
||||
# Mechanism to connect to the ABCI application: socket | grpc
|
||||
abci = "socket"
|
||||
|
||||
# TCP or UNIX socket address for the profiling server to listen on
|
||||
prof_laddr = ""
|
||||
|
||||
# If true, query the ABCI app on connecting to a new peer
|
||||
# so the app can decide if we should keep the connection or not
|
||||
filter_peers = false
|
||||
|
||||
##### advanced configuration options #####
|
||||
|
||||
##### rpc server configuration options #####
|
||||
[rpc]
|
||||
|
||||
# TCP or UNIX socket address for the RPC server to listen on
|
||||
laddr = "tcp://0.0.0.0:46657"
|
||||
|
||||
# TCP or UNIX socket address for the gRPC server to listen on
|
||||
# NOTE: This server only supports /broadcast_tx_commit
|
||||
grpc_laddr = ""
|
||||
|
||||
# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool
|
||||
unsafe = false
|
||||
|
||||
##### peer to peer configuration options #####
|
||||
[p2p]
|
||||
|
||||
# Address to listen for incoming connections
|
||||
laddr = "tcp://0.0.0.0:46656"
|
||||
|
||||
# Comma separated list of seed nodes to connect to
|
||||
seeds = ""
|
||||
|
||||
# Comma separated list of nodes to keep persistent connections to
|
||||
# Do not add private peers to this list if you don't want them advertised
|
||||
persistent_peers = ""
|
||||
|
||||
# Path to address book
|
||||
addr_book_file = "config/addrbook.json"
|
||||
|
||||
# Set true for strict address routability rules
|
||||
addr_book_strict = true
|
||||
|
||||
# Time to wait before flushing messages out on the connection, in ms
|
||||
flush_throttle_timeout = 100
|
||||
|
||||
# Maximum number of peers to connect to
|
||||
max_num_peers = 50
|
||||
|
||||
# Maximum size of a message packet payload, in bytes
|
||||
max_packet_msg_payload_size = 1024
|
||||
|
||||
# Rate at which packets can be sent, in bytes/second
|
||||
send_rate = 512000
|
||||
|
||||
# Rate at which packets can be received, in bytes/second
|
||||
recv_rate = 512000
|
||||
|
||||
# Set true to enable the peer-exchange reactor
|
||||
pex = true
|
||||
|
||||
# Seed mode, in which node constantly crawls the network and looks for
|
||||
# peers. If another node asks it for addresses, it responds and disconnects.
|
||||
#
|
||||
# Does not work if the peer-exchange reactor is disabled.
|
||||
seed_mode = false
|
||||
|
||||
# Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
|
||||
private_peer_ids = ""
|
||||
|
||||
##### mempool configuration options #####
|
||||
[mempool]
|
||||
|
||||
recheck = true
|
||||
recheck_empty = true
|
||||
broadcast = true
|
||||
wal_dir = "data/mempool.wal"
|
||||
|
||||
##### consensus configuration options #####
|
||||
[consensus]
|
||||
|
||||
wal_file = "data/cs.wal/wal"
|
||||
|
||||
# All timeouts are in milliseconds
|
||||
timeout_propose = 3000
|
||||
timeout_propose_delta = 500
|
||||
timeout_prevote = 1000
|
||||
timeout_prevote_delta = 500
|
||||
timeout_precommit = 1000
|
||||
timeout_precommit_delta = 500
|
||||
timeout_commit = 1000
|
||||
|
||||
# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
|
||||
skip_timeout_commit = false
|
||||
|
||||
# BlockSize
|
||||
max_block_size_txs = 10000
|
||||
max_block_size_bytes = 1
|
||||
|
||||
# EmptyBlocks mode and possible interval between empty blocks in seconds
|
||||
create_empty_blocks = true
|
||||
create_empty_blocks_interval = 0
|
||||
|
||||
# Reactor sleep duration parameters are in milliseconds
|
||||
peer_gossip_sleep_duration = 100
|
||||
peer_query_maj23_sleep_duration = 2000
|
||||
|
||||
##### transactions indexer configuration options #####
|
||||
[tx_index]
|
||||
|
||||
# What indexer to use for transactions
|
||||
#
|
||||
# Options:
|
||||
# 1) "null" (default)
|
||||
# 2) "kv" - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
|
||||
indexer = "kv"
|
||||
|
||||
# Comma-separated list of tags to index (by default the only tag is tx hash)
|
||||
#
|
||||
# It's recommended to index only a subset of tags due to possible memory
|
||||
# bloat. This, of course, depends on the indexer's DB and the volume of
|
||||
# transactions.
|
||||
index_tags = ""
|
||||
|
||||
# When set to true, tells indexer to index all tags. Note this may not be
|
||||
# desirable (see the comment above). IndexTags has a precedence over
|
||||
# IndexAllTags (i.e. when given both, IndexTags will be indexed).
|
||||
index_all_tags = false
|
39
docs/examples/node1/config/genesis.json
Normal file
@@ -0,0 +1,39 @@
|
||||
{
|
||||
"genesis_time": "0001-01-01T00:00:00Z",
|
||||
"chain_id": "test-chain-A2i3OZ",
|
||||
"validators": [
|
||||
{
|
||||
"pub_key": {
|
||||
"type": "AC26791624DE60",
|
||||
"value": "D+k4AdjnYPWbB9wmad137Bdpo/kAulOoTRQrLy/Qc4k="
|
||||
},
|
||||
"power": 10,
|
||||
"name": ""
|
||||
},
|
||||
{
|
||||
"pub_key": {
|
||||
"type": "AC26791624DE60",
|
||||
"value": "b56N5GCR1adcVRuENjfKw/mrm2dkhT7wNZXV/SDsKsU="
|
||||
},
|
||||
"power": 10,
|
||||
"name": ""
|
||||
},
|
||||
{
|
||||
"pub_key": {
|
||||
"type": "AC26791624DE60",
|
||||
"value": "IgZDpJvGA0TAamicA8ircy+RX/BkUlj6DXwM791ywIU="
|
||||
},
|
||||
"power": 10,
|
||||
"name": ""
|
||||
},
|
||||
{
|
||||
"pub_key": {
|
||||
"type": "AC26791624DE60",
|
||||
"value": "KGAZfxZvIZ7abbeIQ85U1ECG6+I62KSdaH8ulc0+OiU="
|
||||
},
|
||||
"power": 10,
|
||||
"name": ""
|
||||
}
|
||||
],
|
||||
"app_hash": ""
|
||||
}
|
1
docs/examples/node1/config/node_key.json
Normal file
@@ -0,0 +1 @@
|
||||
{"priv_key":{"type":"954568A3288910","value":"H71dc/TIG7nTselfa9nG0WRArXLKYnm7P5eFCk2lk8ASKQ3sIHpbdxCSHQD/RcdHe7TiabJeuOssNPvPWiyQEQ=="}}
|
14
docs/examples/node1/config/priv_validator.json
Normal file
@@ -0,0 +1,14 @@
|
||||
{
|
||||
"address": "BEA1B57F5806CF9AC4D54C8CF806DED5C0F102E1",
|
||||
"pub_key": {
|
||||
"type": "AC26791624DE60",
|
||||
"value": "b56N5GCR1adcVRuENjfKw/mrm2dkhT7wNZXV/SDsKsU="
|
||||
},
|
||||
"last_height": 0,
|
||||
"last_round": 0,
|
||||
"last_step": 0,
|
||||
"priv_key": {
|
||||
"type": "954568A3288910",
|
||||
"value": "o0IqrHSPtd5YqGefodWxpJuRzvuVBjgbH785vbMgk7Vvno3kYJHVp1xVG4Q2N8rD+aubZ2SFPvA1ldX9IOwqxQ=="
|
||||
}
|
||||
}
|
@@ -1,42 +0,0 @@
|
||||
{
|
||||
"genesis_time":"0001-01-01T00:00:00Z",
|
||||
"chain_id":"test-chain-wt7apy",
|
||||
"validators":[
|
||||
{
|
||||
"pub_key":{
|
||||
"type":"ed25519",
|
||||
"data":"F08446C80A33E10D620E21450821B58D053778528F2B583D423B3E46EC647D30"
|
||||
},
|
||||
"power":10,
|
||||
"name":"node1"
|
||||
}
|
||||
,
|
||||
{
|
||||
"pub_key":{
|
||||
"type":"ed25519",
|
||||
"data": "A8423F70A9E512643B4B00F7C3701ECAD1F31B0A1FAA45852C41046353B9A07F"
|
||||
},
|
||||
"power":10,
|
||||
"name":"node2"
|
||||
}
|
||||
,
|
||||
{
|
||||
"pub_key":{
|
||||
"type":"ed25519",
|
||||
"data": "E52EFFAEDFE1D618ECDA71DE3B23592B3612CAABA0C10826E4C3120B2198C29A"
|
||||
},
|
||||
"power":10,
|
||||
"name":"node3"
|
||||
}
|
||||
,
|
||||
{
|
||||
"pub_key":{
|
||||
"type":"ed25519",
|
||||
"data": "2B8FC09C07955A02998DFE5AF1AAD1C44115ECA7635FF51A867CF4265D347C07"
|
||||
},
|
||||
"power":10,
|
||||
"name":"node4"
|
||||
}
|
||||
],
|
||||
"app_hash":""
|
||||
}
|
@@ -1,6 +0,0 @@
|
||||
{
|
||||
"priv_key" : {
|
||||
"data" : "DA9BAABEA7211A6D93D9A1986B4279EAB3021FAA1653D459D53E6AB4D1CFB4C69BF7D52E48CF00AC5779AA0A6D3C368955D5636A677F72370B8ED19989714CFC",
|
||||
"type" : "ed25519"
|
||||
}
|
||||
}
|
@@ -1,15 +0,0 @@
|
||||
{
|
||||
"address":"4DC2756029CE0D8F8C6C3E4C3CE6EE8C30AF352F",
|
||||
"pub_key":{
|
||||
"type":"ed25519",
|
||||
"data":"F08446C80A33E10D620E21450821B58D053778528F2B583D423B3E46EC647D30"
|
||||
},
|
||||
"last_height":0,
|
||||
"last_round":0,
|
||||
"last_step":0,
|
||||
"last_signature":null,
|
||||
"priv_key":{
|
||||
"type":"ed25519",
|
||||
"data":"4D3648E1D93C8703E436BFF814728B6BD270CFDFD686DF5385E8ACBEB7BE2D7DF08446C80A33E10D620E21450821B58D053778528F2B583D423B3E46EC647D30"
|
||||
}
|
||||
}
|
@@ -1,15 +0,0 @@
|
||||
# This is a TOML config file.
|
||||
# For more information, see https://github.com/toml-lang/toml
|
||||
|
||||
proxy_app = "tcp://127.0.0.1:46658"
|
||||
moniker = "penguin"
|
||||
fast_sync = true
|
||||
db_backend = "leveldb"
|
||||
log_level = "state:info,*:error"
|
||||
|
||||
[rpc]
|
||||
laddr = "tcp://0.0.0.0:46657"
|
||||
|
||||
[p2p]
|
||||
laddr = "tcp://0.0.0.0:46656"
|
||||
seeds = ""
|
166
docs/examples/node2/config/config.toml
Normal file
@@ -0,0 +1,166 @@
|
||||
# This is a TOML config file.
|
||||
# For more information, see https://github.com/toml-lang/toml
|
||||
|
||||
##### main base config options #####
|
||||
|
||||
# TCP or UNIX socket address of the ABCI application,
|
||||
# or the name of an ABCI application compiled in with the Tendermint binary
|
||||
proxy_app = "tcp://127.0.0.1:46658"
|
||||
|
||||
# A custom human readable name for this node
|
||||
moniker = "charlie"
|
||||
|
||||
# If this node is many blocks behind the tip of the chain, FastSync
|
||||
# allows it to catch up quickly by downloading blocks in parallel
|
||||
# and verifying their commits
|
||||
fast_sync = true
|
||||
|
||||
# Database backend: leveldb | memdb
|
||||
db_backend = "leveldb"
|
||||
|
||||
# Database directory
|
||||
db_path = "data"
|
||||
|
||||
# Output level for logging, including package level options
|
||||
log_level = "main:info,state:info,*:error"
|
||||
|
||||
##### additional base config options #####
|
||||
|
||||
# Path to the JSON file containing the initial validator set and other meta data
|
||||
genesis_file = "config/genesis.json"
|
||||
|
||||
# Path to the JSON file containing the private key to use as a validator in the consensus protocol
|
||||
priv_validator_file = "config/priv_validator.json"
|
||||
|
||||
# Path to the JSON file containing the private key to use for node authentication in the p2p protocol
|
||||
node_key_file = "config/node_key.json"
|
||||
|
||||
# Mechanism to connect to the ABCI application: socket | grpc
|
||||
abci = "socket"
|
||||
|
||||
# TCP or UNIX socket address for the profiling server to listen on
|
||||
prof_laddr = ""
|
||||
|
||||
# If true, query the ABCI app on connecting to a new peer
|
||||
# so the app can decide if we should keep the connection or not
|
||||
filter_peers = false
|
||||
|
||||
##### advanced configuration options #####
|
||||
|
||||
##### rpc server configuration options #####
|
||||
[rpc]
|
||||
|
||||
# TCP or UNIX socket address for the RPC server to listen on
|
||||
laddr = "tcp://0.0.0.0:46657"
|
||||
|
||||
# TCP or UNIX socket address for the gRPC server to listen on
|
||||
# NOTE: This server only supports /broadcast_tx_commit
|
||||
grpc_laddr = ""
|
||||
|
||||
# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool
|
||||
unsafe = false
|
||||
|
||||
##### peer to peer configuration options #####
|
||||
[p2p]
|
||||
|
||||
# Address to listen for incoming connections
|
||||
laddr = "tcp://0.0.0.0:46656"
|
||||
|
||||
# Comma separated list of seed nodes to connect to
|
||||
seeds = ""
|
||||
|
||||
# Comma separated list of nodes to keep persistent connections to
|
||||
# Do not add private peers to this list if you don't want them advertised
|
||||
persistent_peers = ""
|
||||
|
||||
# Path to address book
|
||||
addr_book_file = "config/addrbook.json"
|
||||
|
||||
# Set true for strict address routability rules
|
||||
addr_book_strict = true
|
||||
|
||||
# Time to wait before flushing messages out on the connection, in ms
|
||||
flush_throttle_timeout = 100
|
||||
|
||||
# Maximum number of peers to connect to
|
||||
max_num_peers = 50
|
||||
|
||||
# Maximum size of a message packet payload, in bytes
|
||||
max_packet_msg_payload_size = 1024
|
||||
|
||||
# Rate at which packets can be sent, in bytes/second
|
||||
send_rate = 512000
|
||||
|
||||
# Rate at which packets can be received, in bytes/second
|
||||
recv_rate = 512000
|
||||
|
||||
# Set true to enable the peer-exchange reactor
|
||||
pex = true
|
||||
|
||||
# Seed mode, in which node constantly crawls the network and looks for
|
||||
# peers. If another node asks it for addresses, it responds and disconnects.
|
||||
#
|
||||
# Does not work if the peer-exchange reactor is disabled.
|
||||
seed_mode = false
|
||||
|
||||
# Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
|
||||
private_peer_ids = ""
|
||||
|
||||
##### mempool configuration options #####
|
||||
[mempool]
|
||||
|
||||
recheck = true
|
||||
recheck_empty = true
|
||||
broadcast = true
|
||||
wal_dir = "data/mempool.wal"
|
||||
|
||||
##### consensus configuration options #####
|
||||
[consensus]
|
||||
|
||||
wal_file = "data/cs.wal/wal"
|
||||
|
||||
# All timeouts are in milliseconds
|
||||
timeout_propose = 3000
|
||||
timeout_propose_delta = 500
|
||||
timeout_prevote = 1000
|
||||
timeout_prevote_delta = 500
|
||||
timeout_precommit = 1000
|
||||
timeout_precommit_delta = 500
|
||||
timeout_commit = 1000
|
||||
|
||||
# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
|
||||
skip_timeout_commit = false
|
||||
|
||||
# BlockSize
|
||||
max_block_size_txs = 10000
|
||||
max_block_size_bytes = 1
|
||||
|
||||
# EmptyBlocks mode and possible interval between empty blocks in seconds
|
||||
create_empty_blocks = true
|
||||
create_empty_blocks_interval = 0
|
||||
|
||||
# Reactor sleep duration parameters are in milliseconds
|
||||
peer_gossip_sleep_duration = 100
|
||||
peer_query_maj23_sleep_duration = 2000
|
||||
|
||||
##### transactions indexer configuration options #####
|
||||
[tx_index]
|
||||
|
||||
# What indexer to use for transactions
|
||||
#
|
||||
# Options:
|
||||
# 1) "null" (default)
|
||||
# 2) "kv" - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
|
||||
indexer = "kv"
|
||||
|
||||
# Comma-separated list of tags to index (by default the only tag is tx hash)
|
||||
#
|
||||
# It's recommended to index only a subset of tags due to possible memory
# bloat. This, of course, depends on the indexer's DB and the volume of
# transactions.
index_tags = ""
|
||||
|
||||
# When set to true, tells the indexer to index all tags. Note this may not be
# desirable (see the comment above). IndexTags takes precedence over
# IndexAllTags (i.e. when both are given, only IndexTags will be indexed).
index_all_tags = false
|
39  docs/examples/node2/config/genesis.json  Normal file
@@ -0,0 +1,39 @@
|
||||
{
|
||||
"genesis_time": "0001-01-01T00:00:00Z",
|
||||
"chain_id": "test-chain-A2i3OZ",
|
||||
"validators": [
|
||||
{
|
||||
"pub_key": {
|
||||
"type": "AC26791624DE60",
|
||||
"value": "D+k4AdjnYPWbB9wmad137Bdpo/kAulOoTRQrLy/Qc4k="
|
||||
},
|
||||
"power": 10,
|
||||
"name": ""
|
||||
},
|
||||
{
|
||||
"pub_key": {
|
||||
"type": "AC26791624DE60",
|
||||
"value": "b56N5GCR1adcVRuENjfKw/mrm2dkhT7wNZXV/SDsKsU="
|
||||
},
|
||||
"power": 10,
|
||||
"name": ""
|
||||
},
|
||||
{
|
||||
"pub_key": {
|
||||
"type": "AC26791624DE60",
|
||||
"value": "IgZDpJvGA0TAamicA8ircy+RX/BkUlj6DXwM791ywIU="
|
||||
},
|
||||
"power": 10,
|
||||
"name": ""
|
||||
},
|
||||
{
|
||||
"pub_key": {
|
||||
"type": "AC26791624DE60",
|
||||
"value": "KGAZfxZvIZ7abbeIQ85U1ECG6+I62KSdaH8ulc0+OiU="
|
||||
},
|
||||
"power": 10,
|
||||
"name": ""
|
||||
}
|
||||
],
|
||||
"app_hash": ""
|
||||
}
|
1  docs/examples/node2/config/node_key.json  Normal file
@@ -0,0 +1 @@
|
||||
{"priv_key":{"type":"954568A3288910","value":"COHZ/Y2cWGWxJNkRwtpQBt5sYvOnb6Gpz0lO46XERRJFBIdSWD5x1UMGRSTmnvW1ec5G4bMdg6zUZKOZD+vVPg=="}}
|
14  docs/examples/node2/config/priv_validator.json  Normal file
@@ -0,0 +1,14 @@
|
||||
{
|
||||
"address": "F0AA266949FB29ADA0B679C27889ED930BD1BDA1",
|
||||
"pub_key": {
|
||||
"type": "AC26791624DE60",
|
||||
"value": "IgZDpJvGA0TAamicA8ircy+RX/BkUlj6DXwM791ywIU="
|
||||
},
|
||||
"last_height": 0,
|
||||
"last_round": 0,
|
||||
"last_step": 0,
|
||||
"priv_key": {
|
||||
"type": "954568A3288910",
|
||||
"value": "khADeZ5K/8u/L99DFaZNRq8V5g+EHWbwfqFjhCrppaAiBkOkm8YDRMBqaJwDyKtzL5Ff8GRSWPoNfAzv3XLAhQ=="
|
||||
}
|
||||
}
|
@@ -1,42 +0,0 @@
|
||||
{
|
||||
"genesis_time":"0001-01-01T00:00:00Z",
|
||||
"chain_id":"test-chain-wt7apy",
|
||||
"validators":[
|
||||
{
|
||||
"pub_key":{
|
||||
"type":"ed25519",
|
||||
"data":"F08446C80A33E10D620E21450821B58D053778528F2B583D423B3E46EC647D30"
|
||||
},
|
||||
"power":10,
|
||||
"name":"node1"
|
||||
}
|
||||
,
|
||||
{
|
||||
"pub_key":{
|
||||
"type":"ed25519",
|
||||
"data": "A8423F70A9E512643B4B00F7C3701ECAD1F31B0A1FAA45852C41046353B9A07F"
|
||||
},
|
||||
"power":10,
|
||||
"name":"node2"
|
||||
}
|
||||
,
|
||||
{
|
||||
"pub_key":{
|
||||
"type":"ed25519",
|
||||
"data": "E52EFFAEDFE1D618ECDA71DE3B23592B3612CAABA0C10826E4C3120B2198C29A"
|
||||
},
|
||||
"power":10,
|
||||
"name":"node3"
|
||||
}
|
||||
,
|
||||
{
|
||||
"pub_key":{
|
||||
"type":"ed25519",
|
||||
"data": "2B8FC09C07955A02998DFE5AF1AAD1C44115ECA7635FF51A867CF4265D347C07"
|
||||
},
|
||||
"power":10,
|
||||
"name":"node4"
|
||||
}
|
||||
],
|
||||
"app_hash":""
|
||||
}
|
@@ -1,6 +0,0 @@
|
||||
{
|
||||
"priv_key" : {
|
||||
"data" : "F7BCABA165DFC0DDD50AE563EFB285BAA236EA805D35612504238A36EFA105958756442B1D9F942D7ABD259F2D59671657B6378E9C7194342A7AAA47A66D1E95",
|
||||
"type" : "ed25519"
|
||||
}
|
||||
}
|
@@ -1,15 +0,0 @@
|
||||
{
|
||||
"address": "DD6C63A762608A9DDD4A845657743777F63121D6",
|
||||
"pub_key": {
|
||||
"type": "ed25519",
|
||||
"data": "A8423F70A9E512643B4B00F7C3701ECAD1F31B0A1FAA45852C41046353B9A07F"
|
||||
},
|
||||
"last_height": 0,
|
||||
"last_round": 0,
|
||||
"last_step": 0,
|
||||
"last_signature": null,
|
||||
"priv_key": {
|
||||
"type": "ed25519",
|
||||
"data": "7B0DE666FF5E9B437D284BCE767F612381890C018B93B0A105D2E829A568DA6FA8423F70A9E512643B4B00F7C3701ECAD1F31B0A1FAA45852C41046353B9A07F"
|
||||
}
|
||||
}
|
@@ -1,15 +0,0 @@
|
||||
# This is a TOML config file.
|
||||
# For more information, see https://github.com/toml-lang/toml
|
||||
|
||||
proxy_app = "tcp://127.0.0.1:46658"
|
||||
moniker = "penguin"
|
||||
fast_sync = true
|
||||
db_backend = "leveldb"
|
||||
log_level = "state:info,*:error"
|
||||
|
||||
[rpc]
|
||||
laddr = "tcp://0.0.0.0:46657"
|
||||
|
||||
[p2p]
|
||||
laddr = "tcp://0.0.0.0:46656"
|
||||
seeds = ""
|
166  docs/examples/node3/config/config.toml  Normal file
@@ -0,0 +1,166 @@
|
||||
# This is a TOML config file.
|
||||
# For more information, see https://github.com/toml-lang/toml
|
||||
|
||||
##### main base config options #####
|
||||
|
||||
# TCP or UNIX socket address of the ABCI application,
|
||||
# or the name of an ABCI application compiled in with the Tendermint binary
|
||||
proxy_app = "tcp://127.0.0.1:46658"
|
||||
|
||||
# A custom human readable name for this node
|
||||
moniker = "delta"
|
||||
|
||||
# If this node is many blocks behind the tip of the chain, FastSync
|
||||
# allows them to catchup quickly by downloading blocks in parallel
|
||||
# and verifying their commits
|
||||
fast_sync = true
|
||||
|
||||
# Database backend: leveldb | memdb
|
||||
db_backend = "leveldb"
|
||||
|
||||
# Database directory
|
||||
db_path = "data"
|
||||
|
||||
# Output level for logging, including package level options
|
||||
log_level = "main:info,state:info,*:error"
|
||||
|
||||
##### additional base config options #####
|
||||
|
||||
# Path to the JSON file containing the initial validator set and other meta data
|
||||
genesis_file = "config/genesis.json"
|
||||
|
||||
# Path to the JSON file containing the private key to use as a validator in the consensus protocol
|
||||
priv_validator_file = "config/priv_validator.json"
|
||||
|
||||
# Path to the JSON file containing the private key to use for node authentication in the p2p protocol
|
||||
node_key_file = "config/node_key.json"
|
||||
|
||||
# Mechanism to connect to the ABCI application: socket | grpc
|
||||
abci = "socket"
|
||||
|
||||
# TCP or UNIX socket address for the profiling server to listen on
|
||||
prof_laddr = ""
|
||||
|
||||
# If true, query the ABCI app on connecting to a new peer
|
||||
# so the app can decide if we should keep the connection or not
|
||||
filter_peers = false
|
||||
|
||||
##### advanced configuration options #####
|
||||
|
||||
##### rpc server configuration options #####
|
||||
[rpc]
|
||||
|
||||
# TCP or UNIX socket address for the RPC server to listen on
|
||||
laddr = "tcp://0.0.0.0:46657"
|
||||
|
||||
# TCP or UNIX socket address for the gRPC server to listen on
|
||||
# NOTE: This server only supports /broadcast_tx_commit
|
||||
grpc_laddr = ""
|
||||
|
||||
# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool
|
||||
unsafe = false
|
||||
|
||||
##### peer to peer configuration options #####
|
||||
[p2p]
|
||||
|
||||
# Address to listen for incoming connections
|
||||
laddr = "tcp://0.0.0.0:46656"
|
||||
|
||||
# Comma separated list of seed nodes to connect to
|
||||
seeds = ""
|
||||
|
||||
# Comma separated list of nodes to keep persistent connections to
|
||||
# Do not add private peers to this list if you don't want them advertised
|
||||
persistent_peers = ""
|
||||
|
||||
# Path to address book
|
||||
addr_book_file = "config/addrbook.json"
|
||||
|
||||
# Set true for strict address routability rules
|
||||
addr_book_strict = true
|
||||
|
||||
# Time to wait before flushing messages out on the connection, in ms
|
||||
flush_throttle_timeout = 100
|
||||
|
||||
# Maximum number of peers to connect to
|
||||
max_num_peers = 50
|
||||
|
||||
# Maximum size of a message packet payload, in bytes
|
||||
max_packet_msg_payload_size = 1024
|
||||
|
||||
# Rate at which packets can be sent, in bytes/second
|
||||
send_rate = 512000
|
||||
|
||||
# Rate at which packets can be received, in bytes/second
|
||||
recv_rate = 512000
|
||||
|
||||
# Set true to enable the peer-exchange reactor
|
||||
pex = true
|
||||
|
||||
# Seed mode, in which node constantly crawls the network and looks for
|
||||
# peers. If another node asks it for addresses, it responds and disconnects.
|
||||
#
|
||||
# Does not work if the peer-exchange reactor is disabled.
|
||||
seed_mode = false
|
||||
|
||||
# Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
|
||||
private_peer_ids = ""
|
||||
|
||||
##### mempool configuration options #####
|
||||
[mempool]
|
||||
|
||||
recheck = true
|
||||
recheck_empty = true
|
||||
broadcast = true
|
||||
wal_dir = "data/mempool.wal"
|
||||
|
||||
##### consensus configuration options #####
|
||||
[consensus]
|
||||
|
||||
wal_file = "data/cs.wal/wal"
|
||||
|
||||
# All timeouts are in milliseconds
|
||||
timeout_propose = 3000
|
||||
timeout_propose_delta = 500
|
||||
timeout_prevote = 1000
|
||||
timeout_prevote_delta = 500
|
||||
timeout_precommit = 1000
|
||||
timeout_precommit_delta = 500
|
||||
timeout_commit = 1000
|
||||
|
||||
# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
|
||||
skip_timeout_commit = false
|
||||
|
||||
# BlockSize
|
||||
max_block_size_txs = 10000
|
||||
max_block_size_bytes = 1
|
||||
|
||||
# EmptyBlocks mode and possible interval between empty blocks in seconds
|
||||
create_empty_blocks = true
|
||||
create_empty_blocks_interval = 0
|
||||
|
||||
# Reactor sleep duration parameters are in milliseconds
|
||||
peer_gossip_sleep_duration = 100
|
||||
peer_query_maj23_sleep_duration = 2000
|
||||
|
||||
##### transactions indexer configuration options #####
|
||||
[tx_index]
|
||||
|
||||
# What indexer to use for transactions
|
||||
#
|
||||
# Options:
|
||||
# 1) "null" (default)
|
||||
# 2) "kv" - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
|
||||
indexer = "kv"
|
||||
|
||||
# Comma-separated list of tags to index (by default the only tag is tx hash)
|
||||
#
|
||||
# It's recommended to index only a subset of tags due to possible memory
# bloat. This, of course, depends on the indexer's DB and the volume of
# transactions.
index_tags = ""
|
||||
|
||||
# When set to true, tells the indexer to index all tags. Note this may not be
# desirable (see the comment above). IndexTags takes precedence over
# IndexAllTags (i.e. when both are given, only IndexTags will be indexed).
index_all_tags = false
|
39  docs/examples/node3/config/genesis.json  Normal file
@@ -0,0 +1,39 @@
|
||||
{
|
||||
"genesis_time": "0001-01-01T00:00:00Z",
|
||||
"chain_id": "test-chain-A2i3OZ",
|
||||
"validators": [
|
||||
{
|
||||
"pub_key": {
|
||||
"type": "AC26791624DE60",
|
||||
"value": "D+k4AdjnYPWbB9wmad137Bdpo/kAulOoTRQrLy/Qc4k="
|
||||
},
|
||||
"power": 10,
|
||||
"name": ""
|
||||
},
|
||||
{
|
||||
"pub_key": {
|
||||
"type": "AC26791624DE60",
|
||||
"value": "b56N5GCR1adcVRuENjfKw/mrm2dkhT7wNZXV/SDsKsU="
|
||||
},
|
||||
"power": 10,
|
||||
"name": ""
|
||||
},
|
||||
{
|
||||
"pub_key": {
|
||||
"type": "AC26791624DE60",
|
||||
"value": "IgZDpJvGA0TAamicA8ircy+RX/BkUlj6DXwM791ywIU="
|
||||
},
|
||||
"power": 10,
|
||||
"name": ""
|
||||
},
|
||||
{
|
||||
"pub_key": {
|
||||
"type": "AC26791624DE60",
|
||||
"value": "KGAZfxZvIZ7abbeIQ85U1ECG6+I62KSdaH8ulc0+OiU="
|
||||
},
|
||||
"power": 10,
|
||||
"name": ""
|
||||
}
|
||||
],
|
||||
"app_hash": ""
|
||||
}
|
1  docs/examples/node3/config/node_key.json  Normal file
@@ -0,0 +1 @@
|
||||
{"priv_key":{"type":"954568A3288910","value":"9Y9xp/tUJJ6pHTF5SUV0bGKYSdVbFtMHu+Lr8S0JBSZAwneaejnfOEU1LMKOnQ07skrDUaJcj5di3jAyjxJzqg=="}}
|
14  docs/examples/node3/config/priv_validator.json  Normal file
@@ -0,0 +1,14 @@
|
||||
{
|
||||
"address": "9A1A6914EB5F4FF0269C7EEEE627C27310CC64F9",
|
||||
"pub_key": {
|
||||
"type": "AC26791624DE60",
|
||||
"value": "KGAZfxZvIZ7abbeIQ85U1ECG6+I62KSdaH8ulc0+OiU="
|
||||
},
|
||||
"last_height": 0,
|
||||
"last_round": 0,
|
||||
"last_step": 0,
|
||||
"priv_key": {
|
||||
"type": "954568A3288910",
|
||||
"value": "jb52LZ5gp+eQ8nJlFK1z06nBMp1gD8ICmyzdM1icGOgoYBl/Fm8hntptt4hDzlTUQIbr4jrYpJ1ofy6VzT46JQ=="
|
||||
}
|
||||
}
|
@@ -1,42 +0,0 @@
|
||||
{
|
||||
"genesis_time":"0001-01-01T00:00:00Z",
|
||||
"chain_id":"test-chain-wt7apy",
|
||||
"validators":[
|
||||
{
|
||||
"pub_key":{
|
||||
"type":"ed25519",
|
||||
"data":"F08446C80A33E10D620E21450821B58D053778528F2B583D423B3E46EC647D30"
|
||||
},
|
||||
"power":10,
|
||||
"name":"node1"
|
||||
}
|
||||
,
|
||||
{
|
||||
"pub_key":{
|
||||
"type":"ed25519",
|
||||
"data": "A8423F70A9E512643B4B00F7C3701ECAD1F31B0A1FAA45852C41046353B9A07F"
|
||||
},
|
||||
"power":10,
|
||||
"name":"node2"
|
||||
}
|
||||
,
|
||||
{
|
||||
"pub_key":{
|
||||
"type":"ed25519",
|
||||
"data": "E52EFFAEDFE1D618ECDA71DE3B23592B3612CAABA0C10826E4C3120B2198C29A"
|
||||
},
|
||||
"power":10,
|
||||
"name":"node3"
|
||||
}
|
||||
,
|
||||
{
|
||||
"pub_key":{
|
||||
"type":"ed25519",
|
||||
"data": "2B8FC09C07955A02998DFE5AF1AAD1C44115ECA7635FF51A867CF4265D347C07"
|
||||
},
|
||||
"power":10,
|
||||
"name":"node4"
|
||||
}
|
||||
],
|
||||
"app_hash":""
|
||||
}
|
@@ -1,6 +0,0 @@
|
||||
{
|
||||
"priv_key" : {
|
||||
"data" : "95136FCC97E4446B3141EDF9841078107ECE755E99925D79CCBF91085492680B3CA1034D9917DF1DED4E4AB2D9BC225919F6CB2176F210D2368697CC339DF4E7",
|
||||
"type" : "ed25519"
|
||||
}
|
||||
}
|
@@ -1,15 +0,0 @@
|
||||
{
|
||||
"address": "6D6A1E313B407B5474106CA8759C976B777AB659",
|
||||
"pub_key": {
|
||||
"type": "ed25519",
|
||||
"data": "E52EFFAEDFE1D618ECDA71DE3B23592B3612CAABA0C10826E4C3120B2198C29A"
|
||||
},
|
||||
"last_height": 0,
|
||||
"last_round": 0,
|
||||
"last_step": 0,
|
||||
"last_signature": null,
|
||||
"priv_key": {
|
||||
"type": "ed25519",
|
||||
"data": "622432A370111A5C25CFE121E163FE709C9D5C95F551EDBD7A2C69A8545C9B76E52EFFAEDFE1D618ECDA71DE3B23592B3612CAABA0C10826E4C3120B2198C29A"
|
||||
}
|
||||
}
|
@@ -1,15 +0,0 @@
|
||||
# This is a TOML config file.
|
||||
# For more information, see https://github.com/toml-lang/toml
|
||||
|
||||
proxy_app = "tcp://127.0.0.1:46658"
|
||||
moniker = "penguin"
|
||||
fast_sync = true
|
||||
db_backend = "leveldb"
|
||||
log_level = "state:info,*:error"
|
||||
|
||||
[rpc]
|
||||
laddr = "tcp://0.0.0.0:46657"
|
||||
|
||||
[p2p]
|
||||
laddr = "tcp://0.0.0.0:46656"
|
||||
seeds = ""
|
@@ -1,42 +0,0 @@
|
||||
{
|
||||
"genesis_time":"0001-01-01T00:00:00Z",
|
||||
"chain_id":"test-chain-wt7apy",
|
||||
"validators":[
|
||||
{
|
||||
"pub_key":{
|
||||
"type":"ed25519",
|
||||
"data":"F08446C80A33E10D620E21450821B58D053778528F2B583D423B3E46EC647D30"
|
||||
},
|
||||
"power":10,
|
||||
"name":"node1"
|
||||
}
|
||||
,
|
||||
{
|
||||
"pub_key":{
|
||||
"type":"ed25519",
|
||||
"data": "A8423F70A9E512643B4B00F7C3701ECAD1F31B0A1FAA45852C41046353B9A07F"
|
||||
},
|
||||
"power":10,
|
||||
"name":"node2"
|
||||
}
|
||||
,
|
||||
{
|
||||
"pub_key":{
|
||||
"type":"ed25519",
|
||||
"data": "E52EFFAEDFE1D618ECDA71DE3B23592B3612CAABA0C10826E4C3120B2198C29A"
|
||||
},
|
||||
"power":10,
|
||||
"name":"node3"
|
||||
}
|
||||
,
|
||||
{
|
||||
"pub_key":{
|
||||
"type":"ed25519",
|
||||
"data": "2B8FC09C07955A02998DFE5AF1AAD1C44115ECA7635FF51A867CF4265D347C07"
|
||||
},
|
||||
"power":10,
|
||||
"name":"node4"
|
||||
}
|
||||
],
|
||||
"app_hash":""
|
||||
}
|
@@ -1,6 +0,0 @@
|
||||
{
|
||||
"priv_key" : {
|
||||
"data" : "8895D6C9A1B46AB83A8E2BAE2121B8C3E245B9E9126EBD797FEAC5058285F2F64FDE2E8182C88AD5185A49D837C581465D57BD478C41865A66D7D9742D8AEF57",
|
||||
"type" : "ed25519"
|
||||
}
|
||||
}
|
@@ -1,15 +0,0 @@
|
||||
{
|
||||
"address": "829A9663611D3DD88A3D84EA0249679D650A0755",
|
||||
"pub_key": {
|
||||
"type": "ed25519",
|
||||
"data": "2B8FC09C07955A02998DFE5AF1AAD1C44115ECA7635FF51A867CF4265D347C07"
|
||||
},
|
||||
"last_height": 0,
|
||||
"last_round": 0,
|
||||
"last_step": 0,
|
||||
"last_signature": null,
|
||||
"priv_key": {
|
||||
"type": "ed25519",
|
||||
"data": "0A604D1C9AE94A50150BF39E603239092F9392E4773F4D8F4AC1D86E6438E89E2B8FC09C07955A02998DFE5AF1AAD1C44115ECA7635FF51A867CF4265D347C07"
|
||||
}
|
||||
}
|
@@ -40,10 +40,8 @@ Tendermint Tools
|
||||
:maxdepth: 2
|
||||
|
||||
deploy-testnets.rst
|
||||
tools/ansible.rst
|
||||
terraform-and-ansible.rst
|
||||
tools/docker.rst
|
||||
tools/mintnet-kubernetes.rst
|
||||
tools/terraform-digitalocean.rst
|
||||
tools/benchmarking.rst
|
||||
tools/monitoring.rst
|
||||
|
||||
@@ -57,7 +55,10 @@ Tendermint 102
|
||||
abci-spec.rst
|
||||
app-architecture.rst
|
||||
app-development.rst
|
||||
subscribing-to-events-via-websocket.rst
|
||||
indexing-transactions.rst
|
||||
how-to-read-logs.rst
|
||||
running-in-production.rst
|
||||
|
||||
Tendermint 201
|
||||
--------------
|
||||
@@ -67,6 +68,7 @@ Tendermint 201
|
||||
|
||||
specification.rst
|
||||
determinism.rst
|
||||
transactional-semantics.rst
|
||||
|
||||
* For a deeper dive, see `this thesis <https://atrium.lib.uoguelph.ca/xmlui/handle/10214/9769>`__.
|
||||
* There is also the `original whitepaper <https://tendermint.com/static/docs/tendermint.pdf>`__, though it is now quite outdated.
|
||||
|
100  docs/indexing-transactions.rst  Normal file
@@ -0,0 +1,100 @@
|
||||
Indexing Transactions
|
||||
=====================
|
||||
|
||||
Tendermint allows you to index transactions and later query or subscribe to
|
||||
their results.
|
||||
|
||||
Let's take a look at the ``[tx_index]`` config section:
|
||||
|
||||
::
|
||||
|
||||
##### transactions indexer configuration options #####
|
||||
[tx_index]
|
||||
|
||||
# What indexer to use for transactions
|
||||
#
|
||||
# Options:
|
||||
# 1) "null" (default)
|
||||
# 2) "kv" - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
|
||||
indexer = "kv"
|
||||
|
||||
# Comma-separated list of tags to index (by default the only tag is tx hash)
|
||||
#
|
||||
# It's recommended to index only a subset of tags due to possible memory
# bloat. This, of course, depends on the indexer's DB and the volume of
# transactions.
index_tags = ""
|
||||
|
||||
# When set to true, tells the indexer to index all tags. Note this may not be
# desirable (see the comment above). IndexTags takes precedence over
# IndexAllTags (i.e. when both are given, only IndexTags will be indexed).
index_all_tags = false
|
||||
|
||||
By default, Tendermint will index all transactions by their respective hashes
using an embedded simple indexer. Note, we are planning to add more options in
the future (e.g., a PostgreSQL indexer).
|
||||
|
||||
Adding tags
|
||||
-----------
|
||||
|
||||
In your application's ``DeliverTx`` method, add the ``Tags`` field with pairs
of UTF-8 encoded strings (e.g. "account.owner": "Bob", "balance": "100.0",
"date": "2018-01-02").
|
||||
|
||||
Example:
|
||||
|
||||
::
|
||||
|
||||
func (app *KVStoreApplication) DeliverTx(tx []byte) types.Result {
|
||||
...
|
||||
tags := []cmn.KVPair{
|
||||
{[]byte("account.name"), []byte("igor")},
|
||||
{[]byte("account.address"), []byte("0xdeadbeef")},
|
||||
{[]byte("tx.amount"), []byte("7")},
|
||||
}
|
||||
return types.ResponseDeliverTx{Code: code.CodeTypeOK, Tags: tags}
|
||||
}
|
||||
|
||||
If you want Tendermint to only index transactions by the "account.name" tag,
set ``tx_index.index_tags="account.name"`` in the config. If you want to index
all tags, set ``index_all_tags=true``.
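
For example, a minimal ``[tx_index]`` snippet for that setup (a sketch, not a
complete config) looks like:

::

    [tx_index]
    indexer = "kv"
    index_tags = "account.name"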
|
||||
|
||||
Note, there are a few predefined tags:
|
||||
|
||||
- ``tm.event`` (event type)
|
||||
- ``tx.hash`` (transaction's hash)
|
||||
- ``tx.height`` (height of the block transaction was committed in)
|
||||
|
||||
Tendermint will throw a warning if you try to use any of the above keys.
|
||||
|
||||
Querying transactions
---------------------
|
||||
|
||||
You can query the transaction results by calling ``/tx_search`` RPC endpoint:
|
||||
|
||||
::
|
||||
|
||||
curl "localhost:46657/tx_search?query=\"account.name='igor'\"&prove=true"
|
||||
|
||||
Check out `API docs <https://tendermint.github.io/slate/?shell#txsearch>`__ for more
|
||||
information on query syntax and other options.
|
||||
|
||||
Subscribing to transactions
|
||||
---------------------------
|
||||
|
||||
Clients can subscribe to transactions with the given tags via Websocket by
providing a query to the ``/subscribe`` RPC endpoint.
|
||||
|
||||
::
|
||||
|
||||
{
|
||||
"jsonrpc": "2.0",
|
||||
"method": "subscribe",
|
||||
"id": "0",
|
||||
"params": {
|
||||
"query": "account.name='igor'"
|
||||
}
|
||||
}
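
For example, assuming you have a generic websocket client such as ``wscat``
installed (an assumption for illustration), you can open a connection to the
RPC websocket endpoint and send the request above:

::

    wscat -c ws://localhost:46657/websocket
    > {"jsonrpc": "2.0", "method": "subscribe", "id": "0", "params": {"query": "account.name='igor'"}}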
|
||||
|
||||
Check out `API docs <https://tendermint.github.io/slate/#subscribe>`__ for more
|
||||
information on query syntax and other options.
|
@@ -1,56 +1,58 @@
|
||||
Install Tendermint
|
||||
==================
|
||||
|
||||
The fastest and easiest way to install the ``tendermint`` binary
|
||||
is to run `this script <https://github.com/tendermint/tendermint/blob/develop/scripts/install/install_tendermint_ubuntu.sh>`__ on
|
||||
a fresh Ubuntu instance,
|
||||
or `this script <https://github.com/tendermint/tendermint/blob/develop/scripts/install/install_tendermint_bsd.sh>`__
|
||||
on a fresh FreeBSD instance. Read the comments / instructions carefully (i.e., reset your terminal after running the script,
and make sure you're okay with the network connections being made).
|
||||
|
||||
From Binary
|
||||
-----------
|
||||
|
||||
To download pre-built binaries, see the `releases page <https://github.com/tendermint/tendermint/releases>`__.
|
||||
|
||||
From Source
|
||||
-----------
|
||||
|
||||
You'll need ``go``, maybe `dep <https://github.com/golang/dep>`__, and the Tendermint source code.
|
||||
|
||||
Install Go
|
||||
^^^^^^^^^^
|
||||
|
||||
You'll need ``go`` `installed <https://golang.org/doc/install>`__ and the required
`environment variables set <https://github.com/tendermint/tendermint/wiki/Setting-GOPATH>`__.
|
||||
|
||||
Get Source Code
|
||||
^^^^^^^^^^^^^^^
|
||||
|
||||
You should be able to install the latest with a simple
|
||||
::
|
||||
|
||||
mkdir -p $GOPATH/src/github.com/tendermint
|
||||
cd $GOPATH/src/github.com/tendermint
|
||||
git clone https://github.com/tendermint/tendermint.git
|
||||
cd tendermint
|
||||
|
||||
Get Tools & Dependencies
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
::
|
||||
|
||||
go get github.com/tendermint/tendermint/cmd/tendermint
|
||||
|
||||
Run ``tendermint --help`` and ``tendermint version`` to ensure your
|
||||
installation worked.
|
||||
|
||||
If the installation failed, a dependency may have been updated and become
|
||||
incompatible with the latest Tendermint master branch. We solve this
|
||||
using the ``dep`` tool for dependency management.
|
||||
|
||||
First, install ``dep``:
|
||||
|
||||
::
|
||||
|
||||
cd $GOPATH/src/github.com/tendermint/tendermint
|
||||
make get_tools
|
||||
make get_vendor_deps
|
||||
|
||||
Compile
^^^^^^^

::

    make get_vendor_deps
    make install

to put the binary in ``$GOPATH/bin`` or use:

::

    make build

to put the binary in ``./build``.

The latest ``tendermint version`` is now installed.
|
||||
|
||||
Reinstall
|
||||
---------
|
||||
@@ -86,20 +88,6 @@ do, use ``dep``, as above:
|
||||
Since the third option just uses ``dep`` right away, it should always
|
||||
work.
|
||||
|
||||
Troubleshooting
|
||||
---------------
|
||||
|
||||
If ``go get`` failing bothers you, fetch the code using ``git``:
|
||||
|
||||
::
|
||||
|
||||
mkdir -p $GOPATH/src/github.com/tendermint
|
||||
git clone https://github.com/tendermint/tendermint $GOPATH/src/github.com/tendermint/tendermint
|
||||
cd $GOPATH/src/github.com/tendermint/tendermint
|
||||
make get_tools
|
||||
make get_vendor_deps
|
||||
make install
|
||||
|
||||
Run
|
||||
^^^
|
||||
|
||||
|
203  docs/running-in-production.rst  Normal file
@@ -0,0 +1,203 @@
|
||||
Running in production
|
||||
=====================
|
||||
|
||||
Logging
|
||||
-------
|
||||
|
||||
The default logging level (``main:info,state:info,*:error``) should suffice for normal
operation. Read `this post
|
||||
<https://blog.cosmos.network/one-of-the-exciting-new-features-in-0-10-0-release-is-smart-log-level-flag-e2506b4ab756>`__
|
||||
for details on how to configure the ``log_level`` config variable. Some of the
modules can be found `here <./how-to-read-logs.html#list-of-modules>`__. If
you're trying to debug Tendermint or are asked to provide logs with the debug
logging level, you can do so by running tendermint with ``--log_level="*:debug"``.
|
||||
|
||||
DOS Exposure and Mitigation
|
||||
---------------------------
|
||||
|
||||
Validators are supposed to set up a `Sentry Node Architecture
<https://blog.cosmos.network/tendermint-explained-bringing-bft-based-pos-to-the-public-blockchain-domain-f22e274a0fdb>`__
to prevent denial-of-service attacks. You can read more about it `here
<https://github.com/tendermint/aib-data/blob/develop/medium/TendermintBFT.md>`__.
|
||||
|
||||
P2P
|
||||
~~~
|
||||
|
||||
The core of the Tendermint peer-to-peer system is ``MConnection``. Each
connection has a ``MaxPacketMsgPayloadSize``, which is the maximum packet size,
and bounded send & receive queues. One can impose restrictions on the send &
receive rate per connection (``SendRate``, ``RecvRate``); see the config snippet below.
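
These limits correspond to the ``max_packet_msg_payload_size``, ``send_rate``
and ``recv_rate`` keys in the ``[p2p]`` section of the config; the values
below are the defaults shown earlier in this document:

::

    [p2p]
    max_packet_msg_payload_size = 1024
    send_rate = 512000
    recv_rate = 512000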
|
||||
|
||||
RPC
|
||||
~~~
|
||||
|
||||
Endpoints returning multiple entries are limited by default to return 30
|
||||
elements (100 max).
|
||||
|
||||
Rate-limiting and authentication are other key aspects that help protect
against DOS attacks. While we may implement these features in the future, for
now validators are supposed to use external tools like `NGINX
<https://www.nginx.com/blog/rate-limiting-nginx/>`__ or `traefik
<https://docs.traefik.io/configuration/commons/#rate-limiting>`__ to achieve
the same thing.
|
||||
|
||||
Debugging Tendermint
|
||||
--------------------
|
||||
|
||||
If you ever have to debug Tendermint, the first thing you should probably do is
|
||||
to check out the logs. See `"How to read logs" <./how-to-read-logs.html>`__,
|
||||
where we explain what certain log statements mean.
|
||||
|
||||
If, after skimming through the logs, things are still not clear, the next step
is to query the `/status` RPC endpoint. It provides the necessary info:
whether the node is syncing or not, what height it is on, etc.
|
||||
|
||||
```
|
||||
$ curl http(s)://{ip}:{rpcPort}/status
|
||||
```
|
||||
|
||||
`/dump_consensus_state` will give you a detailed overview of the consensus
state (proposer, latest validators, peer states). From it, you should be able
to figure out why, for example, the network halted.
|
||||
|
||||
```
|
||||
$ curl http(s)://{ip}:{rpcPort}/dump_consensus_state
|
||||
```
|
||||
|
||||
There is a reduced version of this endpoint - `/consensus_state`, which
|
||||
returns just the votes seen at the current height.
|
||||
|
||||
- `Github Issues <https://github.com/tendermint/tendermint/issues>`__
|
||||
- `StackOverflow questions <https://stackoverflow.com/questions/tagged/tendermint>`__
|
||||
|
||||
Monitoring Tendermint
|
||||
---------------------
|
||||
|
||||
Each Tendermint instance has a standard `/health` RPC endpoint, which responds
with 200 (OK) if everything is fine and 500 (or no response) if something is
wrong.

Other useful endpoints include the previously mentioned `/status`, as well as
`/net_info` and `/validators`.
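
For example, a basic liveness check against the default RPC port (46657) could be:

```
$ curl -s -o /dev/null -w "%{http_code}\n" http://localhost:46657/health
```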
|
||||
|
||||
We have a small tool called tm-monitor, which outputs information from the
endpoints above plus some statistics. The tool can be found `here
<https://github.com/tendermint/tools/tree/master/tm-monitor>`__.
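
A typical invocation (assuming the binary is installed and your node's RPC is
on the default port; check the tool's README for exact flags) looks roughly like:

```
tm-monitor localhost:46657
```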
|
||||
|
||||
What happens when my app dies?
|
||||
------------------------------
|
||||
|
||||
You are supposed to run Tendermint under a `process supervisor
|
||||
<https://en.wikipedia.org/wiki/Process_supervision>`__ (like systemd or runit).
|
||||
It will ensure Tendermint is always running (despite possible errors).
|
||||
|
||||
Getting back to the original question, if your application dies, Tendermint
will panic. After a process supervisor restarts your application, Tendermint
should be able to reconnect successfully. The order of restart does not
matter.
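
For example, a minimal systemd unit for a node might look like the following;
the user, binary path, and home directory are illustrative assumptions:

::

    [Unit]
    Description=Tendermint node
    After=network-online.target

    [Service]
    User=tendermint
    ExecStart=/usr/local/bin/tendermint node --home /home/tendermint/.tendermint
    Restart=on-failure
    RestartSec=3

    [Install]
    WantedBy=multi-user.target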
|
||||
|
||||
Signal handling
|
||||
---------------
|
||||
|
||||
We catch SIGINT and SIGTERM and try to clean up nicely. For other signals we
|
||||
use the default behaviour in Go: `Default behavior of signals in Go programs
|
||||
<https://golang.org/pkg/os/signal/#hdr-Default_behavior_of_signals_in_Go_programs>`__.
|
||||
|
||||
Hardware
|
||||
--------
|
||||
|
||||
Processor and Memory
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
While actual specs vary depending on the load and validator count, the minimal requirements are:
|
||||
|
||||
- 1GB RAM
|
||||
- 25GB of disk space
|
||||
- 1.4 GHz CPU
|
||||
|
||||
SSD disks are preferable for applications with high transaction throughput.
|
||||
|
||||
Recommended:
|
||||
|
||||
- 2GB RAM
|
||||
- 100GB SSD
|
||||
- x64 2.0 GHz 2v CPU
|
||||
|
||||
For now, Tendermint stores the entire history, which may require significant
disk space over time. We are planning to implement state syncing (see `#828
<https://github.com/tendermint/tendermint/issues/828>`__), after which storing
all past blocks will not be necessary.
|
||||
|
||||
Operating Systems
|
||||
~~~~~~~~~~~~~~~~~
|
||||
|
||||
Tendermint can be compiled for a wide range of operating systems thanks to Go
|
||||
language (the list of $OS/$ARCH pairs can be found `here
|
||||
<https://golang.org/doc/install/source#environment>`__).
|
||||
|
||||
While we do not favor any particular operating system, more secure and stable Linux server
distributions (like CentOS) should be preferred over desktop operating systems
(like Mac OS).
|
||||
|
||||
Misc.
|
||||
~~~~~
|
||||
|
||||
NOTE: if you are going to use Tendermint in a public domain, make sure you read
|
||||
`hardware recommendations (see "4. Hardware")
|
||||
<https://cosmos.network/validators>`__ for a validator in the Cosmos network.
|
||||
|
||||
Configuration parameters
|
||||
------------------------
|
||||
|
||||
- ``p2p.flush_throttle_timeout``
|
||||
``p2p.max_packet_msg_payload_size``
|
||||
``p2p.send_rate``
|
||||
``p2p.recv_rate``
|
||||
|
||||
If you are going to use Tendermint in a private domain and you have a private
high-speed network among your peers, it makes sense to lower the flush throttle
timeout and increase the other parameters.
|
||||
|
||||
::
|
||||
|
||||
[p2p]
|
||||
|
||||
send_rate=20000000 # 20 MB/s
recv_rate=20000000 # 20 MB/s
|
||||
flush_throttle_timeout=10
|
||||
max_packet_msg_payload_size=10240 # 10KB
|
||||
|
||||
- ``mempool.recheck``
|
||||
|
||||
After every block, Tendermint rechecks every transaction left in the mempool to
see if transactions committed in that block affected the application state, since
some of the remaining transactions may have become invalid. If that does not apply to
your application, you can disable rechecking by setting ``mempool.recheck=false``.
|
||||
|
||||
- ``mempool.broadcast``
|
||||
|
||||
Setting this to false will stop the mempool from relaying transactions to other
|
||||
peers until they are included in a block. It means only the peer you send the
|
||||
tx to will see it until it is included in a block.
|
||||
|
||||
- ``consensus.skip_timeout_commit``
|
||||
|
||||
We want ``skip_timeout_commit=false`` when there is economics on the line because
proposers should wait to hear more votes. But if you don't care about that
and want the fastest consensus, you can skip it. It will be kept false by
default for public deployments (e.g. `Cosmos Hub
<https://cosmos.network/intro/hub>`__), while for enterprise applications,
setting it to true is not a problem.
|
||||
|
||||
- ``consensus.peer_gossip_sleep_duration``
|
||||
|
||||
You can try to reduce the time your node sleeps before checking if there's something to send to its peers.
|
||||
|
||||
- ``consensus.timeout_commit``
|
||||
|
||||
You can also try lowering ``timeout_commit`` (time we sleep before proposing the next block).
|
||||
|
||||
- ``consensus.max_block_size_txs``
|
||||
|
||||
By default, the maximum number of transactions per block is 10,000. Feel free
to change it to suit your needs; an illustrative snippet combining these
consensus options is shown below.
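
For example (illustrative values only, not a recommendation; all keys appear in
the config shown earlier in this document):

::

    [consensus]
    timeout_commit = 500
    peer_gossip_sleep_duration = 50
    max_block_size_txs = 5000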
|
76  docs/spec/README.md  Normal file
@@ -0,0 +1,76 @@
|
||||
# Tendermint Specification
|
||||
|
||||
This is a markdown specification of the Tendermint blockchain.
|
||||
It defines the base data structures, how they are validated,
|
||||
and how they are communicated over the network.
|
||||
|
||||
If you find discrepancies between the spec and the code that
|
||||
do not have an associated issue or pull request on github,
|
||||
please submit them to our [bug bounty](https://tendermint.com/security)!
|
||||
|
||||
## Contents
|
||||
|
||||
- [Overview](#overview)
|
||||
|
||||
### Data Structures
|
||||
|
||||
- [Encoding and Digests](https://github.com/tendermint/tendermint/blob/master/docs/spec/blockchain/encoding.md)
|
||||
- [Blockchain](https://github.com/tendermint/tendermint/blob/master/docs/spec/blockchain/blockchain.md)
|
||||
- [State](https://github.com/tendermint/tendermint/blob/master/docs/spec/blockchain/state.md)
|
||||
|
||||
### Consensus Protocol
|
||||
|
||||
- TODO
|
||||
|
||||
### P2P and Network Protocols
|
||||
|
||||
- [The Base P2P Layer](https://github.com/tendermint/tendermint/tree/master/docs/spec/p2p): multiplex the protocols ("reactors") on authenticated and encrypted TCP connections
|
||||
- [Peer Exchange (PEX)](https://github.com/tendermint/tendermint/tree/master/docs/spec/reactors/pex): gossip known peer addresses so peers can find each other
|
||||
- [Block Sync](https://github.com/tendermint/tendermint/tree/master/docs/spec/reactors/block_sync): gossip blocks so peers can catch up quickly
|
||||
- [Consensus](https://github.com/tendermint/tendermint/tree/master/docs/spec/reactors/consensus): gossip votes and block parts so new blocks can be committed
|
||||
- [Mempool](https://github.com/tendermint/tendermint/tree/master/docs/spec/reactors/mempool): gossip transactions so they get included in blocks
|
||||
- Evidence: TODO
|
||||
|
||||
### More
|
||||
- Light Client: TODO
|
||||
- Persistence: TODO
|
||||
|
||||
## Overview
|
||||
|
||||
Tendermint provides Byzantine Fault Tolerant State Machine Replication using
|
||||
hash-linked batches of transactions. Such transaction batches are called "blocks".
|
||||
Hence, Tendermint defines a "blockchain".
|
||||
|
||||
Each block in Tendermint has a unique index - its Height.
|
||||
A block at `Height == H` can only be committed *after* the
|
||||
block at `Height == H-1`.
|
||||
Each block is committed by a known set of weighted Validators.
|
||||
Membership and weighting within this set may change over time.
|
||||
Tendermint guarantees the safety and liveness of the blockchain
|
||||
so long as less than 1/3 of the total weight of the Validator set
|
||||
is malicious or faulty.
|
||||
|
||||
A commit in Tendermint is a set of signed messages from more than 2/3 of
|
||||
the total weight of the current Validator set. Validators take turns proposing
|
||||
blocks and voting on them. Once enough votes are received, the block is considered
|
||||
committed. These votes are included in the *next* block as proof that the previous block
|
||||
was committed - they cannot be included in the current block, as that block has already been
|
||||
created.
|
||||
|
||||
Once a block is committed, it can be executed against an application.
|
||||
The application returns results for each of the transactions in the block.
|
||||
The application can also return changes to be made to the validator set,
|
||||
as well as a cryptographic digest of its latest state.
|
||||
|
||||
Tendermint is designed to enable efficient verification and authentication
|
||||
of the latest state of the blockchain. To achieve this, it embeds
|
||||
cryptographic commitments to certain information in the block "header".
|
||||
This information includes the contents of the block (eg. the transactions),
|
||||
the validator set committing the block, as well as the various results returned by the application.
|
||||
Note, however, that block execution only occurs *after* a block is committed.
|
||||
Thus, application results can only be included in the *next* block.
|
||||
|
||||
Also note that information like the transaction results and the validator set are never
|
||||
directly included in the block - only their cryptographic digests (Merkle roots) are.
|
||||
Hence, verification of a block requires a separate data structure to store this information.
|
||||
We call this the `State`. Block verification also requires access to the previous block.
|
@@ -162,7 +162,7 @@ We refer to certain globally available objects:
|
||||
and `state` keeps track of the validator set, the consensus parameters
|
||||
and other results from the application.
|
||||
Elements of an object are accessed as expected,
|
||||
ie. `block.Header`. See [here](state.md) for the definition of `state`.
|
||||
ie. `block.Header`. See [here](https://github.com/tendermint/tendermint/blob/master/docs/spec/blockchain/state.md) for the definition of `state`.
|
||||
|
||||
### Header
|
||||
|
@@ -2,7 +2,7 @@
|
||||
|
||||
## Amino
|
||||
|
||||
Tendermint uses the Protobuf3 derivative [Amino](https://github.com/tendermint/go-amino) for all data structures.
|
||||
Think of Amino as an object-oriented Protobuf3 with native JSON support.
|
||||
The goal of the Amino encoding protocol is to bring parity between application
|
||||
logic objects and persistence objects.
|
||||
@@ -39,7 +39,7 @@ place of the public key. Here we list the concrete types, their names,
|
||||
and prefix bytes for public keys and signatures, as well as the address schemes
|
||||
for each PubKey. Note for brevity we don't
|
||||
include details of the private keys beyond their type and name, as they can be
|
||||
derived the same way as the others using Amino.
|
||||
|
||||
All registered objects are encoded by Amino using a 4-byte PrefixBytes that
|
||||
uniquely identifies the object and includes information about its underlying
|
||||
@@ -49,107 +49,60 @@ spec](https://github.com/tendermint/go-amino#computing-the-prefix-and-disambigua
|
||||
In what follows, we provide the type names and prefix bytes directly.
|
||||
Notice that when encoding byte-arrays, the length of the byte-array is appended
|
||||
to the PrefixBytes. Thus the encoding of a byte array becomes `<PrefixBytes>
|
||||
<Length> <ByteArray>`. In other words, to encode any type listed below you do not need to be
familiar with amino encoding.
You can simply use the table below and concatenate Prefix || Length (of raw bytes) || raw bytes
(where || stands for byte concatenation).
|
||||
|
||||
(NOTE: the remainder of this section on Public Key Cryptography can be generated
|
||||
from [this script](./scripts/crypto.go))
|
||||
| Type | Name | Prefix | Length |
| ---- | ---- | ------ | ------ |
| PubKeyEd25519 | tendermint/PubKeyEd25519 | 0x1624DE62 | 0x20 |
| PubKeyLedgerEd25519 | tendermint/PubKeyLedgerEd25519 | 0x5C3453B2 | 0x20 |
| PubKeySecp256k1 | tendermint/PubKeySecp256k1 | 0xEB5AE982 | 0x21 |
| PrivKeyEd25519 | tendermint/PrivKeyEd25519 | 0xA3288912 | 0x40 |
| PrivKeySecp256k1 | tendermint/PrivKeySecp256k1 | 0xE1B0F79A | 0x20 |
| PrivKeyLedgerSecp256k1 | tendermint/PrivKeyLedgerSecp256k1 | 0x10CAB393 | variable |
| PrivKeyLedgerEd25519 | tendermint/PrivKeyLedgerEd25519 | 0x0CFEEF9B | variable |
| SignatureEd25519 | tendermint/SignatureKeyEd25519 | 0x3DA1DB2A | 0x40 |
| SignatureSecp256k1 | tendermint/SignatureKeySecp256k1 | 0x16E1FEEA | variable |
|
||||
|
||||
### PubKeyEd25519
|
||||
### Examples
|
||||
|
||||
```
|
||||
// Name: tendermint/PubKeyEd25519
|
||||
// PrefixBytes: 0x1624DE62
|
||||
// Length: 0x20
|
||||
// Notes: raw 32-byte Ed25519 pubkey
|
||||
type PubKeyEd25519 [32]byte
|
||||
|
||||
func (pubkey PubKeyEd25519) Address() []byte {
|
||||
// NOTE: hash of the Amino encoded bytes!
|
||||
return RIPEMD160(AminoEncode(pubkey))
|
||||
}
|
||||
```
|
||||
|
||||
For example, the 32-byte Ed25519 pubkey
|
||||
`CCACD52F9B29D04393F01CD9AF6535455668115641F3D8BAEFD2295F24BAF60E` would be
|
||||
encoded as
|
||||
`1624DE6220CCACD52F9B29D04393F01CD9AF6535455668115641F3D8BAEFD2295F24BAF60E`.
|
||||
|
||||
The address would then be
|
||||
`RIPEMD160(0x1624DE6220CCACD52F9B29D04393F01CD9AF6535455668115641F3D8BAEFD2295F24BAF60E)`
|
||||
or `430FF75BAF1EC4B0D51BB3EEC2955479D0071605`
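
To make the Prefix || Length || raw-bytes rule concrete, here is a minimal Go
sketch (not the actual go-amino or go-crypto code; the input and expected
output values come from the example above):

```
package main

import (
	"encoding/hex"
	"fmt"

	"golang.org/x/crypto/ripemd160"
)

func main() {
	// PubKeyEd25519: PrefixBytes 0x1624DE62, then the length byte (0x20),
	// then the raw 32-byte key (values taken from the example above).
	prefix := []byte{0x16, 0x24, 0xDE, 0x62}
	pub, _ := hex.DecodeString("CCACD52F9B29D04393F01CD9AF6535455668115641F3D8BAEFD2295F24BAF60E")

	encoded := append(append(prefix, byte(len(pub))), pub...) // Prefix || Length || raw bytes
	fmt.Println(hex.EncodeToString(encoded))                  // 1624de6220ccacd52f...

	h := ripemd160.New() // the address is the RIPEMD160 hash of the Amino encoded bytes
	h.Write(encoded)
	fmt.Println(hex.EncodeToString(h.Sum(nil))) // should print 430ff75baf1ec4b0d51bb3eec2955479d0071605
}
```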
|
||||
|
||||
### SignatureEd25519
|
||||
|
||||
```
|
||||
// Name: tendermint/SignatureKeyEd25519
|
||||
// PrefixBytes: 0x3DA1DB2A
|
||||
// Length: 0x40
|
||||
// Notes: raw 64-byte Ed25519 signature
|
||||
type SignatureEd25519 [64]byte
|
||||
```
|
||||
|
||||
For example, the 64-byte Ed25519 signature
`1B6034A8ED149D3C94FDA13EC03B26CC0FB264D9B0E47D3FA3DEF9FCDE658E49C80B35F9BE74949356401B15B18FB817D6E54495AD1C4A8401B248466CB0DB0B`
would be encoded as
`3DA1DB2A401B6034A8ED149D3C94FDA13EC03B26CC0FB264D9B0E47D3FA3DEF9FCDE658E49C80B35F9BE74949356401B15B18FB817D6E54495AD1C4A8401B248466CB0DB0B`
|
||||
|
||||
### PrivKeyEd25519
|
||||
|
||||
```
|
||||
// Name: tendermint/PrivKeyEd25519
|
||||
// Notes: raw 32-byte priv key concatenated to raw 32-byte pub key
|
||||
type PrivKeyEd25519 [64]byte
|
||||
```
|
||||
|
||||
### PubKeySecp256k1
|
||||
|
||||
```
|
||||
// Name: tendermint/PubKeySecp256k1
|
||||
// PrefixBytes: 0xEB5AE982
|
||||
// Length: 0x21
|
||||
// Notes: OpenSSL compressed pubkey prefixed with 0x02 or 0x03
|
||||
type PubKeySecp256k1 [33]byte
|
||||
|
||||
func (pubkey PubKeySecp256k1) Address() []byte {
|
||||
// NOTE: hash of the raw pubkey bytes (not Amino encoded!).
|
||||
// Compatible with Bitcoin addresses.
|
||||
return RIPEMD160(SHA256(pubkey[:]))
|
||||
}
|
||||
```
|
||||
|
||||
For example, the 33-byte Secp256k1 pubkey
|
||||
`020BD40F225A57ED383B440CF073BC5539D0341F5767D2BF2D78406D00475A2EE9` would be
|
||||
encoded as
|
||||
`EB5AE98221020BD40F225A57ED383B440CF073BC5539D0341F5767D2BF2D78406D00475A2EE9`
|
||||
|
||||
The address would then be
|
||||
`RIPEMD160(SHA256(0x020BD40F225A57ED383B440CF073BC5539D0341F5767D2BF2D78406D00475A2EE9))`
|
||||
or `0AE5BEE929ABE51BAD345DB925EEA652680783FC`
|
||||
|
||||
### SignatureSecp256k1
|
||||
|
||||
```
|
||||
// Name: tendermint/SignatureKeySecp256k1
|
||||
// PrefixBytes: 0x16E1FEEA
|
||||
// Length: Variable
|
||||
// Encoding prefix: Variable
|
||||
// Notes: raw bytes of the Secp256k1 signature
|
||||
type SignatureSecp256k1 []byte
|
||||
```
|
||||
|
||||
For example, the variable-size Secp256k1 signature (in this particular example 70, or 0x46, bytes)
`304402201CD4B8C764D2FD8AF23ECFE6666CA8A53886D47754D951295D2D311E1FEA33BF02201E0F906BB1CF2C30EAACFFB032A7129358AFF96B9F79B06ACFFB18AC90C2ADD7`
would be encoded as
`16E1FEEA46304402201CD4B8C764D2FD8AF23ECFE6666CA8A53886D47754D951295D2D311E1FEA33BF02201E0F906BB1CF2C30EAACFFB032A7129358AFF96B9F79B06ACFFB18AC90C2ADD7`
|
||||
|
||||
### Addresses

Addresses for each public key type are computed as follows:
|
||||
|
||||
#### Ed25519
|
||||
|
||||
RIPEMD160 hash of the Amino encoded public key:
|
||||
|
||||
```
address = RIPEMD160(AMINO(pubkey))
```
|
||||
|
||||
NOTE: this will soon change to the truncated 20-bytes of the SHA256 of the raw
|
||||
public key
|
||||
|
||||
#### Secp256k1
|
||||
|
||||
RIPEMD160 hash of the SHA256 hash of the OpenSSL compressed public key:
|
||||
|
||||
```
|
||||
address = RIPEMD160(SHA256(pubkey))
|
||||
```
|
||||
|
||||
This is the same as Bitcoin.
|
||||
|
||||
## Other Common Types
|
||||
|
||||
### BitArray
|
||||
@@ -290,6 +243,7 @@ Amino also supports JSON encoding - registered types are simply encoded as:
|
||||
"type": "<DisfixBytes>",
|
||||
"value": <JSON>
|
||||
}
|
||||
```
|
||||
|
||||
For instance, an ED25519 PubKey would look like:
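
Using the Ed25519 pubkey type/value format that appears in the example
``genesis.json`` files earlier in this diff:

```
{
  "type": "AC26791624DE60",
  "value": "D+k4AdjnYPWbB9wmad137Bdpo/kAulOoTRQrLy/Qc4k="
}
```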
|
||||
|
@@ -77,5 +77,4 @@ func TotalVotingPower(vals []Validators) int64{
|
||||
|
||||
### ConsensusParams
|
||||
|
||||
TODO
|
@@ -52,20 +52,33 @@ objects in the `ResponseBeginBlock`:
|
||||
|
||||
```
|
||||
message Validator {
|
||||
bytes pub_key = 1;
|
||||
int64 power = 2;
|
||||
bytes address = 1;
|
||||
PubKey pub_key = 2;
|
||||
int64 power = 3;
|
||||
}
|
||||
|
||||
message PubKey {
|
||||
string type = 1;
|
||||
bytes data = 2;
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
The `pub_key` is the Amino encoded public key for the validator. For details on
|
||||
Amino encoded public keys, see the [section of the encoding spec](./encoding.md#public-key-cryptography).
|
||||
The `pub_key` currently supports two types:
- `type = "ed25519"` and `data = <raw 32-byte public key>`
- `type = "secp256k1"` and `data = <33-byte OpenSSL compressed public key>`
|
||||
|
||||
If the address is provided, it must match the address of the pubkey, as
|
||||
specified [here](/docs/spec/blockchain/encoding.md#Addresses)
|
||||
|
||||
(Note: In the v0.19 series, the `pub_key` is the [Amino encoded public
|
||||
key](/docs/spec/blockchain/encoding.md#public-key-cryptography).
|
||||
For Ed25519 pubkeys, the Amino prefix is always "1624DE6220". For example, the 32-byte Ed25519 pubkey
|
||||
`76852933A4686A721442E931A8415F62F5F1AEDF4910F1F252FB393F74C40C85` would be
|
||||
Amino encoded as
|
||||
`1624DE622076852933A4686A721442E931A8415F62F5F1AEDF4910F1F252FB393F74C40C85`)
|
||||
|
||||
(Note: In old versions of Tendermint (pre-v0.19.0), the pubkey is just prefixed with a
single type byte, so for ED25519 we'd have `pub_key = 0x1 | pub`)
|
||||
|
||||
The `power` is the new voting power for the validator, with the
|
||||
@@ -79,6 +92,19 @@ following rules:
|
||||
set with the given power
|
||||
- if the validator does already exist, its power will be adjusted to the given power
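
As a hedged sketch of how an application might return such an update from its
EndBlock handler (exact package paths and field names vary across ABCI
versions; `myValPubKey` is a placeholder for the validator's raw 32-byte
Ed25519 key):

```
import (
	abci "github.com/tendermint/abci/types" // ABCI types import path in this era; may differ in newer releases
)

func (app *KVStoreApplication) EndBlock(req abci.RequestEndBlock) abci.ResponseEndBlock {
	update := abci.Validator{
		PubKey: abci.PubKey{Type: "ed25519", Data: myValPubKey}, // myValPubKey: placeholder raw 32-byte key
		Power:  15,                                              // new voting power; 0 removes the validator
	}
	return abci.ResponseEndBlock{ValidatorUpdates: []abci.Validator{update}}
}
```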
|
||||
|
||||
## InitChain Validator Updates
|
||||
|
||||
ResponseInitChain has the option to return a list of validators.
|
||||
If the list is not empty, Tendermint will adopt it for the validator set.
|
||||
This way the application can determine the initial validator set for the
|
||||
blockchain.
|
||||
|
||||
Note that if addresses are included in the returned validators, they must match
the address of the public key.
|
||||
|
||||
ResponseInitChain also includes ConsensusParams, but these are presently
|
||||
ignored.
|
||||
|
||||
## Query
|
||||
|
||||
Query is a generic message type with lots of flexibility to enable diverse sets
|
||||
@@ -94,7 +120,7 @@ using the following paths, with no additional data:
|
||||
|
||||
- `/p2p/filter/addr/<IP:PORT>`, where `<IP:PORT>` denote the IP address and
|
||||
the port of the connection
|
||||
- `p2p/filter/id/<ID>`, where `<ID>` is the peer node ID (ie. the
|
||||
pubkey.Address() for the peer's PubKey)
|
||||
|
||||
If either of these queries return a non-zero ABCI code, Tendermint will refuse
|
||||
@@ -121,7 +147,6 @@ stateBlockHeight = height of the last block for which Tendermint completed all
|
||||
block processing and saved all ABCI results to disk
|
||||
appBlockHeight = height of the last block for which the ABCI app successfully
completed Commit
|
||||
|
||||
```
|
||||
|
||||
Note we always have `storeBlockHeight >= stateBlockHeight` and `storeBlockHeight >= appBlockHeight`
|
||||
@@ -165,4 +190,3 @@ If `storeBlockHeight == stateBlockHeight+1`
|
||||
If appBlockHeight == storeBlockHeight {
|
||||
update the state using the saved ABCI responses but don't run the block against the real app.
|
||||
This happens if we crashed after the app finished Commit but before Tendermint saved the state.
|
||||
|
33  docs/spec/consensus/wal.md  Normal file
@@ -0,0 +1,33 @@
|
||||
# WAL
|
||||
|
||||
The consensus module writes every message to the WAL (write-ahead log).
|
||||
|
||||
It also issues an fsync syscall through
[File#Sync](https://golang.org/pkg/os/#File.Sync) for messages signed by this
node (to prevent double signing).
|
||||
|
||||
Under the hood, it uses
|
||||
[autofile.Group](https://godoc.org/github.com/tendermint/tmlibs/autofile#Group),
|
||||
which rotates files when those get too big (> 10MB).
|
||||
|
||||
The total maximum size is 1GB. We only need the latest block and the block before it,
|
||||
but if the former is dragging on across many rounds, we want all those rounds.
|
||||
|
||||
## Replay
|
||||
|
||||
The consensus module will replay all the messages of the last height written to the WAL
if a crash occurs.
|
||||
|
||||
The private validator may try to sign messages during replay because it runs
somewhat autonomously and does not know about the replay process.
|
||||
|
||||
For example, if we got all the way to precommit in the WAL and then crash,
|
||||
after we replay the proposal message, the private validator will try to sign a
|
||||
prevote. But it will fail. That's ok because we’ll see the prevote later in the
|
||||
WAL. Then it will go to precommit, and that time it will work because the
|
||||
private validator contains the `LastSignBytes` and then we’ll replay the
|
||||
precommit from the WAL.
|
||||
|
||||
Make sure to read about [WAL
|
||||
corruption](https://tendermint.readthedocs.io/projects/tools/en/master/specification/corruption.html#wal-corruption)
|
||||
and recovery strategies.
|
@@ -12,7 +12,7 @@ Seeds should operate full nodes with the PEX reactor in a "crawler" mode
|
||||
that continuously explores to validate the availability of peers.
|
||||
|
||||
Seeds should only respond with some top percentile of the best peers it knows about.
|
||||
See [the peer-exchange docs](https://github.com/tendermint/tendermint/blob/master/docs/spec/reactors/pex/pex.md) for details on peer quality.
|
||||
|
||||
## New Full Node
|
||||
|
@@ -2,7 +2,7 @@
|
||||
|
||||
This document explains how Tendermint Peers are identified and how they connect to one another.
|
||||
|
||||
For details on peer discovery, see the [peer exchange (PEX) reactor doc](https://github.com/tendermint/tendermint/blob/master/docs/spec/reactors/pex/pex.md).
|
||||
|
||||
## Peer Identity
|
||||
|
||||
@@ -17,9 +17,6 @@ We will attempt to connect to the peer at IP:PORT, and verify,
|
||||
via authenticated encryption, that it is in possession of the private key
|
||||
corresponding to `<ID>`. This prevents man-in-the-middle attacks on the peer layer.
|
||||
|
||||
If `auth_enc = false`, peers can use an arbitrary ID, but they must always use
|
||||
one. Authentication can then happen out-of-band of Tendermint, for instance via VPN.
|
||||
|
||||
## Connections
|
||||
|
||||
All p2p connections use TCP.
|
@@ -47,3 +47,13 @@ type bcStatusResponseMessage struct {
|
||||
## Protocol
|
||||
|
||||
TODO
|
||||
|
||||
## Channels
|
||||
|
||||
Defines `maxMsgSize` for the maximum size of incoming messages, and
`SendQueueCapacity` and `RecvBufferCapacity` for the maximum sending and
receiving buffers respectively. These are supposed to prevent amplification
attacks by setting an upper limit on how much data we can receive from & send to
a peer.
|
||||
|
||||
Sending incorrectly encoded data will result in stopping the peer.
|
@@ -342,3 +342,11 @@ It broadcasts `NewRoundStepMessage` or `CommitStepMessage` upon new round state
|
||||
broadcasting these messages does not depend on the PeerRoundState; it is sent on the StateChannel.
|
||||
Upon receiving VoteMessage it broadcasts `HasVoteMessage` message to its peers on the StateChannel.
|
||||
`ProposalHeartbeatMessage` is sent the same way on the StateChannel.
|
||||
|
||||
## Channels
|
||||
|
||||
Defines 4 channels: state, data, vote and vote_set_bits. Each channel
|
||||
has `SendQueueCapacity` and `RecvBufferCapacity` and
|
||||
`RecvMessageCapacity` set to `maxMsgSize`.
|
||||
|
||||
Sending incorrectly encoded data will result in stopping the peer.
|
@@ -16,7 +16,7 @@ explained in a forthcoming document.
|
||||
For efficiency reasons, validators in Tendermint consensus protocol do not agree directly on the
|
||||
block as the block size is big, i.e., they don't embed the block inside `Proposal` and
|
||||
`VoteMessage`. Instead, they reach agreement on the `BlockID` (see `BlockID` definition in
|
||||
[Blockchain](https://github.com/tendermint/tendermint/blob/master/docs/spec/blockchain/blockchain.md#blockid) section) that uniquely identifies each block. The block itself is
|
||||
disseminated to validator processes using peer-to-peer gossiping protocol. It starts by having a
|
||||
proposer first splitting a block into a number of block parts, that are then gossiped between
|
||||
processes using `BlockPartMessage`.
|
||||
@@ -69,7 +69,7 @@ BlockID contains PartSetHeader.
|
||||
## VoteMessage
|
||||
|
||||
VoteMessage is sent to vote for some block (or to inform others that a process does not vote in the
|
||||
current round). Vote is defined in the [Blockchain](https://github.com/tendermint/tendermint/blob/master/docs/spec/blockchain/blockchain.md#blockid) section and contains validator's
|
||||
information (validator address and index), height and round for which the vote is sent, vote type,
|
||||
blockID if process vote for some block (`nil` otherwise) and a timestamp when the vote is sent. The
|
||||
message is signed by the validator private key.
|
Some files were not shown because too many files have changed in this diff.