Mirror of https://github.com/fluencelabs/tendermint (synced 2025-07-16 04:41:59 +00:00)

Compare commits: zarko/add-...v0.20.0-rc

230 Commits
SHA1:
73de99ecab, 2046b346a1, c9514f077b, 3bf9a7dc50, 53b0c67f75, 3b8c1ae119, 849ffaf43d, 058867669e, 923e0b02bf, ec34c8f9d2,
6004587347, 7f20eb5f8e, eeabb4c06b, 4da81aa0b7, 67068a34f2, 2a0e9f93ce, 708f35e5c1, f3f5c7f472, 68f6226bea, 118b86b1ef,
b9afcbe3a2, a885af0826, 3a947b0117, caf5afc084, 2aa5285c66, b166831fb5, 423fef1416, b4d10b5b91, 6f1bfb6280, 5e7177053c,
a0201e7862, 126ddca1a6, 186d38dd8a, 01fd102dba, e11f3167ff, 7d98cfd3d6, 4848e88737, 60d7486de2, 229c18f1bd, 91b6d3f18c,
20e9dd0737, 7b02b5b66b, 0cd92a4948, 747f28f85f, a9d0adbdef, 3485edf4f5, c6f612bfc3, bb9aa85d22, c4fef499b6, b77d5344fc,
21f5f3faa7, bf6527fc59, ed8d9951c0, 97b39f340e, 383c255f35, 931fb385d7, 018e096748, ee4eb59355, 082a02e6d1, 2c40966e46,
0a9dc9f875, 87cefb724d, 6701dba876, 442bbe592f, 301aa92f9c, 52f27686ef, 6f9867cba6, 02615c8695, 2df137193c, 1ef415728d,
773e3917ec, 26fdfe10fd, d76e2dc3ff, 420f925a4d, d7d12c8030, 6c4a26f248, 2a26c47da5, aabe96f1af, 0e1f730fbb, 0b68ec4b8e,
ca120798e4, 595fc24c56, 4611cf44f0, d596ed1bc2, 0fb33ca91d, 35428ceb53, de8d4325de, 5a041baa36, 202a43a5af, 2987158a65,
c9001d5a11, 90446261f3, ae572b9038, 0908e668bd, e0dbc3673c, 545990f845, 19ccd1842f, b4d6bf7697, 1854ce41fc, 547e8223b9,
8e46df14e7, 8d60a5a7bd, 5115618550, a6b74b82d1, e5220360c5, b5c4098c53, bc8768cfea, d832bde280, 5e3a23df6d, 6f7333fd5f,
58e3246ffc, bbe1355957, 7c14fa820d, 0d93424c6a, efc01cf582, 754be1887c, 775b015173, b698a9febc, c5f45275ec, 77f09f5b5e,
1fe41be929, 68a0b3f95b, b1f3c11948, e1a3f16fa4, aab98828fe, 03f6a29a64, 4851653d8e, 162811476a, b5ac9ede8a, ff5dfc0c15,
d3a98675aa, e3c4625e63, 01ac378c96, 83ca46396d, 2c125b6c78, 3dde0584ed, 4bca7c1009, 4be3ffbe9b, 5b9a1423ae, 16932f889f,
e9804d76cf, a41f0d3891, 658060150c, d0229e8b1e, 56c9e0da7e, fbe253767e, edbec10f9e, c0a1a8d3c0, ac2d3a917e, 64408a4041,
cae31157b1, 66c2b60324, e2e2127365, f395b82f73, 47557f868a, b6c062c451, 12fc396101, c195772de1, d3c4f746a7, fae94a44a2,
3a30ee75b9, 3498b676a6, 6157c700dd, c90bf77566, 6805ddf1b8, 2761861b6b, 64569b15e5, 0450e35d67, 268055e549, aaa81092e7,
3ee1d7909e, 32268a8135, f645187122, 40c79235c0, c23909eecf, 936d1a0e68, 0cbbb61962, fa66694f2e, d92def4b60, 26f633ed48,
79bfbebfff, f33da8817a, ffe81a0206, 0e00154fcc, 1ab89e6cbf, 14cff484f1, 25cee8827a, 65ebbccb74, 1188dfe7ee, c45ba2967a,
f67c5a9e7b, 593a785ae2, 94e823cc91, 47e4d64973, a2d77cbe4e, 390b592dec, 9ab1fafdf1, 94c016a04e, 97f3ada9c2, d7d4471072,
d48a6f930d, 389a6ffa16, f1ead2df70, e5951acfb4, 0e1414ef9d, 97be1eef87, 0d9004a854, 91c81ef9a1, e2f0778c14, 63f8c58009,
9ba208c1f5, 6ce6b20993, 5361073439, 6c04465d3d, 089ce6744c, 17a5c6fa1a, 18c3f8f3f1, b42d5a2211, b20e777f53, 659762736c,
5b5acbb343, a2930cd723, 8bdfe15de9, b3904b8da8, f2dae2a2d8, e88f74bb9b, 5d8767e656, 54adb790f2, 5ef639fcbe, 34f5d439ee
@@ -77,6 +77,22 @@ jobs:
           paths:
             - "bin/abci*"
 
+  build_slate:
+    <<: *defaults
+    steps:
+      - attach_workspace:
+          at: /tmp/workspace
+      - restore_cache:
+          key: v1-pkg-cache
+      - restore_cache:
+          key: v1-tree-{{ .Environment.CIRCLE_SHA1 }}
+      - run:
+          name: slate docs
+          command: |
+            set -ex
+            export PATH="$GOBIN:$PATH"
+            make build-slate
+
   lint:
     <<: *defaults
     steps:
@@ -153,7 +169,7 @@ jobs:
      - checkout
      - run: mkdir -p $GOPATH/src/github.com/tendermint
      - run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint
-     - run: bash test/circleci/p2p.sh
+     - run: bash test/p2p/circleci.sh
 
  upload_coverage:
    <<: *defaults
@@ -180,6 +196,9 @@ workflows:
   test-suite:
     jobs:
       - setup_dependencies
+      - build_slate:
+          requires:
+            - setup_dependencies
      - setup_abci:
          requires:
            - setup_dependencies
.github/ISSUE_TEMPLATE (vendored, 4)

@@ -19,10 +19,6 @@ in a case of bug.
 
 **ABCI app** (name for built-in, URL for self-written if it's publicly available):
 
-
-**Merkleeyes version** (use `git rev-parse --verify HEAD`, skip if you don't use it):
-
-
 **Environment**:
 - **OS** (e.g. from /etc/os-release):
 - **Install tools**:
.gitignore (vendored, 4)

@@ -5,7 +5,6 @@
 .DS_Store
 build/*
 rpc/test/.tendermint
-.debora
 .tendermint
 remote_dump
 .revision
@@ -13,7 +12,6 @@ vendor
 .vagrant
 test/p2p/data/
 test/logs
-.glide
 coverage.txt
 docs/_build
 docs/tools
@@ -25,3 +23,5 @@ scripts/cutWALUntil/cutWALUntil
 
 .idea/
 *.iml
+
+libs/pubsub/query/fuzz_test/output
CHANGELOG.md (137)

@@ -1,28 +1,126 @@
 # Changelog
 
-## Roadmap
-
-BREAKING CHANGES:
-- Better support for injecting randomness
-- Upgrade consensus for more real-time use of evidence
-
-FEATURES:
-- Use the chain as its own CA for nodes and validators
-- Tooling to run multiple blockchains/apps, possibly in a single process
-- State syncing (without transaction replay)
-- Add authentication and rate-limitting to the RPC
-
-IMPROVEMENTS:
-- Improve subtleties around mempool caching and logic
-- Consensus optimizations:
-  - cache block parts for faster agreement after round changes
-  - propagate block parts rarest first
-- Better testing of the consensus state machine (ie. use a DSL)
-- Auto compiled serialization/deserialization code instead of go-wire reflection
-
-BUG FIXES:
-- Graceful handling/recovery for apps that have non-determinism or fail to halt
-- Graceful handling/recovery for violations of safety, or liveness
-
+## 0.20.0
+
+BREAKING:
+
+- [libs/pubsub] TagMap#Get returns a string value
+- [libs/pubsub] NewTagMap accepts a map of strings
+
+## 0.19.6
+
+FEATURES
+
+- [rpc] the RPC documentation is now published to https://tendermint.github.io/slate
+
+IMPROVEMENTS:
+
+- [consensus] consensus reactor now receives events from a separate event bus,
+  which is not dependant on external RPC load
+- [consensus/wal] do not look for height in older files if we've seen height - 1
+
+## 0.19.5
+
+*May 20th, 2018*
+
+BREAKING CHANGES
+
+- [rpc/client] TxSearch and UnconfirmedTxs have new arguments (see below)
+- [rpc/client] TxSearch returns ResultTxSearch
+- [version] Breaking changes to Go APIs will not be reflected in breaking
+  version change, but will be included in changelog.
+
+FEATURES
+
+- [rpc] `/tx_search` takes `page` (starts at 1) and `per_page` (max 100, default 30) args to paginate results
+- [rpc] `/unconfirmed_txs` takes `limit` (max 100, default 30) arg to limit the output
+- [config] `mempool.size` and `mempool.cache_size` options
+
+IMPROVEMENTS
+
+- [docs] Lots of updates
+- [consensus] Only Fsync() the WAL before executing msgs from ourselves
+
+BUG FIXES
+
+- [mempool] Enforce upper bound on number of transactions
+
+## 0.19.4 (May 17th, 2018)
+
+IMPROVEMENTS
+
+- [state] Improve tx indexing by using batches
+- [consensus, state] Improve logging (more consensus logs, fewer tx logs)
+- [spec] Moved to `docs/spec` (TODO cleanup the rest of the docs ...)
+
+BUG FIXES
+
+- [consensus] Fix issue #1575 where a late proposer can get stuck
+
+## 0.19.3 (May 14th, 2018)
+
+FEATURES
+
+- [rpc] New `/consensus_state` returns just the votes seen at the current height
+
+IMPROVEMENTS
+
+- [rpc] Add stringified votes and fraction of power voted to `/dump_consensus_state`
+- [rpc] Add PeerStateStats to `/dump_consensus_state`
+
+BUG FIXES
+
+- [cmd] Set GenesisTime during `tendermint init`
+- [consensus] fix ValidBlock rules
+
+## 0.19.2 (April 30th, 2018)
+
+FEATURES:
+
+- [p2p] Allow peers with different Minor versions to connect
+- [rpc] `/net_info` includes `n_peers`
+
+IMPROVEMENTS:
+
+- [p2p] Various code comments, cleanup, error types
+- [p2p] Change some Error logs to Debug
+
+BUG FIXES:
+
+- [p2p] Fix reconnect to persistent peer when first dial fails
+- [p2p] Validate NodeInfo.ListenAddr
+- [p2p] Only allow (MaxNumPeers - MaxNumOutboundPeers) inbound peers
+- [p2p/pex] Limit max msg size to 64kB
+- [p2p] Fix panic when pex=false
+- [p2p] Allow multiple IPs per ID in AddrBook
+- [p2p] Fix before/after bugs in addrbook isBad()
+
+## 0.19.1 (April 27th, 2018)
+
+Note this release includes some small breaking changes in the RPC and one in the
+config that are really bug fixes. v0.19.1 will work with existing chains, and make Tendermint
+easier to use and debug. With <3
+
+BREAKING (MINOR)
+
+- [config] Removed `wal_light` setting. If you really needed this, let us know
+
+FEATURES:
+
+- [networks] moved in tooling from devops repo: terraform and ansible scripts for deploying testnets !
+- [cmd] Added `gen_node_key` command
+
+BUG FIXES
+
+Some of these are breaking in the RPC response, but they're really bugs!
+
+- [spec] Document address format and pubkey encoding pre and post Amino
+- [rpc] Lower case JSON field names
+- [rpc] Fix missing entries, improve, and lower case the fields in `/dump_consensus_state`
+- [rpc] Fix NodeInfo.Channels format to hex
+- [rpc] Add Validator address to `/status`
+- [rpc] Fix `prove` in ABCIQuery
+- [cmd] MarshalJSONIndent on init
+
 ## 0.19.0 (April 13th, 2018)
 
@@ -39,8 +137,9 @@ See github.com/tendermint/go-amino for details on the new format.
 See `scripts/wire2amino.go` for a tool to upgrade
 genesis/priv_validator/node_key JSON files.
 
-FEATURES:
-- [cmd] added `gen_node_key` command
+FEATURES
+
+- [test] docker-compose for local testnet setup (thanks Greg!)
 
 ## 0.18.0 (April 6th, 2018)
 
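The 0.19.5 entries above change the Go RPC client so that `TxSearch` takes `page`/`per_page` arguments and returns a `ResultTxSearch`, and `UnconfirmedTxs` takes a `limit`. The following is a minimal sketch of calling that paginated client API; it assumes the `rpc/client` HTTP client of that release and a local node on the then-default RPC port 46657, and the result field names (`TotalCount`, `N`) are assumptions inferred from the result types named in the changelog, not confirmed by this diff.

```go
package main

import (
	"fmt"

	client "github.com/tendermint/tendermint/rpc/client"
)

func main() {
	// Assumes a local node with its RPC server on the then-default port 46657.
	c := client.NewHTTP("tcp://localhost:46657", "/websocket")

	// Per the 0.19.5 changelog: TxSearch now takes page (starting at 1) and
	// perPage (max 100, default 30), and returns a ResultTxSearch.
	res, err := c.TxSearch("tx.height >= 1", false, 1, 30)
	if err != nil {
		panic(err)
	}
	fmt.Printf("page 1 of %d matching txs\n", res.TotalCount) // TotalCount is an assumed field name

	// Per the same changelog: UnconfirmedTxs takes a limit (max 100, default 30).
	unconfirmed, err := c.UnconfirmedTxs(30)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d unconfirmed txs returned\n", unconfirmed.N) // N is an assumed field name
}
```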
DOCKER/.gitignore (vendored, new file, 1)

@@ -0,0 +1 @@
+tendermint
DOCKER/Dockerfile

@@ -1,45 +1,39 @@
 FROM alpine:3.7
+MAINTAINER Greg Szabo <greg@tendermint.com>
 
-# This is the release of tendermint to pull in.
-ENV TM_VERSION 0.17.1
-ENV TM_SHA256SUM d57008c63d2d9176861137e38ed203da486febf20ae7d388fb810a75afff8f24
+# Tendermint will be looking for the genesis file in /tendermint/config/genesis.json
+# (unless you change `genesis_file` in config.toml). You can put your config.toml and
+# private validator file into /tendermint/config.
 
-# Tendermint will be looking for genesis file in /tendermint (unless you change
-# `genesis_file` in config.toml). You can put your config.toml and private
-# validator file into /tendermint.
 #
 # The /tendermint/data dir is used by tendermint to store state.
-ENV DATA_ROOT /tendermint
-ENV TMHOME $DATA_ROOT
-
-# Set user right away for determinism
-RUN addgroup tmuser && \
-    adduser -S -G tmuser tmuser
-
-# Create directory for persistence and give our user ownership
-RUN mkdir -p $DATA_ROOT && \
-    chown -R tmuser:tmuser $DATA_ROOT
+ENV TMHOME /tendermint
 
+# OS environment setup
+# Set user right away for determinism, create directory for persistence and give our user ownership
 # jq and curl used for extracting `pub_key` from private validator while
 # deploying tendermint with Kubernetes. It is nice to have bash so the users
 # could execute bash commands.
-RUN apk add --no-cache bash curl jq
+RUN apk update && \
+    apk upgrade && \
+    apk --no-cache add curl jq bash && \
+    addgroup tmuser && \
+    adduser -S -G tmuser tmuser -h "$TMHOME"
 
-RUN apk add --no-cache openssl && \
-    wget https://github.com/tendermint/tendermint/releases/download/v${TM_VERSION}/tendermint_${TM_VERSION}_linux_amd64.zip && \
-    echo "${TM_SHA256SUM}  tendermint_${TM_VERSION}_linux_amd64.zip" | sha256sum -c && \
-    unzip -d /bin tendermint_${TM_VERSION}_linux_amd64.zip && \
-    apk del openssl && \
-    rm -f tendermint_${TM_VERSION}_linux_amd64.zip
+# Run the container with tmuser by default. (UID=100, GID=1000)
+USER tmuser
 
 # Expose the data directory as a volume since there's mutable state in there
-VOLUME $DATA_ROOT
+VOLUME [ $TMHOME ]
 
-# p2p port
-EXPOSE 46656
-# rpc port
-EXPOSE 46657
+WORKDIR $TMHOME
 
-ENTRYPOINT ["tendermint"]
+# p2p and rpc port
+EXPOSE 46656 46657
+
+ENTRYPOINT ["/usr/bin/tendermint"]
 CMD ["node", "--moniker=`hostname`"]
+STOPSIGNAL SIGTERM
+
+ARG BINARY=tendermint
+COPY $BINARY /usr/bin/tendermint
DOCKER/Dockerfile.testing (new file, 18)

@@ -0,0 +1,18 @@
+FROM golang:1.10.1
+
+# Grab deps (jq, hexdump, xxd, killall)
+RUN apt-get update && \
+    apt-get install -y --no-install-recommends \
+    jq bsdmainutils vim-common psmisc netcat
+
+# Add testing deps for curl
+RUN echo 'deb http://httpredir.debian.org/debian testing main non-free contrib' >> /etc/apt/sources.list && \
+    apt-get update && \
+    apt-get install -y --no-install-recommends curl
+
+VOLUME /go
+
+EXPOSE 46656
+EXPOSE 46657
DOCKER/Makefile

@@ -7,6 +7,9 @@ push:
 build_develop:
 	docker build -t "tendermint/tendermint:develop" -f Dockerfile.develop .
 
+build_testing:
+	docker build --tag tendermint/testing -f ./Dockerfile.testing .
+
 push_develop:
 	docker push "tendermint/tendermint:develop"
 
DOCKER/README.md

@@ -17,7 +17,7 @@
 # Quick reference
 
 * **Where to get help:**
-  https://tendermint.com/community
+  https://cosmos.network/community
 
 * **Where to file issues:**
   https://github.com/tendermint/tendermint/issues
@@ -37,25 +37,29 @@ To get started developing applications, see the [application developers guide](h
 
 ## Start one instance of the Tendermint core with the `kvstore` app
 
-A very simple example of a built-in app and Tendermint core in one container.
+A quick example of a built-in app and Tendermint core in one container.
 
 ```
 docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint init
 docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint node --proxy_app=kvstore
 ```
 
-## mintnet-kubernetes
+# Local cluster
 
-If you want to see many containers talking to each other, consider using [mintnet-kubernetes](https://github.com/tendermint/tools/tree/master/mintnet-kubernetes), which is a tool for running Tendermint-based applications on a Kubernetes cluster.
+To run a 4-node network, see the `Makefile` in the root of [the repo](https://github.com/tendermint/tendermint/master/Makefile) and run:
+
+```
+make build-linux
+make build-docker-localnode
+make localnet-start
+```
+
+Note that this will build and use a different image than the ones provided here.
 
 # License
 
-View [license information](https://raw.githubusercontent.com/tendermint/tendermint/master/LICENSE) for the software contained in this image.
+- Tendermint's license is [Apache 2.0](https://github.com/tendermint/tendermint/master/LICENSE).
 
-# User Feedback
+# Contributing
 
-## Contributing
+Contributions are most welcome! See the [contributing file](https://github.com/tendermint/tendermint/blob/master/CONTRIBUTING.md) for more information.
 
-You are invited to contribute new features, fixes, or updates, large or small; we are always thrilled to receive pull requests, and do our best to process them as fast as we can.
-
-Before you start to code, we recommend discussing your plans through a [GitHub](https://github.com/tendermint/tendermint/issues) issue, especially for more ambitious contributions. This gives other contributors a chance to point you in the right direction, give you feedback on your design, and help you find out if someone else is working on the same thing.
Gopkg.lock (generated, 27)

@@ -5,7 +5,7 @@
   branch = "master"
   name = "github.com/btcsuite/btcd"
   packages = ["btcec"]
-  revision = "2be2f12b358dc57d70b8f501b00be450192efbc3"
+  revision = "675abc5df3c5531bc741b56a765e35623459da6d"
 
 [[projects]]
   name = "github.com/davecgh/go-spew"
@@ -238,8 +238,8 @@
     "server",
     "types"
   ]
-  revision = "78a8905690ef54f9d57e3b2b0ee7ad3a04ef3f1f"
-  version = "v0.10.3"
+  revision = "f9dce537281ffba5d1e047e6729429f7e5fb90c9"
+  version = "v0.11.0-rc0"
 
 [[projects]]
   branch = "master"
@@ -254,8 +254,8 @@
 [[projects]]
   name = "github.com/tendermint/go-amino"
   packages = ["."]
-  revision = "42246108ff925a457fb709475070a03dfd3e2b5c"
-  version = "0.9.6"
+  revision = "ed62928576cfcaf887209dc96142cd79cdfff389"
+  version = "0.9.9"
 
 [[projects]]
   name = "github.com/tendermint/go-crypto"
@@ -281,12 +281,10 @@
     "flowrate",
     "log",
     "merkle",
-    "pubsub",
-    "pubsub/query",
     "test"
   ]
-  revision = "97e1f1ad3f510048929a51475811a18686c894df"
-  version = "0.8.2-rc0"
+  revision = "cc5f287c4798ffe88c04d02df219ecb6932080fd"
+  version = "v0.8.3-rc0"
 
 [[projects]]
   branch = "master"
@@ -301,13 +299,14 @@
     "ripemd160",
     "salsa20/salsa"
   ]
-  revision = "d6449816ce06963d9d136eee5a56fca5b0616e7e"
+  revision = "b0697eccbea9adec5b7ba8008f4c33d98d733388"
 
 [[projects]]
   branch = "master"
   name = "golang.org/x/net"
   packages = [
     "context",
+    "http/httpguts",
     "http2",
     "http2/hpack",
     "idna",
@@ -315,13 +314,13 @@
     "lex/httplex",
     "trace"
   ]
-  revision = "61147c48b25b599e5b561d2e9c4f3e1ef489ca41"
+  revision = "5f9ae10d9af5b1c89ae6904293b14b064d4ada23"
 
 [[projects]]
   branch = "master"
   name = "golang.org/x/sys"
   packages = ["unix"]
-  revision = "3b87a42e500a6dc65dae1a55d0b641295971163e"
+  revision = "bb9c189858d91f42db229b04d45a4c3d23a7662a"
 
 [[projects]]
   name = "golang.org/x/text"
@@ -348,7 +347,7 @@
   branch = "master"
   name = "google.golang.org/genproto"
   packages = ["googleapis/rpc/status"]
-  revision = "51d0944304c3cbce4afe9e5247e21100037bff78"
+  revision = "7fd901a49ba6a7f87732eb344f6e3c5b19d1b200"
 
 [[projects]]
   name = "google.golang.org/grpc"
@@ -383,6 +382,6 @@
 [solve-meta]
   analyzer-name = "dep"
   analyzer-version = 1
-  inputs-digest = "e70f8692c825e80ae8510546e297840b9560d00e11b2272749a55cc2ffd147f0"
+  inputs-digest = "90dc14750c1499107a3e6728ae696f9977f56bee2855c2f1c0a14831a165cc0e"
   solver-name = "gps-cdcl"
   solver-version = 1
Gopkg.toml

@@ -71,7 +71,7 @@
 
 [[constraint]]
   name = "github.com/tendermint/abci"
-  version = "~0.10.3"
+  version = "0.11.0-rc0"
 
 [[constraint]]
   name = "github.com/tendermint/go-crypto"
@@ -79,11 +79,11 @@
 
 [[constraint]]
   name = "github.com/tendermint/go-amino"
-  version = "~0.9.6"
+  version = "0.9.9"
 
-[[constraint]]
+[[override]]
   name = "github.com/tendermint/tmlibs"
-  version = "~0.8.2-rc0"
+  version = "~0.8.3-rc0"
 
 [[constraint]]
   name = "google.golang.org/grpc"
Makefile (Normal file → Executable file, 46)

@@ -178,6 +178,14 @@ metalinter_all:
 	@echo "--> Running linter (all)"
 	gometalinter.v2 --vendor --deadline=600s --enable-all --disable=lll ./...
 
+###########################################################
+### Docker image
+
+build-docker:
+	cp build/tendermint DOCKER/tendermint
+	docker build --label=tendermint --tag="tendermint/tendermint" DOCKER
+	rm -rf DOCKER/tendermint
+
 ###########################################################
 ### Local testnet using docker
 
@@ -185,18 +193,44 @@ metalinter_all:
 build-linux:
 	GOOS=linux GOARCH=amd64 $(MAKE) build
 
+build-docker-localnode:
+	cd networks/local
+	make
+
 # Run a 4-node testnet locally
-docker-start:
-	@echo "Wait until 'Attaching to node0, node1, node2, node3' message appears"
-	@if ! [ -f build/node0/config/genesis.json ]; then docker run --rm -v `pwd`/build:/tendermint:Z tendermint/localnode testnet --v 4 --o . --populate-persistent-peers --starting-ip-address 192.167.10.2 ; fi
+localnet-start: localnet-stop
+	@if ! [ -f build/node0/config/genesis.json ]; then docker run --rm -v $(CURDIR)/build:/tendermint:Z tendermint/localnode testnet --v 4 --o . --populate-persistent-peers --starting-ip-address 192.167.10.2 ; fi
 	docker-compose up
 
 # Stop testnet
-docker-stop:
+localnet-stop:
 	docker-compose down
 
+###########################################################
+### Remote full-nodes (sentry) using terraform and ansible
+
+# Server management
+sentry-start:
+	@if [ -z "$(DO_API_TOKEN)" ]; then echo "DO_API_TOKEN environment variable not set." ; false ; fi
+	@if ! [ -f $(HOME)/.ssh/id_rsa.pub ]; then ssh-keygen ; fi
+	cd networks/remote/terraform && terraform init && terraform apply -var DO_API_TOKEN="$(DO_API_TOKEN)" -var SSH_KEY_FILE="$(HOME)/.ssh/id_rsa.pub"
+	@if ! [ -f $(CURDIR)/build/node0/config/genesis.json ]; then docker run --rm -v $(CURDIR)/build:/tendermint:Z tendermint/localnode testnet --v 0 --n 4 --o . ; fi
+	cd networks/remote/ansible && ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook -i inventory/digital_ocean.py -l sentrynet install.yml
+	@echo "Next step: Add your validator setup in the genesis.json and config.tml files and run \"make sentry-config\". (Public key of validator, chain ID, peer IP and node ID.)"
+
+# Configuration management
+sentry-config:
+	cd networks/remote/ansible && ansible-playbook -i inventory/digital_ocean.py -l sentrynet config.yml -e BINARY=$(CURDIR)/build/tendermint -e CONFIGDIR=$(CURDIR)/build
+
+sentry-stop:
+	@if [ -z "$(DO_API_TOKEN)" ]; then echo "DO_API_TOKEN environment variable not set." ; false ; fi
+	cd networks/remote/terraform && terraform destroy -var DO_API_TOKEN="$(DO_API_TOKEN)" -var SSH_KEY_FILE="$(HOME)/.ssh/id_rsa.pub"
+
+# meant for the CI, inspect script & adapt accordingly
+build-slate:
+	bash scripts/slate.sh
+
 # To avoid unintended conflicts with file names, always add to .PHONY
 # unless there is a reason not to.
 # https://www.gnu.org/software/make/manual/html_node/Phony-Targets.html
-.PHONY: check build build_race dist install check_tools get_tools update_tools get_vendor_deps draw_deps test_cover test_apps test_persistence test_p2p test test_race test_integrations test_release test100 vagrant_test fmt build-linux docker-start docker-stop
+.PHONY: check build build_race dist install check_tools get_tools update_tools get_vendor_deps draw_deps test_cover test_apps test_persistence test_p2p test test_race test_integrations test_release test100 vagrant_test fmt build-linux localnet-start localnet-stop build-docker build-docker-localnode sentry-start sentry-config sentry-stop build-slate
 
README.md (31)

@@ -24,7 +24,14 @@ _NOTE: This is alpha software. Please contact us if you intend to run it in prod
 Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine - written in any programming language -
 and securely replicates it on many machines.
 
-For more information, from introduction to install to application development, [Read The Docs](https://tendermint.readthedocs.io/en/master/).
+For protocol details, see [the specification](/docs/spec).
+
+## Security
+
+To report a security vulnerability, see our [bug bounty
+program](https://tendermint.com/security).
+
+For examples of the kinds of bugs we're looking for, see [SECURITY.md](SECURITY.md)
 
 ## Minimum requirements
 
@@ -34,19 +41,21 @@ Go version | Go1.9 or higher
 
 ## Install
 
-To download pre-built binaries, see our [downloads page](https://tendermint.com/downloads).
+See the [install instructions](/docs/install.rst)
 
-To install from source, you should be able to:
+## Quick Start
 
-`go get -u github.com/tendermint/tendermint/cmd/tendermint`
-
-For more details (or if it fails), [read the docs](https://tendermint.readthedocs.io/en/master/install.html).
+- [Single node](/docs/using-tendermint.rst)
+- [Local cluster using docker-compose](/networks/local)
+- [Remote cluster using terraform and ansible](/docs/terraform-and-ansible.rst)
+- [Join the public testnet](https://cosmos.network/testnet)
 
 ## Resources
 
 ### Tendermint Core
 
-All resources involving the use of, building application on, or developing for, tendermint, can be found at [Read The Docs](https://tendermint.readthedocs.io/en/master/). Additional information about some - and eventually all - of the sub-projects below, can be found at Read The Docs.
+For more, [Read The Docs](https://tendermint.readthedocs.io/en/master/).
+Additional information about some - and eventually all - of the sub-projects below, can be found at Read The Docs.
 
 ### Sub-projects
 
@@ -61,8 +70,8 @@ All resources involving the use of, building application on, or developing for,
 
 ### Applications
 
-* [Ethermint](http://github.com/tendermint/ethermint); Ethereum on Tendermint
 * [Cosmos SDK](http://github.com/cosmos/cosmos-sdk); a cryptocurrency application framework
+* [Ethermint](http://github.com/tendermint/ethermint); Ethereum on Tendermint
 * [Many more](https://tendermint.readthedocs.io/en/master/ecosystem.html#abci-applications)
 
 ### More
@@ -85,7 +94,11 @@ According to SemVer, anything in the public API can change at any time before ve
 
 To provide some stability to Tendermint users in these 0.X.X days, the MINOR version is used
 to signal breaking changes across a subset of the total public API. This subset includes all
-interfaces exposed to other processes (cli, rpc, p2p, etc.), as well as parts of the following packages:
+interfaces exposed to other processes (cli, rpc, p2p, etc.), but does not
+include the in-process Go APIs.
+
+That said, breaking changes in the following packages will be documented in the
+CHANGELOG even if they don't lead to MINOR version bumps:
 
 - types
 - rpc/client
ROADMAP.md (new file, 23)

@@ -0,0 +1,23 @@
+# Roadmap
+
+BREAKING CHANGES:
+- Better support for injecting randomness
+- Upgrade consensus for more real-time use of evidence
+
+FEATURES:
+- Use the chain as its own CA for nodes and validators
+- Tooling to run multiple blockchains/apps, possibly in a single process
+- State syncing (without transaction replay)
+- Add authentication and rate-limitting to the RPC
+
+IMPROVEMENTS:
+- Improve subtleties around mempool caching and logic
+- Consensus optimizations:
+  - cache block parts for faster agreement after round changes
+  - propagate block parts rarest first
+- Better testing of the consensus state machine (ie. use a DSL)
+- Auto compiled serialization/deserialization code instead of go-wire reflection
+
+BUG FIXES:
+- Graceful handling/recovery for apps that have non-determinism or fail to halt
+- Graceful handling/recovery for violations of safety, or liveness
SECURITY.md (new file, 71)

@@ -0,0 +1,71 @@
+# Security
+
+As part of our [Coordinated Vulnerability Disclosure
+Policy](https://tendermint.com/security), we operate a bug bounty.
+See the policy for more details on submissions and rewards.
+
+Here is a list of examples of the kinds of bugs we're most interested in:
+
+## Specification
+
+- Conceptual flaws
+- Ambiguities, inconsistencies, or incorrect statements
+- Mis-match between specification and implementation of any component
+
+## Consensus
+
+Assuming less than 1/3 of the voting power is Byzantine (malicious):
+
+- Validation of blockchain data structures, including blocks, block parts,
+  votes, and so on
+- Execution of blocks
+- Validator set changes
+- Proposer round robin
+- Two nodes committing conflicting blocks for the same height (safety failure)
+- A correct node signing conflicting votes
+- A node halting (liveness failure)
+- Syncing new and old nodes
+
+## Networking
+
+- Authenticated encryption (MITM, information leakage)
+- Eclipse attacks
+- Sybil attacks
+- Long-range attacks
+- Denial-of-Service
+
+## RPC
+
+- Write-access to anything besides sending transactions
+- Denial-of-Service
+- Leakage of secrets
+
+## Denial-of-Service
+
+Attacks may come through the P2P network or the RPC:
+
+- Amplification attacks
+- Resource abuse
+- Deadlocks and race conditions
+- Panics and unhandled errors
+
+## Libraries
+
+- Serialization (Amino)
+- Reading/Writing files and databases
+- Logging and monitoring
+
+## Cryptography
+
+- Elliptic curves for validator signatures
+- Hash algorithms and Merkle trees for block validation
+- Authenticated encryption for P2P connections
+
+## Light Client
+
+- Validation of blockchain data structures
+- Correctly validating an incorrect proof
+- Incorrectly validating a correct proof
+- Syncing validator set changes
Vagrantfile (vendored, 34)

@@ -10,31 +10,37 @@ Vagrant.configure("2") do |config|
   end
 
   config.vm.provision "shell", inline: <<-SHELL
-    # add docker repo
-    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
-    add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu xenial stable"
-
-    # and golang 1.9 support
-    # official repo doesn't have race detection runtime...
-    # add-apt-repository ppa:gophers/archive
-    add-apt-repository ppa:longsleep/golang-backports
+    apt-get update
 
     # install base requirements
-    apt-get update
     apt-get install -y --no-install-recommends wget curl jq zip \
         make shellcheck bsdmainutils psmisc
-    apt-get install -y docker-ce golang-1.9-go
     apt-get install -y language-pack-en
 
+    # install docker
+    apt-get install -y --no-install-recommends apt-transport-https \
+        ca-certificates curl software-properties-common
+    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
+    add-apt-repository \
+        "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
+        $(lsb_release -cs) \
+        stable"
+    apt-get install -y docker-ce
+    usermod -a -G docker vagrant
+
+    # install go
+    wget -q https://dl.google.com/go/go1.10.1.linux-amd64.tar.gz
+    tar -xvf go1.10.1.linux-amd64.tar.gz
+    mv go /usr/local
+    rm -f go1.10.1.linux-amd64.tar.gz
+
     # cleanup
     apt-get autoremove -y
 
-    # needed for docker
-    usermod -a -G docker vagrant
-
     # set env variables
-    echo 'export PATH=$PATH:/usr/lib/go-1.9/bin:/home/vagrant/go/bin' >> /home/vagrant/.bash_profile
+    echo 'export GOROOT=/usr/local/go' >> /home/vagrant/.bash_profile
     echo 'export GOPATH=/home/vagrant/go' >> /home/vagrant/.bash_profile
+    echo 'export PATH=$PATH:$GOROOT/bin:$GOPATH/bin' >> /home/vagrant/.bash_profile
     echo 'export LC_ALL=en_US.UTF-8' >> /home/vagrant/.bash_profile
     echo 'cd go/src/github.com/tendermint/tendermint' >> /home/vagrant/.bash_profile
 
@@ -32,7 +32,7 @@ func BenchmarkEncodeStatusWire(b *testing.B) {
 			LatestBlockTime: time.Unix(0, 1234),
 		},
 		ValidatorInfo: ctypes.ValidatorInfo{
 			PubKey: nodeKey.PubKey(),
 		},
 	}
 	b.StartTimer()
@@ -1,6 +1,7 @@
 package blockchain
 
 import (
+	"net"
 	"testing"
 
 	cmn "github.com/tendermint/tmlibs/common"
@@ -204,3 +205,4 @@ func (tp *bcrTestPeer) IsOutbound() bool { return false }
 func (tp *bcrTestPeer) IsPersistent() bool { return true }
 func (tp *bcrTestPeer) Get(s string) interface{} { return s }
 func (tp *bcrTestPeer) Set(string, interface{}) {}
+func (tp *bcrTestPeer) RemoteIP() net.IP { return []byte{127, 0, 0, 1} }
@@ -97,7 +97,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
 
 	incompletePartSet := types.NewPartSetFromHeader(types.PartSetHeader{Total: 2})
 	uncontiguousPartSet := types.NewPartSetFromHeader(types.PartSetHeader{Total: 0})
-	uncontiguousPartSet.AddPart(part2, false)
+	uncontiguousPartSet.AddPart(part2)
 
 	header1 := types.Header{
 		Height: 1,
@@ -1,6 +1,8 @@
 package commands
 
 import (
+	"time"
+
 	"github.com/spf13/cobra"
 
 	cfg "github.com/tendermint/tendermint/config"
@@ -50,7 +52,8 @@ func initFilesWithConfig(config *cfg.Config) error {
 		logger.Info("Found genesis file", "path", genFile)
 	} else {
 		genDoc := types.GenesisDoc{
 			ChainID: cmn.Fmt("test-chain-%v", cmn.RandStr(6)),
+			GenesisTime: time.Now(),
 		}
 		genDoc.Validators = []types.GenesisValidator{{
 			PubKey: pv.GetPubKey(),
@@ -6,7 +6,6 @@ import (
 	"github.com/spf13/cobra"
 
 	"github.com/tendermint/tendermint/p2p"
-
 )
 
 // ShowNodeIDCmd dumps node's ID to the standard output.
@@ -2,6 +2,7 @@ package commands
 
 import (
 	"fmt"
+
 	"github.com/spf13/cobra"
 
 	privval "github.com/tendermint/tendermint/types/priv_validator"
@@ -57,7 +57,18 @@ func init() {
 var TestnetFilesCmd = &cobra.Command{
 	Use:   "testnet",
 	Short: "Initialize files for a Tendermint testnet",
-	RunE:  testnetFiles,
+	Long: `testnet will create "v" + "n" number of directories and populate each with
+necessary files (private validator, genesis, config, etc.).
+
+Note, strict routability for addresses is turned off in the config file.
+
+Optionally, it will fill in persistent_peers list in config file using either hostnames or IPs.
+
+Example:
+
+	tendermint testnet --v 4 --o ./output --populate-persistent-peers --starting-ip-address 192.168.10.2
+	`,
+	RunE: testnetFiles,
 }
 
 func testnetFiles(cmd *cobra.Command, args []string) error {
@@ -162,6 +173,7 @@ func populatePersistentPeersInConfigAndWriteIt(config *cfg.Config) error {
 		nodeDir := filepath.Join(outputDir, cmn.Fmt("%s%d", nodeDirPrefix, i))
 		config.SetRoot(nodeDir)
 		config.P2P.PersistentPeers = persistentPeersList
+		config.P2P.AddrBookStrict = false
 
 		// overwrite default config
 		cfg.WriteConfigFile(filepath.Join(nodeDir, "config", "config.toml"), config)
@@ -335,6 +335,7 @@ type MempoolConfig struct {
 	RecheckEmpty bool   `mapstructure:"recheck_empty"`
 	Broadcast    bool   `mapstructure:"broadcast"`
 	WalPath      string `mapstructure:"wal_dir"`
+	Size         int    `mapstructure:"size"`
 	CacheSize    int    `mapstructure:"cache_size"`
 }
 
@@ -345,6 +346,7 @@ func DefaultMempoolConfig() *MempoolConfig {
 		RecheckEmpty: true,
 		Broadcast:    true,
 		WalPath:      filepath.Join(defaultDataDir, "mempool.wal"),
+		Size:         100000,
 		CacheSize:    100000,
 	}
 }
@@ -367,10 +369,9 @@ func (cfg *MempoolConfig) WalDir() string {
 // ConsensusConfig defines the confuguration for the Tendermint consensus service,
 // including timeouts and details about the WAL and the block structure.
 type ConsensusConfig struct {
 	RootDir string `mapstructure:"home"`
 	WalPath string `mapstructure:"wal_file"`
-	WalLight bool  `mapstructure:"wal_light"`
 	walFile string // overrides WalPath if set
 
 	// All timeouts are in milliseconds
 	TimeoutPropose int `mapstructure:"timeout_propose"`
@@ -401,7 +402,6 @@ type ConsensusConfig struct {
 func DefaultConsensusConfig() *ConsensusConfig {
 	return &ConsensusConfig{
 		WalPath:             filepath.Join(defaultDataDir, "cs.wal", "wal"),
-		WalLight:            false,
 		TimeoutPropose:      3000,
 		TimeoutProposeDelta: 500,
 		TimeoutPrevote:      1000,
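The hunks above add `Size` and `CacheSize` knobs to `MempoolConfig` (surfaced in `config.toml` as `mempool.size` and `mempool.cache_size`, per the changelog). Below is a minimal sketch of setting them programmatically; it assumes the `config` package as shown in the diff, and the particular values are illustrative only.

```go
package main

import (
	"fmt"

	cfg "github.com/tendermint/tendermint/config"
)

func main() {
	// DefaultMempoolConfig now fills Size and CacheSize with 100000 (see the diff above).
	conf := cfg.DefaultConfig()
	fmt.Println(conf.Mempool.Size, conf.Mempool.CacheSize) // 100000 100000

	// Tighten the bounds for a memory-constrained node; values here are illustrative.
	conf.Mempool.Size = 5000       // upper bound on txs kept in the mempool
	conf.Mempool.CacheSize = 10000 // upper bound on the seen-tx cache
}
```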
@@ -37,13 +37,13 @@ func EnsureRoot(rootDir string) {
 
 	// Write default config file if missing.
 	if !cmn.FileExists(configFilePath) {
-		writeDefaultCondigFile(configFilePath)
+		writeDefaultConfigFile(configFilePath)
 	}
 }
 
 // XXX: this func should probably be called by cmd/tendermint/commands/init.go
 // alongside the writing of the genesis.json and priv_validator.json
-func writeDefaultCondigFile(configFilePath string) {
+func writeDefaultConfigFile(configFilePath string) {
 	WriteConfigFile(configFilePath, DefaultConfig())
 }
 
@@ -179,11 +179,16 @@ recheck_empty = {{ .Mempool.RecheckEmpty }}
 broadcast = {{ .Mempool.Broadcast }}
 wal_dir = "{{ .Mempool.WalPath }}"
 
+# size of the mempool
+size = {{ .Mempool.Size }}
+
+# size of the cache (used to filter transactions we saw earlier)
+cache_size = {{ .Mempool.CacheSize }}
+
 ##### consensus configuration options #####
 [consensus]
 
 wal_file = "{{ .Consensus.WalPath }}"
-wal_light = {{ .Consensus.WalLight }}
 
 # All timeouts are in milliseconds
 timeout_propose = {{ .Consensus.TimeoutPropose }}
@@ -267,7 +272,7 @@ func ResetTestRoot(testName string) *Config {
 
 	// Write default config file if missing.
 	if !cmn.FileExists(configFilePath) {
-		writeDefaultCondigFile(configFilePath)
+		writeDefaultConfigFile(configFilePath)
 	}
 	if !cmn.FileExists(genesisFilePath) {
 		cmn.MustWriteFile(genesisFilePath, []byte(testGenesis), 0644)
@@ -1,18 +1 @@
-# The core consensus algorithm.
-
-* state.go - The state machine as detailed in the whitepaper
-* reactor.go - A reactor that connects the state machine to the gossip network
-
-# Go-routine summary
-
-The reactor runs 2 go-routines for each added peer: gossipDataRoutine and gossipVotesRoutine.
-
-The consensus state runs two persistent go-routines: timeoutRoutine and receiveRoutine.
-Go-routines are also started to trigger timeouts and to avoid blocking when the internalMsgQueue is really backed up.
-
-# Replay/WAL
-
-A write-ahead log is used to record all messages processed by the receiveRoutine,
-which amounts to all inputs to the consensus state machine:
-messages from peers, messages from ourselves, and timeouts.
-They can be played back deterministically at startup or using the replay console.
+See the [consensus spec](https://github.com/tendermint/tendermint/tree/master/docs/spec/consensus) and the [reactor consensus spec](https://github.com/tendermint/tendermint/tree/master/docs/spec/reactors/consensus) for more information.
@@ -27,7 +27,7 @@ func init() {
 // Heal partition and ensure A sees the commit
 func TestByzantine(t *testing.T) {
 	N := 4
-	logger := consensusLogger()
+	logger := consensusLogger().With("test", "byzantine")
 	css := randConsensusNet(N, "consensus_byzantine_test", newMockTickerFunc(false), newCounter)
 
 	// give the byzantine validator a normal ticker
@@ -264,7 +264,7 @@ func newConsensusStateWithConfigAndBlockStore(thisConfig *cfg.Config, state sm.S
 	// mock the evidence pool
 	evpool := types.MockEvidencePool{}
 
-	// Make ConsensusReactor
+	// Make ConsensusState
 	stateDB := dbm.NewMemDB()
 	blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyAppConnCon, mempool, evpool)
 	cs := NewConsensusState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool)
@@ -1,7 +1,6 @@
 package consensus

 import (
-	"context"
 	"fmt"
 	"reflect"
 	"sync"
@@ -14,6 +13,7 @@ import (
 	"github.com/tendermint/tmlibs/log"

 	cstypes "github.com/tendermint/tendermint/consensus/types"
+	tmevents "github.com/tendermint/tendermint/libs/events"
 	"github.com/tendermint/tendermint/p2p"
 	sm "github.com/tendermint/tendermint/state"
 	"github.com/tendermint/tendermint/types"
@@ -43,7 +43,8 @@ type ConsensusReactor struct {
 	eventBus *types.EventBus
 }

-// NewConsensusReactor returns a new ConsensusReactor with the given consensusState.
+// NewConsensusReactor returns a new ConsensusReactor with the given
+// consensusState.
 func NewConsensusReactor(consensusState *ConsensusState, fastSync bool) *ConsensusReactor {
 	conR := &ConsensusReactor{
 		conS: consensusState,
@@ -53,17 +54,15 @@ func NewConsensusReactor(consensusState *ConsensusState, fastSync bool) *Consens
 	return conR
 }

-// OnStart implements BaseService.
+// OnStart implements BaseService by subscribing to events, which later will be
+// broadcasted to other peers and starting state if we're not in fast sync.
 func (conR *ConsensusReactor) OnStart() error {
 	conR.Logger.Info("ConsensusReactor ", "fastSync", conR.FastSync())
 	if err := conR.BaseReactor.OnStart(); err != nil {
 		return err
 	}

-	err := conR.startBroadcastRoutine()
-	if err != nil {
-		return err
-	}
+	conR.subscribeToBroadcastEvents()

 	if !conR.FastSync() {
 		err := conR.conS.Start()
@@ -75,9 +74,11 @@ func (conR *ConsensusReactor) OnStart() error {
 	return nil
 }

-// OnStop implements BaseService
+// OnStop implements BaseService by unsubscribing from events and stopping
+// state.
 func (conR *ConsensusReactor) OnStop() {
 	conR.BaseReactor.OnStop()
+	conR.unsubscribeFromBroadcastEvents()
 	conR.conS.Stop()
 }

@@ -101,6 +102,7 @@ func (conR *ConsensusReactor) SwitchToConsensus(state sm.State, blocksSynced int
 	err := conR.conS.Start()
 	if err != nil {
 		conR.Logger.Error("Error starting conS", "err", err)
+		return
 	}
 }

@@ -210,7 +212,7 @@ func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
 			return
 		}
 		// Peer claims to have a maj23 for some BlockID at H,R,S,
-		err := votes.SetPeerMaj23(msg.Round, msg.Type, ps.Peer.ID(), msg.BlockID)
+		err := votes.SetPeerMaj23(msg.Round, msg.Type, ps.peer.ID(), msg.BlockID)
 		if err != nil {
 			conR.Switch.StopPeerForError(src, err)
 			return
@@ -345,77 +347,40 @@ func (conR *ConsensusReactor) FastSync() bool {

 //--------------------------------------

-// startBroadcastRoutine subscribes for new round steps, votes and proposal
-// heartbeats using the event bus and starts a go routine to broadcasts events
-// to peers upon receiving them.
-func (conR *ConsensusReactor) startBroadcastRoutine() error {
+// subscribeToBroadcastEvents subscribes for new round steps, votes and
+// proposal heartbeats using internal pubsub defined on state to broadcast
+// them to peers upon receiving.
+func (conR *ConsensusReactor) subscribeToBroadcastEvents() {
 	const subscriber = "consensus-reactor"
-	ctx := context.Background()
-
-	// new round steps
-	stepsCh := make(chan interface{})
-	err := conR.eventBus.Subscribe(ctx, subscriber, types.EventQueryNewRoundStep, stepsCh)
-	if err != nil {
-		return errors.Wrapf(err, "failed to subscribe %s to %s", subscriber, types.EventQueryNewRoundStep)
-	}
-
-	// votes
-	votesCh := make(chan interface{})
-	err = conR.eventBus.Subscribe(ctx, subscriber, types.EventQueryVote, votesCh)
-	if err != nil {
-		return errors.Wrapf(err, "failed to subscribe %s to %s", subscriber, types.EventQueryVote)
-	}
-
-	// proposal heartbeats
-	heartbeatsCh := make(chan interface{})
-	err = conR.eventBus.Subscribe(ctx, subscriber, types.EventQueryProposalHeartbeat, heartbeatsCh)
-	if err != nil {
-		return errors.Wrapf(err, "failed to subscribe %s to %s", subscriber, types.EventQueryProposalHeartbeat)
-	}
-
-	go func() {
-		var data interface{}
-		var ok bool
-		for {
-			select {
-			case data, ok = <-stepsCh:
-				if ok { // a receive from a closed channel returns the zero value immediately
-					edrs := data.(types.EventDataRoundState)
-					conR.broadcastNewRoundStep(edrs.RoundState.(*cstypes.RoundState))
-				}
-			case data, ok = <-votesCh:
-				if ok {
-					edv := data.(types.EventDataVote)
-					conR.broadcastHasVoteMessage(edv.Vote)
-				}
-			case data, ok = <-heartbeatsCh:
-				if ok {
-					edph := data.(types.EventDataProposalHeartbeat)
-					conR.broadcastProposalHeartbeatMessage(edph)
-				}
-			case <-conR.Quit():
-				conR.eventBus.UnsubscribeAll(ctx, subscriber)
-				return
-			}
-			if !ok {
-				conR.eventBus.UnsubscribeAll(ctx, subscriber)
-				return
-			}
-		}
-	}()
-
-	return nil
+	conR.conS.evsw.AddListenerForEvent(subscriber, types.EventNewRoundStep,
+		func(data tmevents.EventData) {
+			conR.broadcastNewRoundStepMessages(data.(*cstypes.RoundState))
+		})
+
+	conR.conS.evsw.AddListenerForEvent(subscriber, types.EventVote,
+		func(data tmevents.EventData) {
+			conR.broadcastHasVoteMessage(data.(*types.Vote))
+		})
+
+	conR.conS.evsw.AddListenerForEvent(subscriber, types.EventProposalHeartbeat,
+		func(data tmevents.EventData) {
+			conR.broadcastProposalHeartbeatMessage(data.(*types.Heartbeat))
+		})
 }

-func (conR *ConsensusReactor) broadcastProposalHeartbeatMessage(heartbeat types.EventDataProposalHeartbeat) {
-	hb := heartbeat.Heartbeat
+func (conR *ConsensusReactor) unsubscribeFromBroadcastEvents() {
+	const subscriber = "consensus-reactor"
+	conR.conS.evsw.RemoveListener(subscriber)
+}
+
+func (conR *ConsensusReactor) broadcastProposalHeartbeatMessage(hb *types.Heartbeat) {
 	conR.Logger.Debug("Broadcasting proposal heartbeat message",
 		"height", hb.Height, "round", hb.Round, "sequence", hb.Sequence)
 	msg := &ProposalHeartbeatMessage{hb}
 	conR.Switch.Broadcast(StateChannel, cdc.MustMarshalBinaryBare(msg))
 }

-func (conR *ConsensusReactor) broadcastNewRoundStep(rs *cstypes.RoundState) {
+func (conR *ConsensusReactor) broadcastNewRoundStepMessages(rs *cstypes.RoundState) {
 	nrsMsg, csMsg := makeRoundStepMessages(rs)
 	if nrsMsg != nil {
 		conR.Switch.Broadcast(StateChannel, cdc.MustMarshalBinaryBare(nrsMsg))
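
For context, the new subscribeToBroadcastEvents wiring above relies on a synchronous event-switch pattern: listeners are registered per event name and invoked inline when the event fires. The following is a self-contained toy sketch of that pattern in plain Go; it is illustrative only and is not the tmlibs/events implementation the real code imports.

    package main

    import "fmt"

    // toyEventSwitch mimics the synchronous listener pattern used by
    // subscribeToBroadcastEvents: callbacks are registered per event name
    // and invoked inline by FireEvent.
    type toyEventSwitch struct {
    	listeners map[string][]func(data interface{})
    }

    func newToyEventSwitch() *toyEventSwitch {
    	return &toyEventSwitch{listeners: make(map[string][]func(interface{}))}
    }

    func (sw *toyEventSwitch) AddListenerForEvent(event string, cb func(interface{})) {
    	sw.listeners[event] = append(sw.listeners[event], cb)
    }

    func (sw *toyEventSwitch) FireEvent(event string, data interface{}) {
    	for _, cb := range sw.listeners[event] {
    		cb(data) // synchronous: the producer blocks until every listener returns
    	}
    }

    func main() {
    	sw := newToyEventSwitch()
    	sw.AddListenerForEvent("NewRoundStep", func(data interface{}) {
    		fmt.Println("broadcast to peers:", data)
    	})
    	sw.FireEvent("NewRoundStep", "height=1/round=0/step=Propose")
    }

The design point is that, unlike the old eventBus goroutine with channels, the producer (ConsensusState) calls the reactor's broadcast callbacks directly, so no intermediate buffering or goroutine shutdown handling is needed.
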
@@ -696,20 +661,37 @@ func (conR *ConsensusReactor) gossipVotesForHeight(logger log.Logger, rs *cstype
 			return true
 		}
 	}

+	// If there are POL prevotes to send...
+	if prs.Step <= cstypes.RoundStepPropose && prs.Round != -1 && prs.Round <= rs.Round && prs.ProposalPOLRound != -1 {
+		if polPrevotes := rs.Votes.Prevotes(prs.ProposalPOLRound); polPrevotes != nil {
+			if ps.PickSendVote(polPrevotes) {
+				logger.Debug("Picked rs.Prevotes(prs.ProposalPOLRound) to send",
+					"round", prs.ProposalPOLRound)
+				return true
+			}
+		}
+	}
+
 	// If there are prevotes to send...
-	if prs.Step <= cstypes.RoundStepPrevote && prs.Round != -1 && prs.Round <= rs.Round {
+	if prs.Step <= cstypes.RoundStepPrevoteWait && prs.Round != -1 && prs.Round <= rs.Round {
 		if ps.PickSendVote(rs.Votes.Prevotes(prs.Round)) {
 			logger.Debug("Picked rs.Prevotes(prs.Round) to send", "round", prs.Round)
 			return true
 		}
 	}
 	// If there are precommits to send...
-	if prs.Step <= cstypes.RoundStepPrecommit && prs.Round != -1 && prs.Round <= rs.Round {
+	if prs.Step <= cstypes.RoundStepPrecommitWait && prs.Round != -1 && prs.Round <= rs.Round {
 		if ps.PickSendVote(rs.Votes.Precommits(prs.Round)) {
 			logger.Debug("Picked rs.Precommits(prs.Round) to send", "round", prs.Round)
 			return true
 		}
 	}
+	// If there are prevotes to send...Needed because of validBlock mechanism
+	if prs.Round != -1 && prs.Round <= rs.Round {
+		if ps.PickSendVote(rs.Votes.Prevotes(prs.Round)) {
+			logger.Debug("Picked rs.Prevotes(prs.Round) to send", "round", prs.Round)
+			return true
+		}
+	}
 	// If there are POLPrevotes to send...
 	if prs.ProposalPOLRound != -1 {
 		if polPrevotes := rs.Votes.Prevotes(prs.ProposalPOLRound); polPrevotes != nil {
@@ -720,6 +702,7 @@ func (conR *ConsensusReactor) gossipVotesForHeight(logger log.Logger, rs *cstype
 			}
 		}
 	}

 	return false
 }

@@ -840,41 +823,42 @@ var (

 // PeerState contains the known state of a peer, including its connection and
 // threadsafe access to its PeerRoundState.
+// NOTE: THIS GETS DUMPED WITH rpc/core/consensus.go.
+// Be mindful of what you Expose.
 type PeerState struct {
-	Peer   p2p.Peer
+	peer   p2p.Peer
 	logger log.Logger

-	mtx sync.Mutex
-	cstypes.PeerRoundState
-
-	stats *peerStateStats
+	mtx   sync.Mutex             `json:"-"`           // NOTE: Modify below using setters, never directly.
+	PRS   cstypes.PeerRoundState `json:"round_state"` // Exposed.
+	Stats *peerStateStats        `json:"stats"`       // Exposed.
 }

 // peerStateStats holds internal statistics for a peer.
 type peerStateStats struct {
-	lastVoteHeight int64
-	votes          int
-
-	lastBlockPartHeight int64
-	blockParts          int
+	LastVoteHeight      int64 `json:"last_vote_height"`
+	Votes               int   `json:"votes"`
+	LastBlockPartHeight int64 `json:"last_block_part_height"`
+	BlockParts          int   `json:"block_parts"`
 }

 func (pss peerStateStats) String() string {
-	return fmt.Sprintf("peerStateStats{votes: %d, blockParts: %d}", pss.votes, pss.blockParts)
+	return fmt.Sprintf("peerStateStats{lvh: %d, votes: %d, lbph: %d, blockParts: %d}",
+		pss.LastVoteHeight, pss.Votes, pss.LastBlockPartHeight, pss.BlockParts)
 }

 // NewPeerState returns a new PeerState for the given Peer
 func NewPeerState(peer p2p.Peer) *PeerState {
 	return &PeerState{
-		Peer:   peer,
+		peer:   peer,
 		logger: log.NewNopLogger(),
-		PeerRoundState: cstypes.PeerRoundState{
+		PRS: cstypes.PeerRoundState{
 			Round:              -1,
 			ProposalPOLRound:   -1,
 			LastCommitRound:    -1,
 			CatchupCommitRound: -1,
 		},
-		stats: &peerStateStats{},
+		Stats: &peerStateStats{},
 	}
 }

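
The NOTE added above explains the field renames: PeerState gets dumped by rpc/core/consensus.go, so PRS and Stats become exported with struct tags while peer and mtx stay private. A rough sketch of the effect of those tags, using encoding/json only as a stand-in (the real code marshals with go-amino, which I am assuming treats plain struct fields and json tags similarly; the values below are made up):

    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    // Mirrors the tags added to peerStateStats in the hunk above so the keys
    // that appear in the RPC dump are predictable.
    type peerStateStats struct {
    	LastVoteHeight      int64 `json:"last_vote_height"`
    	Votes               int   `json:"votes"`
    	LastBlockPartHeight int64 `json:"last_block_part_height"`
    	BlockParts          int   `json:"block_parts"`
    }

    func main() {
    	b, _ := json.Marshal(peerStateStats{LastVoteHeight: 10, Votes: 42})
    	fmt.Println(string(b))
    	// {"last_vote_height":10,"votes":42,"last_block_part_height":0,"block_parts":0}
    }
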
@@ -891,16 +875,16 @@ func (ps *PeerState) GetRoundState() *cstypes.PeerRoundState {
 	ps.mtx.Lock()
 	defer ps.mtx.Unlock()

-	prs := ps.PeerRoundState // copy
+	prs := ps.PRS // copy
 	return &prs
 }

-// GetRoundStateJSON returns a json of PeerRoundState, marshalled using go-amino.
-func (ps *PeerState) GetRoundStateJSON() ([]byte, error) {
+// ToJSON returns a json of PeerState, marshalled using go-amino.
+func (ps *PeerState) ToJSON() ([]byte, error) {
 	ps.mtx.Lock()
 	defer ps.mtx.Unlock()

-	return cdc.MarshalJSON(ps.PeerRoundState)
+	return cdc.MarshalJSON(ps)
 }

 // GetHeight returns an atomic snapshot of the PeerRoundState's height
@@ -908,7 +892,7 @@ func (ps *PeerState) GetRoundStateJSON() ([]byte, error) {
 func (ps *PeerState) GetHeight() int64 {
 	ps.mtx.Lock()
 	defer ps.mtx.Unlock()
-	return ps.PeerRoundState.Height
+	return ps.PRS.Height
 }

 // SetHasProposal sets the given proposal as known for the peer.
@@ -916,18 +900,18 @@ func (ps *PeerState) SetHasProposal(proposal *types.Proposal) {
 	ps.mtx.Lock()
 	defer ps.mtx.Unlock()

-	if ps.Height != proposal.Height || ps.Round != proposal.Round {
+	if ps.PRS.Height != proposal.Height || ps.PRS.Round != proposal.Round {
 		return
 	}
-	if ps.Proposal {
+	if ps.PRS.Proposal {
 		return
 	}

-	ps.Proposal = true
-	ps.ProposalBlockPartsHeader = proposal.BlockPartsHeader
-	ps.ProposalBlockParts = cmn.NewBitArray(proposal.BlockPartsHeader.Total)
-	ps.ProposalPOLRound = proposal.POLRound
-	ps.ProposalPOL = nil // Nil until ProposalPOLMessage received.
+	ps.PRS.Proposal = true
+	ps.PRS.ProposalBlockPartsHeader = proposal.BlockPartsHeader
+	ps.PRS.ProposalBlockParts = cmn.NewBitArray(proposal.BlockPartsHeader.Total)
+	ps.PRS.ProposalPOLRound = proposal.POLRound
+	ps.PRS.ProposalPOL = nil // Nil until ProposalPOLMessage received.
 }

 // InitProposalBlockParts initializes the peer's proposal block parts header and bit array.
@@ -935,12 +919,12 @@ func (ps *PeerState) InitProposalBlockParts(partsHeader types.PartSetHeader) {
 	ps.mtx.Lock()
 	defer ps.mtx.Unlock()

-	if ps.ProposalBlockParts != nil {
+	if ps.PRS.ProposalBlockParts != nil {
 		return
 	}

-	ps.ProposalBlockPartsHeader = partsHeader
-	ps.ProposalBlockParts = cmn.NewBitArray(partsHeader.Total)
+	ps.PRS.ProposalBlockPartsHeader = partsHeader
+	ps.PRS.ProposalBlockParts = cmn.NewBitArray(partsHeader.Total)
 }

 // SetHasProposalBlockPart sets the given block part index as known for the peer.
@@ -948,11 +932,11 @@ func (ps *PeerState) SetHasProposalBlockPart(height int64, round int, index int)
 	ps.mtx.Lock()
 	defer ps.mtx.Unlock()

-	if ps.Height != height || ps.Round != round {
+	if ps.PRS.Height != height || ps.PRS.Round != round {
 		return
 	}

-	ps.ProposalBlockParts.SetIndex(index, true)
+	ps.PRS.ProposalBlockParts.SetIndex(index, true)
 }

 // PickSendVote picks a vote and sends it to the peer.
@@ -961,7 +945,7 @@ func (ps *PeerState) PickSendVote(votes types.VoteSetReader) bool {
 	if vote, ok := ps.PickVoteToSend(votes); ok {
 		msg := &VoteMessage{vote}
 		ps.logger.Debug("Sending vote message", "ps", ps, "vote", vote)
-		return ps.Peer.Send(VoteChannel, cdc.MustMarshalBinaryBare(msg))
+		return ps.peer.Send(VoteChannel, cdc.MustMarshalBinaryBare(msg))
 	}
 	return false
 }
@@ -1001,40 +985,40 @@ func (ps *PeerState) getVoteBitArray(height int64, round int, type_ byte) *cmn.B
 		return nil
 	}

-	if ps.Height == height {
-		if ps.Round == round {
+	if ps.PRS.Height == height {
+		if ps.PRS.Round == round {
 			switch type_ {
 			case types.VoteTypePrevote:
-				return ps.Prevotes
+				return ps.PRS.Prevotes
 			case types.VoteTypePrecommit:
-				return ps.Precommits
+				return ps.PRS.Precommits
 			}
 		}
-		if ps.CatchupCommitRound == round {
+		if ps.PRS.CatchupCommitRound == round {
 			switch type_ {
 			case types.VoteTypePrevote:
 				return nil
 			case types.VoteTypePrecommit:
-				return ps.CatchupCommit
+				return ps.PRS.CatchupCommit
 			}
 		}
-		if ps.ProposalPOLRound == round {
+		if ps.PRS.ProposalPOLRound == round {
 			switch type_ {
 			case types.VoteTypePrevote:
-				return ps.ProposalPOL
+				return ps.PRS.ProposalPOL
 			case types.VoteTypePrecommit:
 				return nil
 			}
 		}
 		return nil
 	}
-	if ps.Height == height+1 {
-		if ps.LastCommitRound == round {
+	if ps.PRS.Height == height+1 {
+		if ps.PRS.LastCommitRound == round {
 			switch type_ {
 			case types.VoteTypePrevote:
 				return nil
 			case types.VoteTypePrecommit:
-				return ps.LastCommit
+				return ps.PRS.LastCommit
 			}
 		}
 		return nil
@@ -1044,7 +1028,7 @@ func (ps *PeerState) getVoteBitArray(height int64, round int, type_ byte) *cmn.B

 // 'round': A round for which we have a +2/3 commit.
 func (ps *PeerState) ensureCatchupCommitRound(height int64, round int, numValidators int) {
-	if ps.Height != height {
+	if ps.PRS.Height != height {
 		return
 	}
 	/*
@@ -1054,14 +1038,14 @@ func (ps *PeerState) ensureCatchupCommitRound(height int64, round int, numValida
 			cmn.PanicSanity(cmn.Fmt("Conflicting CatchupCommitRound. Height: %v, Orig: %v, New: %v", height, ps.CatchupCommitRound, round))
 		}
 	*/
-	if ps.CatchupCommitRound == round {
+	if ps.PRS.CatchupCommitRound == round {
 		return // Nothing to do!
 	}
-	ps.CatchupCommitRound = round
-	if round == ps.Round {
-		ps.CatchupCommit = ps.Precommits
+	ps.PRS.CatchupCommitRound = round
+	if round == ps.PRS.Round {
+		ps.PRS.CatchupCommit = ps.PRS.Precommits
 	} else {
-		ps.CatchupCommit = cmn.NewBitArray(numValidators)
+		ps.PRS.CatchupCommit = cmn.NewBitArray(numValidators)
 	}
 }

@@ -1076,22 +1060,22 @@ func (ps *PeerState) EnsureVoteBitArrays(height int64, numValidators int) {
 }

 func (ps *PeerState) ensureVoteBitArrays(height int64, numValidators int) {
-	if ps.Height == height {
-		if ps.Prevotes == nil {
-			ps.Prevotes = cmn.NewBitArray(numValidators)
+	if ps.PRS.Height == height {
+		if ps.PRS.Prevotes == nil {
+			ps.PRS.Prevotes = cmn.NewBitArray(numValidators)
 		}
-		if ps.Precommits == nil {
-			ps.Precommits = cmn.NewBitArray(numValidators)
+		if ps.PRS.Precommits == nil {
+			ps.PRS.Precommits = cmn.NewBitArray(numValidators)
 		}
-		if ps.CatchupCommit == nil {
-			ps.CatchupCommit = cmn.NewBitArray(numValidators)
+		if ps.PRS.CatchupCommit == nil {
+			ps.PRS.CatchupCommit = cmn.NewBitArray(numValidators)
 		}
-		if ps.ProposalPOL == nil {
-			ps.ProposalPOL = cmn.NewBitArray(numValidators)
+		if ps.PRS.ProposalPOL == nil {
+			ps.PRS.ProposalPOL = cmn.NewBitArray(numValidators)
 		}
-	} else if ps.Height == height+1 {
-		if ps.LastCommit == nil {
-			ps.LastCommit = cmn.NewBitArray(numValidators)
+	} else if ps.PRS.Height == height+1 {
+		if ps.PRS.LastCommit == nil {
+			ps.PRS.LastCommit = cmn.NewBitArray(numValidators)
 		}
 	}
 }
@@ -1103,12 +1087,12 @@ func (ps *PeerState) RecordVote(vote *types.Vote) int {
 	ps.mtx.Lock()
 	defer ps.mtx.Unlock()

-	if ps.stats.lastVoteHeight >= vote.Height {
-		return ps.stats.votes
+	if ps.Stats.LastVoteHeight >= vote.Height {
+		return ps.Stats.Votes
 	}
-	ps.stats.lastVoteHeight = vote.Height
-	ps.stats.votes++
-	return ps.stats.votes
+	ps.Stats.LastVoteHeight = vote.Height
+	ps.Stats.Votes++
+	return ps.Stats.Votes
 }

 // VotesSent returns the number of blocks for which peer has been sending us
@@ -1117,7 +1101,7 @@ func (ps *PeerState) VotesSent() int {
 	ps.mtx.Lock()
 	defer ps.mtx.Unlock()

-	return ps.stats.votes
+	return ps.Stats.Votes
 }

 // RecordBlockPart updates internal statistics for this peer by recording the
@@ -1128,13 +1112,13 @@ func (ps *PeerState) RecordBlockPart(bp *BlockPartMessage) int {
 	ps.mtx.Lock()
 	defer ps.mtx.Unlock()

-	if ps.stats.lastBlockPartHeight >= bp.Height {
-		return ps.stats.blockParts
+	if ps.Stats.LastBlockPartHeight >= bp.Height {
+		return ps.Stats.BlockParts
 	}

-	ps.stats.lastBlockPartHeight = bp.Height
-	ps.stats.blockParts++
-	return ps.stats.blockParts
+	ps.Stats.LastBlockPartHeight = bp.Height
+	ps.Stats.BlockParts++
+	return ps.Stats.BlockParts
 }

 // BlockPartsSent returns the number of blocks for which peer has been sending
@@ -1143,7 +1127,7 @@ func (ps *PeerState) BlockPartsSent() int {
 	ps.mtx.Lock()
 	defer ps.mtx.Unlock()

-	return ps.stats.blockParts
+	return ps.Stats.BlockParts
 }

 // SetHasVote sets the given vote as known by the peer
@@ -1155,7 +1139,7 @@ func (ps *PeerState) SetHasVote(vote *types.Vote) {
 }

 func (ps *PeerState) setHasVote(height int64, round int, type_ byte, index int) {
-	logger := ps.logger.With("peerH/R", cmn.Fmt("%d/%d", ps.Height, ps.Round), "H/R", cmn.Fmt("%d/%d", height, round))
+	logger := ps.logger.With("peerH/R", cmn.Fmt("%d/%d", ps.PRS.Height, ps.PRS.Round), "H/R", cmn.Fmt("%d/%d", height, round))
 	logger.Debug("setHasVote", "type", type_, "index", index)

 	// NOTE: some may be nil BitArrays -> no side effects.
@@ -1171,51 +1155,51 @@ func (ps *PeerState) ApplyNewRoundStepMessage(msg *NewRoundStepMessage) {
 	defer ps.mtx.Unlock()

 	// Ignore duplicates or decreases
-	if CompareHRS(msg.Height, msg.Round, msg.Step, ps.Height, ps.Round, ps.Step) <= 0 {
+	if CompareHRS(msg.Height, msg.Round, msg.Step, ps.PRS.Height, ps.PRS.Round, ps.PRS.Step) <= 0 {
 		return
 	}

 	// Just remember these values.
-	psHeight := ps.Height
-	psRound := ps.Round
-	//psStep := ps.Step
-	psCatchupCommitRound := ps.CatchupCommitRound
-	psCatchupCommit := ps.CatchupCommit
+	psHeight := ps.PRS.Height
+	psRound := ps.PRS.Round
+	//psStep := ps.PRS.Step
+	psCatchupCommitRound := ps.PRS.CatchupCommitRound
+	psCatchupCommit := ps.PRS.CatchupCommit

 	startTime := time.Now().Add(-1 * time.Duration(msg.SecondsSinceStartTime) * time.Second)
-	ps.Height = msg.Height
-	ps.Round = msg.Round
-	ps.Step = msg.Step
-	ps.StartTime = startTime
+	ps.PRS.Height = msg.Height
+	ps.PRS.Round = msg.Round
+	ps.PRS.Step = msg.Step
+	ps.PRS.StartTime = startTime
 	if psHeight != msg.Height || psRound != msg.Round {
-		ps.Proposal = false
-		ps.ProposalBlockPartsHeader = types.PartSetHeader{}
-		ps.ProposalBlockParts = nil
-		ps.ProposalPOLRound = -1
-		ps.ProposalPOL = nil
+		ps.PRS.Proposal = false
+		ps.PRS.ProposalBlockPartsHeader = types.PartSetHeader{}
+		ps.PRS.ProposalBlockParts = nil
+		ps.PRS.ProposalPOLRound = -1
+		ps.PRS.ProposalPOL = nil
 		// We'll update the BitArray capacity later.
-		ps.Prevotes = nil
-		ps.Precommits = nil
+		ps.PRS.Prevotes = nil
+		ps.PRS.Precommits = nil
 	}
 	if psHeight == msg.Height && psRound != msg.Round && msg.Round == psCatchupCommitRound {
 		// Peer caught up to CatchupCommitRound.
 		// Preserve psCatchupCommit!
 		// NOTE: We prefer to use prs.Precommits if
 		// pr.Round matches pr.CatchupCommitRound.
-		ps.Precommits = psCatchupCommit
+		ps.PRS.Precommits = psCatchupCommit
 	}
 	if psHeight != msg.Height {
 		// Shift Precommits to LastCommit.
 		if psHeight+1 == msg.Height && psRound == msg.LastCommitRound {
-			ps.LastCommitRound = msg.LastCommitRound
-			ps.LastCommit = ps.Precommits
+			ps.PRS.LastCommitRound = msg.LastCommitRound
+			ps.PRS.LastCommit = ps.PRS.Precommits
 		} else {
-			ps.LastCommitRound = msg.LastCommitRound
-			ps.LastCommit = nil
+			ps.PRS.LastCommitRound = msg.LastCommitRound
+			ps.PRS.LastCommit = nil
 		}
 		// We'll update the BitArray capacity later.
-		ps.CatchupCommitRound = -1
-		ps.CatchupCommit = nil
+		ps.PRS.CatchupCommitRound = -1
+		ps.PRS.CatchupCommit = nil
 	}
 }

@@ -1224,12 +1208,12 @@ func (ps *PeerState) ApplyCommitStepMessage(msg *CommitStepMessage) {
 	ps.mtx.Lock()
 	defer ps.mtx.Unlock()

-	if ps.Height != msg.Height {
+	if ps.PRS.Height != msg.Height {
 		return
 	}

-	ps.ProposalBlockPartsHeader = msg.BlockPartsHeader
-	ps.ProposalBlockParts = msg.BlockParts
+	ps.PRS.ProposalBlockPartsHeader = msg.BlockPartsHeader
+	ps.PRS.ProposalBlockParts = msg.BlockParts
 }

 // ApplyProposalPOLMessage updates the peer state for the new proposal POL.
@@ -1237,16 +1221,16 @@ func (ps *PeerState) ApplyProposalPOLMessage(msg *ProposalPOLMessage) {
 	ps.mtx.Lock()
 	defer ps.mtx.Unlock()

-	if ps.Height != msg.Height {
+	if ps.PRS.Height != msg.Height {
 		return
 	}
-	if ps.ProposalPOLRound != msg.ProposalPOLRound {
+	if ps.PRS.ProposalPOLRound != msg.ProposalPOLRound {
 		return
 	}

-	// TODO: Merge onto existing ps.ProposalPOL?
+	// TODO: Merge onto existing ps.PRS.ProposalPOL?
 	// We might have sent some prevotes in the meantime.
-	ps.ProposalPOL = msg.ProposalPOL
+	ps.PRS.ProposalPOL = msg.ProposalPOL
 }

 // ApplyHasVoteMessage updates the peer state for the new vote.
@@ -1254,7 +1238,7 @@ func (ps *PeerState) ApplyHasVoteMessage(msg *HasVoteMessage) {
 	ps.mtx.Lock()
 	defer ps.mtx.Unlock()

-	if ps.Height != msg.Height {
+	if ps.PRS.Height != msg.Height {
 		return
 	}

@@ -1292,13 +1276,13 @@ func (ps *PeerState) StringIndented(indent string) string {
 	ps.mtx.Lock()
 	defer ps.mtx.Unlock()
 	return fmt.Sprintf(`PeerState{
 %s  Key %v
-%s  PRS %v
+%s  RoundState %v
 %s  Stats %v
 %s}`,
-		indent, ps.Peer.ID(),
-		indent, ps.PeerRoundState.StringIndented(indent+"  "),
-		indent, ps.stats,
+		indent, ps.peer.ID(),
+		indent, ps.PRS.StringIndented(indent+"  "),
+		indent, ps.Stats,
 		indent)
 }

@@ -26,20 +26,24 @@ import (
 var crc32c = crc32.MakeTable(crc32.Castagnoli)

 // Functionality to replay blocks and messages on recovery from a crash.
-// There are two general failure scenarios: failure during consensus, and failure while applying the block.
-// The former is handled by the WAL, the latter by the proxyApp Handshake on restart,
-// which ultimately hands off the work to the WAL.
+// There are two general failure scenarios:
+//
+// 1. failure during consensus
+// 2. failure while applying the block
+//
+// The former is handled by the WAL, the latter by the proxyApp Handshake on
+// restart, which ultimately hands off the work to the WAL.

 //-----------------------------------------
-// recover from failure during consensus
-// by replaying messages from the WAL
+// 1. Recover from failure during consensus
+// (by replaying messages from the WAL)
+//-----------------------------------------

-// Unmarshal and apply a single message to the consensus state
-// as if it were received in receiveRoutine
-// Lines that start with "#" are ignored.
-// NOTE: receiveRoutine should not be running
+// Unmarshal and apply a single message to the consensus state as if it were
+// received in receiveRoutine. Lines that start with "#" are ignored.
+// NOTE: receiveRoutine should not be running.
 func (cs *ConsensusState) readReplayMessage(msg *TimedWALMessage, newStepCh chan interface{}) error {
-	// skip meta messages
+	// Skip meta messages which exist for demarcating boundaries.
 	if _, ok := msg.Msg.(EndHeightMessage); ok {
 		return nil
 	}
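
To make recovery scenario 1 from the rewritten comment concrete: on restart, the messages recorded in the WAL are decoded and applied to the consensus state in order, exactly as if they had arrived through receiveRoutine, so the node deterministically ends up back where it crashed. The following is a heavily simplified, self-contained sketch of that idea; the types and dispatch here are toys, not the repository's WAL or ConsensusState code.

    package main

    import "fmt"

    // walMsg stands in for a recorded WAL entry; applying entries in order
    // must be deterministic, which is why every message and timeout is
    // written to the WAL before the state machine acts on it.
    type walMsg struct{ kind, payload string }

    type toyState struct{ step string }

    func (s *toyState) apply(m walMsg) {
    	// a real implementation dispatches on proposal / block part / vote / timeout
    	s.step = m.kind
    	fmt.Printf("replayed %s (%s), now at step %s\n", m.kind, m.payload, s.step)
    }

    func main() {
    	wal := []walMsg{
    		{"Proposal", "height=5/round=0"},
    		{"BlockPart", "index=0"},
    		{"Vote", "prevote"},
    	}
    	st := &toyState{step: "NewHeight"}
    	for _, m := range wal { // deterministic replay at startup
    		st.apply(m)
    	}
    }
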
@@ -89,17 +93,18 @@ func (cs *ConsensusState) readReplayMessage(msg *TimedWALMessage, newStepCh chan
 	return nil
 }

-// replay only those messages since the last block.
-// timeoutRoutine should run concurrently to read off tickChan
+// Replay only those messages since the last block. `timeoutRoutine` should
+// run concurrently to read off tickChan.
 func (cs *ConsensusState) catchupReplay(csHeight int64) error {
-	// set replayMode
+	// Set replayMode to true so we don't log signing errors.
 	cs.replayMode = true
 	defer func() { cs.replayMode = false }()

-	// Ensure that ENDHEIGHT for this height doesn't exist.
+	// Ensure that #ENDHEIGHT for this height doesn't exist.
 	// NOTE: This is just a sanity check. As far as we know things work fine
 	// without it, and Handshake could reuse ConsensusState if it weren't for
-	// this check (since we can crash after writing ENDHEIGHT).
+	// this check (since we can crash after writing #ENDHEIGHT).
 	//
 	// Ignore data corruption errors since this is a sanity check.
 	gr, found, err := cs.wal.SearchForEndHeight(csHeight, &WALSearchOptions{IgnoreDataCorruptionErrors: true})
@@ -115,7 +120,7 @@ func (cs *ConsensusState) catchupReplay(csHeight int64) error {
 		return fmt.Errorf("WAL should not contain #ENDHEIGHT %d", csHeight)
 	}

-	// Search for last height marker
+	// Search for last height marker.
 	//
 	// Ignore data corruption errors in previous heights because we only care about last height
 	gr, found, err = cs.wal.SearchForEndHeight(csHeight-1, &WALSearchOptions{IgnoreDataCorruptionErrors: true})
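
The #ENDHEIGHT sanity check above relies on the WAL containing one end-of-height marker per committed height, and catchupReplay replaying only what follows the last marker. A toy illustration of searching for such a marker is below; the real SearchForEndHeight works over an autofile group and the exact marker string is an assumption here, so treat this purely as a sketch of the idea.

    package main

    import (
    	"bufio"
    	"fmt"
    	"strings"
    )

    // findEndHeight scans WAL lines for an "#ENDHEIGHT: <h>" marker and reports
    // whether the given height has already been committed.
    func findEndHeight(wal string, height int) bool {
    	marker := fmt.Sprintf("#ENDHEIGHT: %d", height)
    	sc := bufio.NewScanner(strings.NewReader(wal))
    	for sc.Scan() {
    		if strings.TrimSpace(sc.Text()) == marker {
    			return true
    		}
    	}
    	return false
    }

    func main() {
    	wal := "vote ...\n#ENDHEIGHT: 4\nproposal ...\n"
    	fmt.Println(findEndHeight(wal, 4)) // true  -> height 4 already committed
    	fmt.Println(findEndHeight(wal, 5)) // false -> safe to replay height 5
    }
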
@@ -182,10 +187,11 @@ func makeHeightSearchFunc(height int64) auto.SearchFunc {
 	}
 }*/

-//----------------------------------------------
-// Recover from failure during block processing
-// by handshaking with the app to figure out where
-// we were last and using the WAL to recover there
+//---------------------------------------------------
+// 2. Recover from failure while applying the block.
+// (by handshaking with the app to figure out where
+// we were last, and using the WAL to recover there.)
+//---------------------------------------------------

 type Handshaker struct {
 	stateDB dbm.DB
@@ -220,7 +226,8 @@ func (h *Handshaker) NBlocks() int {

 // TODO: retry the handshake/replay if it fails ?
 func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error {
-	// handshake is done via info request on the query conn
+
+	// Handshake is done via ABCI Info on the query conn.
 	res, err := proxyApp.Query().InfoSync(abci.RequestInfo{version.Version})
 	if err != nil {
 		return fmt.Errorf("Error calling Info: %v", err)
@@ -234,15 +241,16 @@ func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error {

 	h.logger.Info("ABCI Handshake", "appHeight", blockHeight, "appHash", fmt.Sprintf("%X", appHash))

-	// TODO: check version
+	// TODO: check app version.

-	// replay blocks up to the latest in the blockstore
+	// Replay blocks up to the latest in the blockstore.
 	_, err = h.ReplayBlocks(h.initialState, appHash, blockHeight, proxyApp)
 	if err != nil {
 		return fmt.Errorf("Error on replay: %v", err)
 	}

-	h.logger.Info("Completed ABCI Handshake - Tendermint and App are synced", "appHeight", blockHeight, "appHash", fmt.Sprintf("%X", appHash))
+	h.logger.Info("Completed ABCI Handshake - Tendermint and App are synced",
+		"appHeight", blockHeight, "appHash", fmt.Sprintf("%X", appHash))

 	// TODO: (on restart) replay mempool

@@ -250,7 +258,7 @@ func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error {
 }

 // Replay all blocks since appBlockHeight and ensure the result matches the current state.
-// Returns the final AppHash or an error
+// Returns the final AppHash or an error.
 func (h *Handshaker) ReplayBlocks(state sm.State, appHash []byte, appBlockHeight int64, proxyApp proxy.AppConns) ([]byte, error) {

 	storeBlockHeight := h.store.Height()
@@ -261,8 +269,8 @@ func (h *Handshaker) ReplayBlocks(state sm.State, appHash []byte, appBlockHeight
 	if appBlockHeight == 0 {
 		validators := types.TM2PB.Validators(state.Validators)
 		req := abci.RequestInitChain{
 			Validators: validators,
-			AppStateBytes: h.appState,
+			GenesisBytes: h.appState,
 		}
 		_, err := proxyApp.Consensus().InitChainSync(req)
 		if err != nil {
@@ -314,7 +322,7 @@ func (h *Handshaker) ReplayBlocks(state sm.State, appHash []byte, appBlockHeight
 		// We haven't run Commit (both the state and app are one block behind),
 		// so replayBlock with the real app.
 		// NOTE: We could instead use the cs.WAL on cs.Start,
-		// but we'd have to allow the WAL to replay a block that wrote it's ENDHEIGHT
+		// but we'd have to allow the WAL to replay a block that wrote it's #ENDHEIGHT
 		h.logger.Info("Replay last block using real app")
 		state, err = h.replayBlock(state, storeBlockHeight, proxyApp.Consensus())
 		return state.AppHash, err
@@ -357,7 +365,8 @@ func (h *Handshaker) replayBlocks(state sm.State, proxyApp proxy.AppConns, appBl
 	for i := appBlockHeight + 1; i <= finalBlock; i++ {
 		h.logger.Info("Applying block", "height", i)
 		block := h.store.LoadBlock(i)
-		appHash, err = sm.ExecCommitBlock(proxyApp.Consensus(), block, h.logger)
+		appHash, err = sm.ExecCommitBlock(proxyApp.Consensus(), block, h.logger, new(types.ValidatorSet))
+		// TODO: Temporary, see above comment.
 		if err != nil {
 			return nil, err
 		}
@@ -29,6 +29,7 @@ const (
 //--------------------------------------------------------
 // replay messages interactively or all at once

+// replay the wal file
 func RunReplayFile(config cfg.BaseConfig, csConfig *cfg.ConsensusConfig, console bool) {
 	consensusState := newConsensusStateForReplay(config, csConfig)

@@ -262,7 +263,7 @@ func (pb *playback) replayConsoleLoop() int {
 		case "locked_block":
 			fmt.Printf("%v %v\n", rs.LockedBlockParts.StringShort(), rs.LockedBlock.StringShort())
 		case "votes":
 			fmt.Println(rs.Votes.StringIndented("  "))

 		default:
 			fmt.Println("Unknown option", tokens[1])
@@ -17,7 +17,7 @@ import (

 	"github.com/tendermint/abci/example/kvstore"
 	abci "github.com/tendermint/abci/types"
-	"github.com/tendermint/go-crypto"
+	crypto "github.com/tendermint/go-crypto"
 	auto "github.com/tendermint/tmlibs/autofile"
 	cmn "github.com/tendermint/tmlibs/common"
 	dbm "github.com/tendermint/tmlibs/db"
@@ -218,15 +218,15 @@ func (e ReachedHeightToStopError) Error() string {
 	return fmt.Sprintf("reached height to stop %d", e.height)
 }

-// Save simulate WAL's crashing by sending an error to the panicCh and then
+// Write simulate WAL's crashing by sending an error to the panicCh and then
 // exiting the cs.receiveRoutine.
-func (w *crashingWAL) Save(m WALMessage) {
+func (w *crashingWAL) Write(m WALMessage) {
 	if endMsg, ok := m.(EndHeightMessage); ok {
 		if endMsg.Height == w.heightToStop {
 			w.panicCh <- ReachedHeightToStopError{endMsg.Height}
 			runtime.Goexit()
 		} else {
-			w.next.Save(m)
+			w.next.Write(m)
 		}
 		return
 	}
@@ -238,10 +238,14 @@ func (w *crashingWAL) Save(m WALMessage) {
 		runtime.Goexit()
 	} else {
 		w.msgIndex++
-		w.next.Save(m)
+		w.next.Write(m)
 	}
 }

+func (w *crashingWAL) WriteSync(m WALMessage) {
+	w.Write(m)
+}
+
 func (w *crashingWAL) Group() *auto.Group { return w.next.Group() }
 func (w *crashingWAL) SearchForEndHeight(height int64, options *WALSearchOptions) (gr *auto.GroupReader, found bool, err error) {
 	return w.next.SearchForEndHeight(height, options)
||||||
@@ -327,7 +331,7 @@ func testHandshakeReplay(t *testing.T, nBlocks int, mode uint) {
|
|||||||
|
|
||||||
privVal := pvm.LoadFilePV(config.PrivValidatorFile())
|
privVal := pvm.LoadFilePV(config.PrivValidatorFile())
|
||||||
|
|
||||||
wal, err := NewWAL(walFile, false)
|
wal, err := NewWAL(walFile)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@@ -538,7 +542,7 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) {
|
|||||||
case *types.PartSetHeader:
|
case *types.PartSetHeader:
|
||||||
thisBlockParts = types.NewPartSetFromHeader(*p)
|
thisBlockParts = types.NewPartSetFromHeader(*p)
|
||||||
case *types.Part:
|
case *types.Part:
|
||||||
_, err := thisBlockParts.AddPart(p, false)
|
_, err := thisBlockParts.AddPart(p)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
|
@@ -15,6 +15,7 @@ import (
|
|||||||
|
|
||||||
cfg "github.com/tendermint/tendermint/config"
|
cfg "github.com/tendermint/tendermint/config"
|
||||||
cstypes "github.com/tendermint/tendermint/consensus/types"
|
cstypes "github.com/tendermint/tendermint/consensus/types"
|
||||||
|
tmevents "github.com/tendermint/tendermint/libs/events"
|
||||||
"github.com/tendermint/tendermint/p2p"
|
"github.com/tendermint/tendermint/p2p"
|
||||||
sm "github.com/tendermint/tendermint/state"
|
sm "github.com/tendermint/tendermint/state"
|
||||||
"github.com/tendermint/tendermint/types"
|
"github.com/tendermint/tendermint/types"
|
||||||
@@ -110,6 +111,10 @@ type ConsensusState struct {
|
|||||||
|
|
||||||
// closed when we finish shutting down
|
// closed when we finish shutting down
|
||||||
done chan struct{}
|
done chan struct{}
|
||||||
|
|
||||||
|
// synchronous pubsub between consensus state and reactor.
|
||||||
|
// state only emits EventNewRoundStep, EventVote and EventProposalHeartbeat
|
||||||
|
evsw tmevents.EventSwitch
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewConsensusState returns a new ConsensusState.
|
// NewConsensusState returns a new ConsensusState.
|
||||||
@@ -126,6 +131,7 @@ func NewConsensusState(config *cfg.ConsensusConfig, state sm.State, blockExec *s
 		doWALCatchup: true,
 		wal:          nilWAL{},
 		evpool:       evpool,
+		evsw:         tmevents.NewEventSwitch(),
 	}
 	// set function defaults (may be overwritten before calling Start)
 	cs.decideProposal = cs.defaultDecideProposal
@@ -185,6 +191,14 @@ func (cs *ConsensusState) GetRoundStateJSON() ([]byte, error) {
 	return cdc.MarshalJSON(cs.RoundState)
 }

+// GetRoundStateSimpleJSON returns a json of RoundStateSimple, marshalled using go-amino.
+func (cs *ConsensusState) GetRoundStateSimpleJSON() ([]byte, error) {
+	cs.mtx.Lock()
+	defer cs.mtx.Unlock()
+
+	return cdc.MarshalJSON(cs.RoundState.RoundStateSimple())
+}
+
 // GetValidators returns a copy of the current validators.
 func (cs *ConsensusState) GetValidators() (int64, []*types.Validator) {
 	cs.mtx.Lock()
@@ -219,6 +233,10 @@ func (cs *ConsensusState) LoadCommit(height int64) *types.Commit {
 // OnStart implements cmn.Service.
 // It loads the latest state via the WAL, and starts the timeout and receive routines.
 func (cs *ConsensusState) OnStart() error {
+	if err := cs.evsw.Start(); err != nil {
+		return err
+	}
+
 	// we may set the WAL in testing before calling Start,
 	// so only OpenWAL if its still the nilWAL
 	if _, ok := cs.wal.(nilWAL); ok {
@@ -236,8 +254,7 @@ func (cs *ConsensusState) OnStart() error {
 	// NOTE: we will get a build up of garbage go routines
 	// firing on the tockChan until the receiveRoutine is started
 	// to deal with them (by that point, at most one will be valid)
-	err := cs.timeoutTicker.Start()
-	if err != nil {
+	if err := cs.timeoutTicker.Start(); err != nil {
 		return err
 	}

@@ -276,6 +293,8 @@ func (cs *ConsensusState) startRoutines(maxSteps int) {
 func (cs *ConsensusState) OnStop() {
 	cs.BaseService.OnStop()

+	cs.evsw.Stop()
+
 	cs.timeoutTicker.Stop()

 	// Make BaseService.Wait() wait until cs.wal.Wait()
@@ -293,7 +312,7 @@ func (cs *ConsensusState) Wait() {

 // OpenWAL opens a file to log all consensus messages and timeouts for deterministic accountability
 func (cs *ConsensusState) OpenWAL(walFile string) (WAL, error) {
-	wal, err := NewWAL(walFile, cs.config.WalLight)
+	wal, err := NewWAL(walFile)
 	if err != nil {
 		cs.Logger.Error("Failed to open WAL for consensus state", "wal", walFile, "err", err)
 		return nil, err
@@ -496,11 +515,12 @@ func (cs *ConsensusState) updateToState(state sm.State) {

 func (cs *ConsensusState) newStep() {
 	rs := cs.RoundStateEvent()
-	cs.wal.Save(rs)
+	cs.wal.Write(rs)
 	cs.nSteps++
 	// newStep is called by updateToStep in NewConsensusState before the eventBus is set!
 	if cs.eventBus != nil {
 		cs.eventBus.PublishEventNewRoundStep(rs)
+		cs.evsw.FireEvent(types.EventNewRoundStep, &cs.RoundState)
 	}
 }

@@ -534,16 +554,16 @@ func (cs *ConsensusState) receiveRoutine(maxSteps int) {
 		case height := <-cs.mempool.TxsAvailable():
 			cs.handleTxsAvailable(height)
 		case mi = <-cs.peerMsgQueue:
-			cs.wal.Save(mi)
+			cs.wal.Write(mi)
 			// handles proposals, block parts, votes
 			// may generate internal events (votes, complete proposals, 2/3 majorities)
 			cs.handleMsg(mi)
 		case mi = <-cs.internalMsgQueue:
-			cs.wal.Save(mi)
+			cs.wal.WriteSync(mi) // NOTE: fsync
 			// handles proposals, block parts, votes
 			cs.handleMsg(mi)
 		case ti := <-cs.timeoutTicker.Chan(): // tockChan:
-			cs.wal.Save(ti)
+			cs.wal.Write(ti)
 			// if the timeout is relevant to the rs
 			// go to the next step
 			cs.handleTimeout(ti, rs)
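
The receiveRoutine hunk above distinguishes plain Write (peer messages, timeouts) from WriteSync with its "NOTE: fsync" comment (internal messages such as our own votes). A rough sketch of why that distinction matters, using only os-level file calls to illustrate the durability difference; this is not the repository's WAL code.

    package main

    import "os"

    // Illustrative only: Write buffers, WriteSync additionally calls Sync so
    // the message is on stable storage before we act on it. Losing our own
    // just-signed vote to a crash is worse than losing a peer's message we can
    // ask for again, which is why internal messages take the synced path.
    type fileWAL struct{ f *os.File }

    func (w *fileWAL) Write(msg []byte) error {
    	_, err := w.f.Write(append(msg, '\n'))
    	return err
    }

    func (w *fileWAL) WriteSync(msg []byte) error {
    	if err := w.Write(msg); err != nil {
    		return err
    	}
    	return w.f.Sync() // force the data to disk
    }

    func main() {
    	f, err := os.CreateTemp("", "wal-sketch")
    	if err != nil {
    		panic(err)
    	}
    	defer os.Remove(f.Name())
    	w := &fileWAL{f: f}
    	_ = w.Write([]byte("peer vote"))        // may still sit in the OS cache
    	_ = w.WriteSync([]byte("our own vote")) // durable before we proceed
    }
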
@@ -576,8 +596,9 @@ func (cs *ConsensusState) handleMsg(mi msgInfo) {
 		err = cs.setProposal(msg.Proposal)
 	case *BlockPartMessage:
 		// if the proposal is complete, we'll enterPrevote or tryFinalizeCommit
-		_, err = cs.addProposalBlockPart(msg.Height, msg.Part, peerID != "")
+		_, err = cs.addProposalBlockPart(msg.Height, msg.Part)
 		if err != nil && msg.Round != cs.Round {
+			cs.Logger.Debug("Received block part from wrong round", "height", cs.Height, "csRound", cs.Round, "blockRound", msg.Round)
 			err = nil
 		}
 	case *VoteMessage:
@@ -602,7 +623,7 @@ func (cs *ConsensusState) handleMsg(mi msgInfo) {
 		cs.Logger.Error("Unknown msg type", reflect.TypeOf(msg))
 	}
 	if err != nil {
-		cs.Logger.Error("Error with msg", "type", reflect.TypeOf(msg), "peer", peerID, "err", err, "msg", msg)
+		cs.Logger.Error("Error with msg", "height", cs.Height, "round", cs.Round, "type", reflect.TypeOf(msg), "peer", peerID, "err", err, "msg", msg)
 	}
 }

@@ -659,16 +680,18 @@ func (cs *ConsensusState) handleTxsAvailable(height int64) {
|
|||||||
// Enter: +2/3 prevotes any or +2/3 precommits for block or any from (height, round)
|
// Enter: +2/3 prevotes any or +2/3 precommits for block or any from (height, round)
|
||||||
// NOTE: cs.StartTime was already set for height.
|
// NOTE: cs.StartTime was already set for height.
|
||||||
func (cs *ConsensusState) enterNewRound(height int64, round int) {
|
func (cs *ConsensusState) enterNewRound(height int64, round int) {
|
||||||
|
logger := cs.Logger.With("height", height, "round", round)
|
||||||
|
|
||||||
if cs.Height != height || round < cs.Round || (cs.Round == round && cs.Step != cstypes.RoundStepNewHeight) {
|
if cs.Height != height || round < cs.Round || (cs.Round == round && cs.Step != cstypes.RoundStepNewHeight) {
|
||||||
cs.Logger.Debug(cmn.Fmt("enterNewRound(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
|
logger.Debug(cmn.Fmt("enterNewRound(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if now := time.Now(); cs.StartTime.After(now) {
|
if now := time.Now(); cs.StartTime.After(now) {
|
||||||
cs.Logger.Info("Need to set a buffer and log message here for sanity.", "startTime", cs.StartTime, "now", now)
|
logger.Info("Need to set a buffer and log message here for sanity.", "startTime", cs.StartTime, "now", now)
|
||||||
}
|
}
|
||||||
|
|
||||||
cs.Logger.Info(cmn.Fmt("enterNewRound(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
|
logger.Info(cmn.Fmt("enterNewRound(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
|
||||||
|
|
||||||
// Increment validators if necessary
|
// Increment validators if necessary
|
||||||
validators := cs.Validators
|
validators := cs.Validators
|
||||||
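Several hunks in this file replace direct cs.Logger calls with a logger scoped once via With("height", height, "round", round), so every line emitted inside the step repeats the same key/value context. A minimal sketch of that pattern, assuming the tmlibs/log constructors NewTMLogger and NewSyncWriter; the values are made up:

    package main

    import (
        "os"

        "github.com/tendermint/tmlibs/log"
    )

    func main() {
        base := log.NewTMLogger(log.NewSyncWriter(os.Stdout))

        // Scope the logger once; every call below carries height/round automatically.
        logger := base.With("height", int64(42), "round", 3)

        logger.Info("enterNewRound")                    // includes height=42 round=3
        logger.Debug("invalid args", "step", "Propose") // same context plus extra keys
    }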
@@ -687,6 +710,7 @@ func (cs *ConsensusState) enterNewRound(height int64, round int) {
 // and meanwhile we might have received a proposal
 // for round 0.
 } else {
+logger.Info("Resetting Proposal info")
 cs.Proposal = nil
 cs.ProposalBlock = nil
 cs.ProposalBlockParts = nil
@@ -740,6 +764,7 @@ func (cs *ConsensusState) proposalHeartbeat(height int64, round int) {
 }
 cs.privValidator.SignHeartbeat(chainID, heartbeat)
 cs.eventBus.PublishEventProposalHeartbeat(types.EventDataProposalHeartbeat{heartbeat})
+cs.evsw.FireEvent(types.EventProposalHeartbeat, heartbeat)
 counter++
 time.Sleep(proposalHeartbeatIntervalSeconds * time.Second)
 }
@@ -749,11 +774,13 @@ func (cs *ConsensusState) proposalHeartbeat(height int64, round int) {
 // Enter (CreateEmptyBlocks, CreateEmptyBlocksInterval > 0 ): after enterNewRound(height,round), after timeout of CreateEmptyBlocksInterval
 // Enter (!CreateEmptyBlocks) : after enterNewRound(height,round), once txs are in the mempool
 func (cs *ConsensusState) enterPropose(height int64, round int) {
+logger := cs.Logger.With("height", height, "round", round)

 if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPropose <= cs.Step) {
-cs.Logger.Debug(cmn.Fmt("enterPropose(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
+logger.Debug(cmn.Fmt("enterPropose(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
 return
 }
-cs.Logger.Info(cmn.Fmt("enterPropose(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
+logger.Info(cmn.Fmt("enterPropose(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))

 defer func() {
 // Done enterPropose:
@@ -773,22 +800,22 @@ func (cs *ConsensusState) enterPropose(height int64, round int) {

 // Nothing more to do if we're not a validator
 if cs.privValidator == nil {
-cs.Logger.Debug("This node is not a validator")
+logger.Debug("This node is not a validator")
 return
 }

 // if not a validator, we're done
 if !cs.Validators.HasAddress(cs.privValidator.GetAddress()) {
-cs.Logger.Debug("This node is not a validator", "addr", cs.privValidator.GetAddress(), "vals", cs.Validators)
+logger.Debug("This node is not a validator", "addr", cs.privValidator.GetAddress(), "vals", cs.Validators)
 return
 }
-cs.Logger.Debug("This node is a validator")
+logger.Debug("This node is a validator")

 if cs.isProposer() {
-cs.Logger.Info("enterPropose: Our turn to propose", "proposer", cs.Validators.GetProposer().Address, "privValidator", cs.privValidator)
+logger.Info("enterPropose: Our turn to propose", "proposer", cs.Validators.GetProposer().Address, "privValidator", cs.privValidator)
 cs.decideProposal(height, round)
 } else {
-cs.Logger.Info("enterPropose: Not our turn to propose", "proposer", cs.Validators.GetProposer().Address, "privValidator", cs.privValidator)
+logger.Info("enterPropose: Not our turn to propose", "proposer", cs.Validators.GetProposer().Address, "privValidator", cs.privValidator)
 }
 }
@@ -951,14 +978,16 @@ func (cs *ConsensusState) defaultDoPrevote(height int64, round int) {

 // Enter: any +2/3 prevotes at next round.
 func (cs *ConsensusState) enterPrevoteWait(height int64, round int) {
+logger := cs.Logger.With("height", height, "round", round)

 if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrevoteWait <= cs.Step) {
-cs.Logger.Debug(cmn.Fmt("enterPrevoteWait(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
+logger.Debug(cmn.Fmt("enterPrevoteWait(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
 return
 }
 if !cs.Votes.Prevotes(round).HasTwoThirdsAny() {
 cmn.PanicSanity(cmn.Fmt("enterPrevoteWait(%v/%v), but Prevotes does not have any +2/3 votes", height, round))
 }
-cs.Logger.Info(cmn.Fmt("enterPrevoteWait(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
+logger.Info(cmn.Fmt("enterPrevoteWait(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))

 defer func() {
 // Done enterPrevoteWait:
@@ -977,12 +1006,14 @@ func (cs *ConsensusState) enterPrevoteWait(height int64, round int) {
 // else, unlock an existing lock and precommit nil if +2/3 of prevotes were nil,
 // else, precommit nil otherwise.
 func (cs *ConsensusState) enterPrecommit(height int64, round int) {
+logger := cs.Logger.With("height", height, "round", round)

 if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrecommit <= cs.Step) {
-cs.Logger.Debug(cmn.Fmt("enterPrecommit(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
+logger.Debug(cmn.Fmt("enterPrecommit(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
 return
 }

-cs.Logger.Info(cmn.Fmt("enterPrecommit(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
+logger.Info(cmn.Fmt("enterPrecommit(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))

 defer func() {
 // Done enterPrecommit:
@@ -990,23 +1021,24 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) {
 cs.newStep()
 }()

+// check for a polka
 blockID, ok := cs.Votes.Prevotes(round).TwoThirdsMajority()

-// If we don't have a polka, we must precommit nil
+// If we don't have a polka, we must precommit nil.
 if !ok {
 if cs.LockedBlock != nil {
-cs.Logger.Info("enterPrecommit: No +2/3 prevotes during enterPrecommit while we're locked. Precommitting nil")
+logger.Info("enterPrecommit: No +2/3 prevotes during enterPrecommit while we're locked. Precommitting nil")
 } else {
-cs.Logger.Info("enterPrecommit: No +2/3 prevotes during enterPrecommit. Precommitting nil.")
+logger.Info("enterPrecommit: No +2/3 prevotes during enterPrecommit. Precommitting nil.")
 }
 cs.signAddVote(types.VoteTypePrecommit, nil, types.PartSetHeader{})
 return
 }

-// At this point +2/3 prevoted for a particular block or nil
+// At this point +2/3 prevoted for a particular block or nil.
 cs.eventBus.PublishEventPolka(cs.RoundStateEvent())

-// the latest POLRound should be this round
+// the latest POLRound should be this round.
 polRound, _ := cs.Votes.POLInfo()
 if polRound < round {
 cmn.PanicSanity(cmn.Fmt("This POLRound should be %v but got %", round, polRound))
@@ -1015,9 +1047,9 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) {
 // +2/3 prevoted nil. Unlock and precommit nil.
 if len(blockID.Hash) == 0 {
 if cs.LockedBlock == nil {
-cs.Logger.Info("enterPrecommit: +2/3 prevoted for nil.")
+logger.Info("enterPrecommit: +2/3 prevoted for nil.")
 } else {
-cs.Logger.Info("enterPrecommit: +2/3 prevoted for nil. Unlocking")
+logger.Info("enterPrecommit: +2/3 prevoted for nil. Unlocking")
 cs.LockedRound = 0
 cs.LockedBlock = nil
 cs.LockedBlockParts = nil
@@ -1031,7 +1063,7 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) {

 // If we're already locked on that block, precommit it, and update the LockedRound
 if cs.LockedBlock.HashesTo(blockID.Hash) {
-cs.Logger.Info("enterPrecommit: +2/3 prevoted locked block. Relocking")
+logger.Info("enterPrecommit: +2/3 prevoted locked block. Relocking")
 cs.LockedRound = round
 cs.eventBus.PublishEventRelock(cs.RoundStateEvent())
 cs.signAddVote(types.VoteTypePrecommit, blockID.Hash, blockID.PartsHeader)
@@ -1040,7 +1072,7 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) {

 // If +2/3 prevoted for proposal block, stage and precommit it
 if cs.ProposalBlock.HashesTo(blockID.Hash) {
-cs.Logger.Info("enterPrecommit: +2/3 prevoted proposal block. Locking", "hash", blockID.Hash)
+logger.Info("enterPrecommit: +2/3 prevoted proposal block. Locking", "hash", blockID.Hash)
 // Validate the block.
 if err := cs.blockExec.ValidateBlock(cs.state, cs.ProposalBlock); err != nil {
 cmn.PanicConsensus(cmn.Fmt("enterPrecommit: +2/3 prevoted for an invalid block: %v", err))
@@ -1070,14 +1102,16 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) {

 // Enter: any +2/3 precommits for next round.
 func (cs *ConsensusState) enterPrecommitWait(height int64, round int) {
+logger := cs.Logger.With("height", height, "round", round)

 if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrecommitWait <= cs.Step) {
-cs.Logger.Debug(cmn.Fmt("enterPrecommitWait(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
+logger.Debug(cmn.Fmt("enterPrecommitWait(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
 return
 }
 if !cs.Votes.Precommits(round).HasTwoThirdsAny() {
 cmn.PanicSanity(cmn.Fmt("enterPrecommitWait(%v/%v), but Precommits does not have any +2/3 votes", height, round))
 }
-cs.Logger.Info(cmn.Fmt("enterPrecommitWait(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
+logger.Info(cmn.Fmt("enterPrecommitWait(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))

 defer func() {
 // Done enterPrecommitWait:
@@ -1092,11 +1126,13 @@ func (cs *ConsensusState) enterPrecommitWait(height int64, round int) {

 // Enter: +2/3 precommits for block
 func (cs *ConsensusState) enterCommit(height int64, commitRound int) {
+logger := cs.Logger.With("height", height, "commitRound", commitRound)

 if cs.Height != height || cstypes.RoundStepCommit <= cs.Step {
-cs.Logger.Debug(cmn.Fmt("enterCommit(%v/%v): Invalid args. Current step: %v/%v/%v", height, commitRound, cs.Height, cs.Round, cs.Step))
+logger.Debug(cmn.Fmt("enterCommit(%v/%v): Invalid args. Current step: %v/%v/%v", height, commitRound, cs.Height, cs.Round, cs.Step))
 return
 }
-cs.Logger.Info(cmn.Fmt("enterCommit(%v/%v). Current: %v/%v/%v", height, commitRound, cs.Height, cs.Round, cs.Step))
+logger.Info(cmn.Fmt("enterCommit(%v/%v). Current: %v/%v/%v", height, commitRound, cs.Height, cs.Round, cs.Step))

 defer func() {
 // Done enterCommit:
@@ -1119,6 +1155,7 @@ func (cs *ConsensusState) enterCommit(height int64, commitRound int) {
 // Move them over to ProposalBlock if they match the commit hash,
 // otherwise they'll be cleared in updateToState.
 if cs.LockedBlock.HashesTo(blockID.Hash) {
+logger.Info("Commit is for locked block. Set ProposalBlock=LockedBlock", "blockHash", blockID.Hash)
 cs.ProposalBlock = cs.LockedBlock
 cs.ProposalBlockParts = cs.LockedBlockParts
 }
@@ -1126,6 +1163,7 @@ func (cs *ConsensusState) enterCommit(height int64, commitRound int) {
 // If we don't have the block being committed, set up to get it.
 if !cs.ProposalBlock.HashesTo(blockID.Hash) {
 if !cs.ProposalBlockParts.HasHeader(blockID.PartsHeader) {
+logger.Info("Commit is for a block we don't know about. Set ProposalBlock=nil", "proposal", cs.ProposalBlock.Hash(), "commit", blockID.Hash)
 // We're getting the wrong block.
 // Set up ProposalBlockParts and keep waiting.
 cs.ProposalBlock = nil
@@ -1138,19 +1176,21 @@ func (cs *ConsensusState) enterCommit(height int64, commitRound int) {

 // If we have the block AND +2/3 commits for it, finalize.
 func (cs *ConsensusState) tryFinalizeCommit(height int64) {
+logger := cs.Logger.With("height", height)

 if cs.Height != height {
 cmn.PanicSanity(cmn.Fmt("tryFinalizeCommit() cs.Height: %v vs height: %v", cs.Height, height))
 }

 blockID, ok := cs.Votes.Precommits(cs.CommitRound).TwoThirdsMajority()
 if !ok || len(blockID.Hash) == 0 {
-cs.Logger.Error("Attempt to finalize failed. There was no +2/3 majority, or +2/3 was for <nil>.", "height", height)
+logger.Error("Attempt to finalize failed. There was no +2/3 majority, or +2/3 was for <nil>.")
 return
 }
 if !cs.ProposalBlock.HashesTo(blockID.Hash) {
 // TODO: this happens every time if we're not a validator (ugly logs)
 // TODO: ^^ wait, why does it matter that we're a validator?
-cs.Logger.Info("Attempt to finalize failed. We don't have the commit block.", "height", height, "proposal-block", cs.ProposalBlock.Hash(), "commit-block", blockID.Hash)
+logger.Info("Attempt to finalize failed. We don't have the commit block.", "proposal-block", cs.ProposalBlock.Hash(), "commit-block", blockID.Hash)
 return
 }
@@ -1201,23 +1241,28 @@ func (cs *ConsensusState) finalizeCommit(height int64) {

 fail.Fail() // XXX

-// Finish writing to the WAL for this height.
-// NOTE: If we fail before writing this, we'll never write it,
-// and just recover by running ApplyBlock in the Handshake.
-// If we moved it before persisting the block, we'd have to allow
-// WAL replay for blocks with an #ENDHEIGHT
-// As is, ConsensusState should not be started again
-// until we successfully call ApplyBlock (ie. here or in Handshake after restart)
-cs.wal.Save(EndHeightMessage{height})
+// Write EndHeightMessage{} for this height, implying that the blockstore
+// has saved the block.
+//
+// If we crash before writing this EndHeightMessage{}, we will recover by
+// running ApplyBlock during the ABCI handshake when we restart. If we
+// didn't save the block to the blockstore before writing
+// EndHeightMessage{}, we'd have to change WAL replay -- currently it
+// complains about replaying for heights where an #ENDHEIGHT entry already
+// exists.
+//
+// Either way, the ConsensusState should not be resumed until we
+// successfully call ApplyBlock (ie. later here, or in Handshake after
+// restart).
+cs.wal.WriteSync(EndHeightMessage{height}) // NOTE: fsync

 fail.Fail() // XXX

-// Create a copy of the state for staging
-// and an event cache for txs
+// Create a copy of the state for staging and an event cache for txs.
 stateCopy := cs.state.Copy()

 // Execute and commit the block, update and save the state, and update the mempool.
-// NOTE: the block.AppHash wont reflect these txs until the next block
+// NOTE The block.AppHash wont reflect these txs until the next block.
 var err error
 stateCopy, err = cs.blockExec.ApplyBlock(stateCopy, types.BlockID{block.Hash(), blockParts.Header()}, block)
 if err != nil {
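The rewritten comment above pins down the ordering finalizeCommit depends on: the block is saved to the blockstore first, then the #ENDHEIGHT marker is fsynced to the WAL, and only then (possibly after a restart) is the block applied. A compressed sketch of that ordering; the helper functions are placeholders, not the real call sites:

    package main

    // Placeholders standing in for the real blockstore, WAL, and ABCI calls.
    func saveBlockToStore(height int64)      {}
    func walWriteSyncEndHeight(height int64) {}
    func applyBlockViaABCI(height int64)     {}

    func finalizeSketch(height int64) {
        saveBlockToStore(height)      // 1. the block is durable in the blockstore
        walWriteSyncEndHeight(height) // 2. #ENDHEIGHT is fsynced only after the block is saved
        applyBlockViaABCI(height)     // 3. the state transition may also run later, on restart

        // Crash between 1 and 2: no #ENDHEIGHT exists for this height, so the node
        // recovers by running ApplyBlock during the ABCI handshake.
        // Reversing 1 and 2 would instead require WAL replay for heights that
        // already have an #ENDHEIGHT entry, which the current replay logic rejects.
    }

    func main() { finalizeSketch(1) }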
@@ -1278,23 +1323,26 @@ func (cs *ConsensusState) defaultSetProposal(proposal *types.Proposal) error {

 cs.Proposal = proposal
 cs.ProposalBlockParts = types.NewPartSetFromHeader(proposal.BlockPartsHeader)
+cs.Logger.Info("Received proposal", "proposal", proposal)
 return nil
 }

 // NOTE: block is not necessarily valid.
 // Asynchronously triggers either enterPrevote (before we timeout of propose) or tryFinalizeCommit, once we have the full block.
-func (cs *ConsensusState) addProposalBlockPart(height int64, part *types.Part, verify bool) (added bool, err error) {
+func (cs *ConsensusState) addProposalBlockPart(height int64, part *types.Part) (added bool, err error) {
 // Blocks might be reused, so round mismatch is OK
 if cs.Height != height {
+cs.Logger.Debug("Received block part from wrong height", "height", height)
 return false, nil
 }

 // We're not expecting a block part.
 if cs.ProposalBlockParts == nil {
+cs.Logger.Info("Received a block part when we're not expecting any", "height", height)
 return false, nil // TODO: bad peer? Return error?
 }

-added, err = cs.ProposalBlockParts.AddPart(part, verify)
+added, err = cs.ProposalBlockParts.AddPart(part)
 if err != nil {
 return added, err
 }
@@ -1306,6 +1354,25 @@ func (cs *ConsensusState) addProposalBlockPart(height int64, part *types.Part, v
 }
 // NOTE: it's possible to receive complete proposal blocks for future rounds without having the proposal
 cs.Logger.Info("Received complete proposal block", "height", cs.ProposalBlock.Height, "hash", cs.ProposalBlock.Hash())

+// Update Valid* if we can.
+prevotes := cs.Votes.Prevotes(cs.Round)
+blockID, hasTwoThirds := prevotes.TwoThirdsMajority()
+if hasTwoThirds && !blockID.IsZero() && (cs.ValidRound < cs.Round) {
+if cs.ProposalBlock.HashesTo(blockID.Hash) {
+cs.Logger.Info("Updating valid block to new proposal block",
+"valid-round", cs.Round, "valid-block-hash", cs.ProposalBlock.Hash())
+cs.ValidRound = cs.Round
+cs.ValidBlock = cs.ProposalBlock
+cs.ValidBlockParts = cs.ProposalBlockParts
+}
+// TODO: In case there is +2/3 majority in Prevotes set for some
+// block and cs.ProposalBlock contains different block, either
+// proposer is faulty or voting power of faulty processes is more
+// than 1/3. We should trigger in the future accountability
+// procedure at this point.
+}

 if cs.Step == cstypes.RoundStepPropose && cs.isProposalComplete() {
 // Move onto the next step
 cs.enterPrevote(height, cs.Round)
@@ -1364,6 +1431,7 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool,

 cs.Logger.Info(cmn.Fmt("Added to lastPrecommits: %v", cs.LastCommit.StringShort()))
 cs.eventBus.PublishEventVote(types.EventDataVote{vote})
+cs.evsw.FireEvent(types.EventVote, vote)

 // if we can skip timeoutCommit and have all the votes now,
 if cs.config.SkipTimeoutCommit && cs.LastCommit.HasAll() {
@@ -1391,38 +1459,50 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool,
 }

 cs.eventBus.PublishEventVote(types.EventDataVote{vote})
+cs.evsw.FireEvent(types.EventVote, vote)

 switch vote.Type {
 case types.VoteTypePrevote:
 prevotes := cs.Votes.Prevotes(vote.Round)
 cs.Logger.Info("Added to prevote", "vote", vote, "prevotes", prevotes.StringShort())
-blockID, ok := prevotes.TwoThirdsMajority()
-// First, unlock if prevotes is a valid POL.
-// >> lockRound < POLRound <= unlockOrChangeLockRound (see spec)
-// NOTE: If (lockRound < POLRound) but !(POLRound <= unlockOrChangeLockRound),
-// we'll still enterNewRound(H,vote.R) and enterPrecommit(H,vote.R) to process it
-// there.
-if (cs.LockedBlock != nil) && (cs.LockedRound < vote.Round) && (vote.Round <= cs.Round) {
-if ok && !cs.LockedBlock.HashesTo(blockID.Hash) {
+// If +2/3 prevotes for a block or nil for *any* round:
+if blockID, ok := prevotes.TwoThirdsMajority(); ok {
+// There was a polka!
+// If we're locked but this is a recent polka, unlock.
+// If it matches our ProposalBlock, update the ValidBlock
+
+// Unlock if `cs.LockedRound < vote.Round <= cs.Round`
+// NOTE: If vote.Round > cs.Round, we'll deal with it when we get to vote.Round
+if (cs.LockedBlock != nil) &&
+(cs.LockedRound < vote.Round) &&
+(vote.Round <= cs.Round) &&
+!cs.LockedBlock.HashesTo(blockID.Hash) {

 cs.Logger.Info("Unlocking because of POL.", "lockedRound", cs.LockedRound, "POLRound", vote.Round)
 cs.LockedRound = 0
 cs.LockedBlock = nil
 cs.LockedBlockParts = nil
 cs.eventBus.PublishEventUnlock(cs.RoundStateEvent())
 }
-}
-// Update ValidBlock
-if ok && !blockID.IsZero() && !cs.ValidBlock.HashesTo(blockID.Hash) && vote.Round > cs.ValidRound {
-// update valid value
-if cs.ProposalBlock.HashesTo(blockID.Hash) {
+// Update Valid* if we can.
+// NOTE: our proposal block may be nil or not what received a polka..
+// TODO: we may want to still update the ValidBlock and obtain it via gossipping
+if !blockID.IsZero() &&
+(cs.ValidRound < vote.Round) &&
+(vote.Round <= cs.Round) &&
+cs.ProposalBlock.HashesTo(blockID.Hash) {

+cs.Logger.Info("Updating ValidBlock because of POL.", "validRound", cs.ValidRound, "POLRound", vote.Round)
 cs.ValidRound = vote.Round
 cs.ValidBlock = cs.ProposalBlock
 cs.ValidBlockParts = cs.ProposalBlockParts
 }
-//TODO: We might want to update ValidBlock also in case we don't have that block yet,
-// and obtain the required block using gossiping
 }

+// If +2/3 prevotes for *anything* for this or future round:
 if cs.Round <= vote.Round && prevotes.HasTwoThirdsAny() {
 // Round-skip over to PrevoteWait or goto Precommit.
 cs.enterNewRound(height, vote.Round) // if the vote is ahead of us
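The reworked prevote handling above encodes two rules of the locking discipline: unlock when a newer polka (cs.LockedRound < vote.Round <= cs.Round) is for a different block, and promote the proposal to ValidBlock when the polka matches it. A self-contained restatement of those two predicates; the types and field names here are illustrative only:

    package main

    import "bytes"

    // Minimal stand-ins for the fields the two predicates read.
    type lockState struct {
        lockedRound  int
        lockedHash   []byte // nil when not locked
        validRound   int
        currentRound int
        proposalHash []byte
    }

    // shouldUnlock mirrors the unlock condition in the hunk above: locked,
    // lockedRound < polkaRound <= currentRound, and the polka is for a
    // different block than the one we are locked on.
    func shouldUnlock(s lockState, polkaRound int, polkaHash []byte) bool {
        return s.lockedHash != nil &&
            s.lockedRound < polkaRound &&
            polkaRound <= s.currentRound &&
            !bytes.Equal(s.lockedHash, polkaHash)
    }

    // shouldUpdateValid mirrors the Valid* update: a non-nil polka newer than
    // validRound, not from a future round, and matching our proposal block.
    func shouldUpdateValid(s lockState, polkaRound int, polkaHash []byte) bool {
        return len(polkaHash) > 0 &&
            s.validRound < polkaRound &&
            polkaRound <= s.currentRound &&
            bytes.Equal(s.proposalHash, polkaHash)
    }

    func main() {
        s := lockState{lockedRound: 1, lockedHash: []byte("A"), validRound: -1, currentRound: 3, proposalHash: []byte("B")}
        _ = shouldUnlock(s, 2, []byte("B"))      // true: newer polka for a different block
        _ = shouldUpdateValid(s, 2, []byte("B")) // true: polka matches our proposal
    }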
@@ -1438,6 +1518,7 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool,
 cs.enterPrevote(height, cs.Round)
 }
 }

 case types.VoteTypePrecommit:
 precommits := cs.Votes.Precommits(vote.Round)
 cs.Logger.Info("Added to precommit", "vote", vote, "precommits", precommits.StringShort())
@@ -11,7 +11,7 @@ import (
 "github.com/tendermint/tendermint/types"
 cmn "github.com/tendermint/tmlibs/common"
 "github.com/tendermint/tmlibs/log"
-tmpubsub "github.com/tendermint/tmlibs/pubsub"
+tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
 )

 func init() {
@@ -174,6 +174,26 @@ func (hvs *HeightVoteSet) getVoteSet(round int, type_ byte) *types.VoteSet {
 }
 }

+// If a peer claims that it has 2/3 majority for given blockKey, call this.
+// NOTE: if there are too many peers, or too much peer churn,
+// this can cause memory issues.
+// TODO: implement ability to remove peers too
+func (hvs *HeightVoteSet) SetPeerMaj23(round int, type_ byte, peerID p2p.ID, blockID types.BlockID) error {
+hvs.mtx.Lock()
+defer hvs.mtx.Unlock()
+if !types.IsVoteTypeValid(type_) {
+return fmt.Errorf("SetPeerMaj23: Invalid vote type %v", type_)
+}
+voteSet := hvs.getVoteSet(round, type_)
+if voteSet == nil {
+return nil // something we don't know about yet
+}
+return voteSet.SetPeerMaj23(types.P2PID(peerID), blockID)
+}
+
+//---------------------------------------------------------
+// string and json
+
 func (hvs *HeightVoteSet) String() string {
 return hvs.StringIndented("")
 }
@@ -207,19 +227,35 @@ func (hvs *HeightVoteSet) StringIndented(indent string) string {
 indent)
 }

-// If a peer claims that it has 2/3 majority for given blockKey, call this.
-// NOTE: if there are too many peers, or too much peer churn,
-// this can cause memory issues.
-// TODO: implement ability to remove peers too
-func (hvs *HeightVoteSet) SetPeerMaj23(round int, type_ byte, peerID p2p.ID, blockID types.BlockID) error {
+func (hvs *HeightVoteSet) MarshalJSON() ([]byte, error) {
 hvs.mtx.Lock()
 defer hvs.mtx.Unlock()
-if !types.IsVoteTypeValid(type_) {
-return fmt.Errorf("SetPeerMaj23: Invalid vote type %v", type_)
-}
-voteSet := hvs.getVoteSet(round, type_)
-if voteSet == nil {
-return nil // something we don't know about yet
-}
-return voteSet.SetPeerMaj23(types.P2PID(peerID), blockID)
+allVotes := hvs.toAllRoundVotes()
+return cdc.MarshalJSON(allVotes)
+}
+
+func (hvs *HeightVoteSet) toAllRoundVotes() []roundVotes {
+totalRounds := hvs.round + 1
+allVotes := make([]roundVotes, totalRounds)
+// rounds 0 ~ hvs.round inclusive
+for round := 0; round < totalRounds; round++ {
+allVotes[round] = roundVotes{
+Round: round,
+Prevotes: hvs.roundVoteSets[round].Prevotes.VoteStrings(),
+PrevotesBitArray: hvs.roundVoteSets[round].Prevotes.BitArrayString(),
+Precommits: hvs.roundVoteSets[round].Precommits.VoteStrings(),
+PrecommitsBitArray: hvs.roundVoteSets[round].Precommits.BitArrayString(),
+}
+}
+// TODO: all other peer catchup rounds
+return allVotes
+}
+
+type roundVotes struct {
+Round int `json:"round"`
+Prevotes []string `json:"prevotes"`
+PrevotesBitArray string `json:"prevotes_bit_array"`
+Precommits []string `json:"precommits"`
+PrecommitsBitArray string `json:"precommits_bit_array"`
 }
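MarshalJSON above emits one roundVotes entry per round from 0 through hvs.round, each carrying the vote strings and bit-array summaries for prevotes and precommits. A sketch of the resulting shape using plain encoding/json; the real code marshals through the package's amino codec cdc, and the values below are fabricated:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // Mirrors the roundVotes struct added in the hunk above.
    type roundVotes struct {
        Round              int      `json:"round"`
        Prevotes           []string `json:"prevotes"`
        PrevotesBitArray   string   `json:"prevotes_bit_array"`
        Precommits         []string `json:"precommits"`
        PrecommitsBitArray string   `json:"precommits_bit_array"`
    }

    func main() {
        // Fabricated example values; the real strings come from VoteSet.VoteStrings()
        // and VoteSet.BitArrayString().
        votes := []roundVotes{{
            Round:              0,
            Prevotes:           []string{"Vote{...}", "nil-Vote"},
            PrevotesBitArray:   "BA{2:x_} 1/2 = 0.50",
            Precommits:         []string{"nil-Vote", "nil-Vote"},
            PrecommitsBitArray: "BA{2:__} 0/2 = 0.00",
        }}
        out, _ := json.MarshalIndent(votes, "", "  ")
        fmt.Println(string(out))
    }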
consensus/types/peer_round_state.go (new file, 57 lines)
@@ -0,0 +1,57 @@
+package types
+
+import (
+"fmt"
+"time"
+
+"github.com/tendermint/tendermint/types"
+cmn "github.com/tendermint/tmlibs/common"
+)
+
+//-----------------------------------------------------------------------------
+
+// PeerRoundState contains the known state of a peer.
+// NOTE: Read-only when returned by PeerState.GetRoundState().
+type PeerRoundState struct {
+Height int64 `json:"height"` // Height peer is at
+Round int `json:"round"` // Round peer is at, -1 if unknown.
+Step RoundStepType `json:"step"` // Step peer is at
+StartTime time.Time `json:"start_time"` // Estimated start of round 0 at this height
+Proposal bool `json:"proposal"` // True if peer has proposal for this round
+ProposalBlockPartsHeader types.PartSetHeader `json:"proposal_block_parts_header"` //
+ProposalBlockParts *cmn.BitArray `json:"proposal_block_parts"` //
+ProposalPOLRound int `json:"proposal_pol_round"` // Proposal's POL round. -1 if none.
+ProposalPOL *cmn.BitArray `json:"proposal_pol"` // nil until ProposalPOLMessage received.
+Prevotes *cmn.BitArray `json:"prevotes"` // All votes peer has for this round
+Precommits *cmn.BitArray `json:"precommits"` // All precommits peer has for this round
+LastCommitRound int `json:"last_commit_round"` // Round of commit for last height. -1 if none.
+LastCommit *cmn.BitArray `json:"last_commit"` // All commit precommits of commit for last height.
+CatchupCommitRound int `json:"catchup_commit_round"` // Round that we have commit for. Not necessarily unique. -1 if none.
+CatchupCommit *cmn.BitArray `json:"catchup_commit"` // All commit precommits peer has for this height & CatchupCommitRound
+}
+
+// String returns a string representation of the PeerRoundState
+func (prs PeerRoundState) String() string {
+return prs.StringIndented("")
+}
+
+// StringIndented returns a string representation of the PeerRoundState
+func (prs PeerRoundState) StringIndented(indent string) string {
+return fmt.Sprintf(`PeerRoundState{
+%s %v/%v/%v @%v
+%s Proposal %v -> %v
+%s POL %v (round %v)
+%s Prevotes %v
+%s Precommits %v
+%s LastCommit %v (round %v)
+%s Catchup %v (round %v)
+%s}`,
+indent, prs.Height, prs.Round, prs.Step, prs.StartTime,
+indent, prs.ProposalBlockPartsHeader, prs.ProposalBlockParts,
+indent, prs.ProposalPOL, prs.ProposalPOLRound,
+indent, prs.Prevotes,
+indent, prs.Precommits,
+indent, prs.LastCommit, prs.LastCommitRound,
+indent, prs.CatchupCommit, prs.CatchupCommitRound,
+indent)
+}
@@ -1,57 +0,0 @@
-package types
-
-import (
-"fmt"
-"time"
-
-"github.com/tendermint/tendermint/types"
-cmn "github.com/tendermint/tmlibs/common"
-)
-
-//-----------------------------------------------------------------------------
-
-// PeerRoundState contains the known state of a peer.
-// NOTE: Read-only when returned by PeerState.GetRoundState().
-type PeerRoundState struct {
-Height int64 // Height peer is at
-Round int // Round peer is at, -1 if unknown.
-Step RoundStepType // Step peer is at
-StartTime time.Time // Estimated start of round 0 at this height
-Proposal bool // True if peer has proposal for this round
-ProposalBlockPartsHeader types.PartSetHeader //
-ProposalBlockParts *cmn.BitArray //
-ProposalPOLRound int // Proposal's POL round. -1 if none.
-ProposalPOL *cmn.BitArray // nil until ProposalPOLMessage received.
-Prevotes *cmn.BitArray // All votes peer has for this round
-Precommits *cmn.BitArray // All precommits peer has for this round
-LastCommitRound int // Round of commit for last height. -1 if none.
-LastCommit *cmn.BitArray // All commit precommits of commit for last height.
-CatchupCommitRound int // Round that we have commit for. Not necessarily unique. -1 if none.
-CatchupCommit *cmn.BitArray // All commit precommits peer has for this height & CatchupCommitRound
-}
-
-// String returns a string representation of the PeerRoundState
-func (prs PeerRoundState) String() string {
-return prs.StringIndented("")
-}
-
-// StringIndented returns a string representation of the PeerRoundState
-func (prs PeerRoundState) StringIndented(indent string) string {
-return fmt.Sprintf(`PeerRoundState{
-%s %v/%v/%v @%v
-%s Proposal %v -> %v
-%s POL %v (round %v)
-%s Prevotes %v
-%s Precommits %v
-%s LastCommit %v (round %v)
-%s Catchup %v (round %v)
-%s}`,
-indent, prs.Height, prs.Round, prs.Step, prs.StartTime,
-indent, prs.ProposalBlockPartsHeader, prs.ProposalBlockParts,
-indent, prs.ProposalPOL, prs.ProposalPOLRound,
-indent, prs.Prevotes,
-indent, prs.Precommits,
-indent, prs.LastCommit, prs.LastCommitRound,
-indent, prs.CatchupCommit, prs.CatchupCommitRound,
-indent)
-}
@@ -1,10 +1,12 @@
 package types

 import (
+"encoding/json"
 "fmt"
 "time"

 "github.com/tendermint/tendermint/types"
+cmn "github.com/tendermint/tmlibs/common"
 )

 //-----------------------------------------------------------------------------
@@ -13,6 +15,7 @@ import (
 // RoundStepType enumerates the state of the consensus state machine
 type RoundStepType uint8 // These must be numeric, ordered.

+// RoundStepType
 const (
 RoundStepNewHeight = RoundStepType(0x01) // Wait til CommitTime + timeoutCommit
 RoundStepNewRound = RoundStepType(0x02) // Setup new round and go to RoundStepPropose
@@ -55,37 +58,63 @@ func (rs RoundStepType) String() string {
 // NOTE: Not thread safe. Should only be manipulated by functions downstream
 // of the cs.receiveRoutine
 type RoundState struct {
-Height int64 // Height we are working on
-Round int
-Step RoundStepType
-StartTime time.Time
-CommitTime time.Time // Subjective time when +2/3 precommits for Block at Round were found
-Validators *types.ValidatorSet
-Proposal *types.Proposal
-ProposalBlock *types.Block
-ProposalBlockParts *types.PartSet
-LockedRound int
-LockedBlock *types.Block
-LockedBlockParts *types.PartSet
-ValidRound int
-ValidBlock *types.Block
-ValidBlockParts *types.PartSet
-Votes *HeightVoteSet
-CommitRound int //
-LastCommit *types.VoteSet // Last precommits at Height-1
-LastValidators *types.ValidatorSet
+Height int64 `json:"height"` // Height we are working on
+Round int `json:"round"`
+Step RoundStepType `json:"step"`
+StartTime time.Time `json:"start_time"`
+CommitTime time.Time `json:"commit_time"` // Subjective time when +2/3 precommits for Block at Round were found
+Validators *types.ValidatorSet `json:"validators"`
+Proposal *types.Proposal `json:"proposal"`
+ProposalBlock *types.Block `json:"proposal_block"`
+ProposalBlockParts *types.PartSet `json:"proposal_block_parts"`
+LockedRound int `json:"locked_round"`
+LockedBlock *types.Block `json:"locked_block"`
+LockedBlockParts *types.PartSet `json:"locked_block_parts"`
+ValidRound int `json:"valid_round"` // Last known round with POL for non-nil valid block.
+ValidBlock *types.Block `json:"valid_block"` // Last known block of POL mentioned above.
+ValidBlockParts *types.PartSet `json:"valid_block_parts"` // Last known block parts of POL metnioned above.
+Votes *HeightVoteSet `json:"votes"`
+CommitRound int `json:"commit_round"` //
+LastCommit *types.VoteSet `json:"last_commit"` // Last precommits at Height-1
+LastValidators *types.ValidatorSet `json:"last_validators"`
+}
+
+// Compressed version of the RoundState for use in RPC
+type RoundStateSimple struct {
+HeightRoundStep string `json:"height/round/step"`
+StartTime time.Time `json:"start_time"`
+ProposalBlockHash cmn.HexBytes `json:"proposal_block_hash"`
+LockedBlockHash cmn.HexBytes `json:"locked_block_hash"`
+ValidBlockHash cmn.HexBytes `json:"valid_block_hash"`
+Votes json.RawMessage `json:"height_vote_set"`
+}
+
+// Compress the RoundState to RoundStateSimple
+func (rs *RoundState) RoundStateSimple() RoundStateSimple {
+votesJSON, err := rs.Votes.MarshalJSON()
+if err != nil {
+panic(err)
+}
+return RoundStateSimple{
+HeightRoundStep: fmt.Sprintf("%d/%d/%d", rs.Height, rs.Round, rs.Step),
+StartTime: rs.StartTime,
+ProposalBlockHash: rs.ProposalBlock.Hash(),
+LockedBlockHash: rs.LockedBlock.Hash(),
+ValidBlockHash: rs.ValidBlock.Hash(),
+Votes: votesJSON,
+}
 }

 // RoundStateEvent returns the H/R/S of the RoundState as an event.
 func (rs *RoundState) RoundStateEvent() types.EventDataRoundState {
 // XXX: copy the RoundState
 // if we want to avoid this, we may need synchronous events after all
-rs_ := *rs
+rsCopy := *rs
 edrs := types.EventDataRoundState{
 Height: rs.Height,
 Round: rs.Round,
 Step: rs.Step.String(),
-RoundState: &rs_,
+RoundState: &rsCopy,
 }
 return edrs
 }
@@ -115,16 +144,16 @@ func (rs *RoundState) StringIndented(indent string) string {
 indent, rs.Height, rs.Round, rs.Step,
 indent, rs.StartTime,
 indent, rs.CommitTime,
 indent, rs.Validators.StringIndented(indent+" "),
 indent, rs.Proposal,
 indent, rs.ProposalBlockParts.StringShort(), rs.ProposalBlock.StringShort(),
 indent, rs.LockedRound,
 indent, rs.LockedBlockParts.StringShort(), rs.LockedBlock.StringShort(),
 indent, rs.ValidRound,
 indent, rs.ValidBlockParts.StringShort(), rs.ValidBlock.StringShort(),
 indent, rs.Votes.StringIndented(indent+" "),
 indent, rs.LastCommit.StringShort(),
 indent, rs.LastValidators.StringIndented(indent+" "),
 indent)
 }
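RoundStateSimple above exists so the RPC layer can return a compact height/round/step string plus block hashes and the vote-set JSON instead of the full RoundState. A self-contained analogue of that compression step, with local stand-in types rather than the consensus ones:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // Local stand-ins for the full and compressed round state.
    type fullState struct {
        Height            int64
        Round             int
        Step              int
        ProposalBlockHash []byte
    }

    type simpleState struct {
        HeightRoundStep   string `json:"height/round/step"`
        ProposalBlockHash []byte `json:"proposal_block_hash"`
    }

    // compress mirrors RoundState.RoundStateSimple: flatten H/R/S into one string
    // and keep only the hashes the RPC caller needs.
    func compress(fs fullState) simpleState {
        return simpleState{
            HeightRoundStep:   fmt.Sprintf("%d/%d/%d", fs.Height, fs.Round, fs.Step),
            ProposalBlockHash: fs.ProposalBlockHash,
        }
    }

    func main() {
        out, _ := json.Marshal(compress(fullState{Height: 100, Round: 0, Step: 3, ProposalBlockHash: []byte{0xAB}}))
        fmt.Println(string(out)) // compact JSON view of the round state
    }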
consensus/types/round_state_test.go (new file, 95 lines)
@@ -0,0 +1,95 @@
+package types
+
+import (
+"testing"
+"time"
+
+"github.com/tendermint/go-amino"
+"github.com/tendermint/go-crypto"
+"github.com/tendermint/tendermint/types"
+cmn "github.com/tendermint/tmlibs/common"
+)
+
+func BenchmarkRoundStateDeepCopy(b *testing.B) {
+b.StopTimer()
+
+// Random validators
+nval, ntxs := 100, 100
+vset, _ := types.RandValidatorSet(nval, 1)
+precommits := make([]*types.Vote, nval)
+blockID := types.BlockID{
+Hash: cmn.RandBytes(20),
+PartsHeader: types.PartSetHeader{
+Hash: cmn.RandBytes(20),
+},
+}
+sig := crypto.SignatureEd25519{}
+for i := 0; i < nval; i++ {
+precommits[i] = &types.Vote{
+ValidatorAddress: types.Address(cmn.RandBytes(20)),
+Timestamp: time.Now(),
+BlockID: blockID,
+Signature: sig,
+}
+}
+txs := make([]types.Tx, ntxs)
+for i := 0; i < ntxs; i++ {
+txs[i] = cmn.RandBytes(100)
+}
+// Random block
+block := &types.Block{
+Header: &types.Header{
+ChainID: cmn.RandStr(12),
+Time: time.Now(),
+LastBlockID: blockID,
+LastCommitHash: cmn.RandBytes(20),
+DataHash: cmn.RandBytes(20),
+ValidatorsHash: cmn.RandBytes(20),
+ConsensusHash: cmn.RandBytes(20),
+AppHash: cmn.RandBytes(20),
+LastResultsHash: cmn.RandBytes(20),
+EvidenceHash: cmn.RandBytes(20),
+},
+Data: &types.Data{
+Txs: txs,
+},
+Evidence: types.EvidenceData{},
+LastCommit: &types.Commit{
+BlockID: blockID,
+Precommits: precommits,
+},
+}
+parts := block.MakePartSet(4096)
+// Random Proposal
+proposal := &types.Proposal{
+Timestamp: time.Now(),
+BlockPartsHeader: types.PartSetHeader{
+Hash: cmn.RandBytes(20),
+},
+POLBlockID: blockID,
+Signature: sig,
+}
+// Random HeightVoteSet
+// TODO: hvs :=
+
+rs := &RoundState{
+StartTime: time.Now(),
+CommitTime: time.Now(),
+Validators: vset,
+Proposal: proposal,
+ProposalBlock: block,
+ProposalBlockParts: parts,
+LockedBlock: block,
+LockedBlockParts: parts,
+ValidBlock: block,
+ValidBlockParts: parts,
+Votes: nil, // TODO
+LastCommit: nil, // TODO
+LastValidators: vset,
+}
+b.StartTimer()
+
+for i := 0; i < b.N; i++ {
+amino.DeepCopy(rs)
+}
+}
consensus/types/wire.go (new file, 12 lines)
@@ -0,0 +1,12 @@
+package types
+
+import (
+"github.com/tendermint/go-amino"
+"github.com/tendermint/go-crypto"
+)
+
+var cdc = amino.NewCodec()
+
+func init() {
+crypto.RegisterAmino(cdc)
+}
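The new wire.go gives consensus/types its own amino codec, which is what cdc.MarshalJSON in the HeightVoteSet hunk refers to; the init call registers the go-crypto key and signature types on that codec. A minimal illustration of the same registration pattern, standalone and with a throwaway struct instead of the real consensus types:

    package main

    import (
        "fmt"

        amino "github.com/tendermint/go-amino"
        crypto "github.com/tendermint/go-crypto"
    )

    // Package-level codec, mirroring the cdc added in consensus/types/wire.go.
    var cdc = amino.NewCodec()

    func init() {
        // Register the crypto interfaces and concrete types so anything embedding
        // a PubKey or Signature can be (un)marshalled through this codec.
        crypto.RegisterAmino(cdc)
    }

    type example struct {
        Name string `json:"name"`
    }

    func main() {
        bz, err := cdc.MarshalJSON(example{Name: "round-votes"})
        if err != nil {
            panic(err)
        }
        fmt.Println(string(bz))
    }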
@@ -10,7 +10,7 @@ import (

 "github.com/pkg/errors"

-"github.com/tendermint/go-amino"
+amino "github.com/tendermint/go-amino"
 "github.com/tendermint/tendermint/types"
 auto "github.com/tendermint/tmlibs/autofile"
 cmn "github.com/tendermint/tmlibs/common"
@@ -50,7 +50,8 @@ func RegisterWALMessages(cdc *amino.Codec) {

 // WAL is an interface for any write-ahead logger.
 type WAL interface {
-Save(WALMessage)
+Write(WALMessage)
+WriteSync(WALMessage)
 Group() *auto.Group
 SearchForEndHeight(height int64, options *WALSearchOptions) (gr *auto.GroupReader, found bool, err error)
@@ -67,12 +68,11 @@ type baseWAL struct {
 cmn.BaseService

 group *auto.Group
-light bool // ignore block parts

 enc *WALEncoder
 }

-func NewWAL(walFile string, light bool) (*baseWAL, error) {
+func NewWAL(walFile string) (*baseWAL, error) {
 err := cmn.EnsureDir(filepath.Dir(walFile), 0700)
 if err != nil {
 return nil, errors.Wrap(err, "failed to ensure WAL directory is in place")
@@ -84,7 +84,6 @@ func NewWAL(walFile string, light bool) (*baseWAL, error) {
 }
 wal := &baseWAL{
 group: group,
-light: light,
 enc: NewWALEncoder(group),
 }
 wal.BaseService = *cmn.NewBaseService(nil, "baseWAL", wal)
@@ -100,7 +99,7 @@ func (wal *baseWAL) OnStart() error {
 if err != nil {
 return err
 } else if size == 0 {
-wal.Save(EndHeightMessage{0})
+wal.WriteSync(EndHeightMessage{0})
 }
 err = wal.group.Start()
 return err
@@ -111,29 +110,31 @@ func (wal *baseWAL) OnStop() {
 wal.group.Stop()
 }

-// called in newStep and for each pass in receiveRoutine
-func (wal *baseWAL) Save(msg WALMessage) {
+// Write is called in newStep and for each receive on the
+// peerMsgQueue and the timeoutTicker.
+// NOTE: does not call fsync()
+func (wal *baseWAL) Write(msg WALMessage) {
 if wal == nil {
 return
 }

-if wal.light {
-// in light mode we only write new steps, timeouts, and our own votes (no proposals, block parts)
-if mi, ok := msg.(msgInfo); ok {
-if mi.PeerID != "" {
-return
-}
-}
-}

 // Write the wal message
 if err := wal.enc.Encode(&TimedWALMessage{time.Now(), msg}); err != nil {
-cmn.PanicQ(cmn.Fmt("Error writing msg to consensus wal: %v \n\nMessage: %v", err, msg))
+panic(cmn.Fmt("Error writing msg to consensus wal: %v \n\nMessage: %v", err, msg))
+}
+}
+
+// WriteSync is called when we receive a msg from ourselves
+// so that we write to disk before sending signed messages.
+// NOTE: calls fsync()
+func (wal *baseWAL) WriteSync(msg WALMessage) {
+if wal == nil {
+return
 }

-// TODO: only flush when necessary
+wal.Write(msg)
 if err := wal.group.Flush(); err != nil {
-cmn.PanicQ(cmn.Fmt("Error flushing consensus wal buf to file. Error: %v \n", err))
+panic(cmn.Fmt("Error flushing consensus wal buf to file. Error: %v \n", err))
 }
 }
@@ -143,13 +144,14 @@ type WALSearchOptions struct {
 IgnoreDataCorruptionErrors bool
 }

-// SearchForEndHeight searches for the EndHeightMessage with the height and
-// returns an auto.GroupReader, whenever it was found or not and an error.
+// SearchForEndHeight searches for the EndHeightMessage with the given height
+// and returns an auto.GroupReader, whenever it was found or not and an error.
 // Group reader will be nil if found equals false.
 //
 // CONTRACT: caller must close group reader.
 func (wal *baseWAL) SearchForEndHeight(height int64, options *WALSearchOptions) (gr *auto.GroupReader, found bool, err error) {
 var msg *TimedWALMessage
+lastHeightFound := int64(-1)

 // NOTE: starting from the last file in the group because we're usually
 // searching for the last height. See replay.go
@@ -165,17 +167,25 @@ func (wal *baseWAL) SearchForEndHeight(height int64, options *WALSearchOptions)
 for {
 msg, err = dec.Decode()
 if err == io.EOF {
+// OPTIMISATION: no need to look for height in older files if we've seen h < height
+if lastHeightFound > 0 && lastHeightFound < height {
+gr.Close()
+return nil, false, nil
+}
 // check next file
 break
 }
 if options.IgnoreDataCorruptionErrors && IsDataCorruptionError(err) {
+wal.Logger.Debug("Corrupted entry. Skipping...", "err", err)
 // do nothing
+continue
 } else if err != nil {
 gr.Close()
 return nil, false, err
 }

 if m, ok := msg.Msg.(EndHeightMessage); ok {
+lastHeightFound = m.Height
 if m.Height == height { // found
 wal.Logger.Debug("Found", "height", height, "index", index)
 return gr, true, nil
@@ -270,23 +280,17 @@ func (dec *WALDecoder) Decode() (*TimedWALMessage, error) {
|
|||||||
|
|
||||||
b = make([]byte, 4)
|
b = make([]byte, 4)
|
||||||
_, err = dec.rd.Read(b)
|
_, err = dec.rd.Read(b)
|
||||||
if err == io.EOF {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("failed to read length: %v", err)
|
return nil, fmt.Errorf("failed to read length: %v", err)
|
||||||
}
|
}
|
||||||
length := binary.BigEndian.Uint32(b)
|
length := binary.BigEndian.Uint32(b)
|
||||||
|
|
||||||
if length > maxMsgSizeBytes {
|
if length > maxMsgSizeBytes {
|
||||||
return nil, DataCorruptionError{fmt.Errorf("length %d exceeded maximum possible value of %d bytes", length, maxMsgSizeBytes)}
|
return nil, fmt.Errorf("length %d exceeded maximum possible value of %d bytes", length, maxMsgSizeBytes)
|
||||||
}
|
}
|
||||||
|
|
||||||
data := make([]byte, length)
|
data := make([]byte, length)
|
||||||
_, err = dec.rd.Read(data)
|
_, err = dec.rd.Read(data)
|
||||||
if err == io.EOF {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("failed to read data: %v", err)
|
return nil, fmt.Errorf("failed to read data: %v", err)
|
||||||
}
|
}
|
||||||
@@ -308,8 +312,9 @@ func (dec *WALDecoder) Decode() (*TimedWALMessage, error) {
|
|||||||
|
|
||||||
type nilWAL struct{}
|
type nilWAL struct{}
|
||||||
|
|
||||||
func (nilWAL) Save(m WALMessage) {}
|
func (nilWAL) Write(m WALMessage) {}
|
||||||
func (nilWAL) Group() *auto.Group { return nil }
|
func (nilWAL) WriteSync(m WALMessage) {}
|
||||||
|
func (nilWAL) Group() *auto.Group { return nil }
|
||||||
func (nilWAL) SearchForEndHeight(height int64, options *WALSearchOptions) (gr *auto.GroupReader, found bool, err error) {
|
func (nilWAL) SearchForEndHeight(height int64, options *WALSearchOptions) (gr *auto.GroupReader, found bool, err error) {
|
||||||
return nil, false, nil
|
return nil, false, nil
|
||||||
}
|
}
|
||||||
|
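The net effect of this hunk is that the old `Save` (which always flushed) is split into a buffered `Write` and an fsync-ing `WriteSync`. A minimal sketch of how a caller is expected to use the two, based only on the comments in the hunk above; the `WAL` interface here is a stand-in for the consensus package's type and the helper implementation is invented for illustration:

```go
package main

import "fmt"

type WALMessage interface{}

// Minimal stand-in for the consensus WAL after this change.
type WAL interface {
	Write(WALMessage)     // buffered; no fsync
	WriteSync(WALMessage) // flushes to disk before returning
}

// printWAL is a toy implementation so the example runs.
type printWAL struct{}

func (printWAL) Write(m WALMessage)     { fmt.Println("write:", m) }
func (printWAL) WriteSync(m WALMessage) { fmt.Println("write+fsync:", m) }

func main() {
	var wal WAL = printWAL{}

	// Messages received from peers: a buffered Write is enough, because
	// losing the unflushed tail of the WAL on a crash is recoverable.
	wal.Write("peer vote")

	// Our own signed messages: WriteSync, so the entry is on disk before
	// the signature leaves the node (guards against double-signing after a crash).
	wal.WriteSync("our precommit")
}
```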
@@ -83,7 +83,7 @@ func WALWithNBlocks(numBlocks int) (data []byte, err error) {
 	numBlocksWritten := make(chan struct{})
 	wal := newByteBufferWAL(logger, NewWALEncoder(wr), int64(numBlocks), numBlocksWritten)
 	// see wal.go#103
-	wal.Save(EndHeightMessage{0})
+	wal.Write(EndHeightMessage{0})
 	consensusState.wal = wal
 
 	if err := consensusState.Start(); err != nil {
@@ -166,7 +166,7 @@ func newByteBufferWAL(logger log.Logger, enc *WALEncoder, nBlocks int64, signalS
 // Save writes message to the internal buffer except when heightToStop is
 // reached, in which case it will signal the caller via signalWhenStopsTo and
 // skip writing.
-func (w *byteBufferWAL) Save(m WALMessage) {
+func (w *byteBufferWAL) Write(m WALMessage) {
 	if w.stopped {
 		w.logger.Debug("WAL already stopped. Not writing message", "msg", m)
 		return
@@ -189,6 +189,10 @@ func (w *byteBufferWAL) Save(m WALMessage) {
 	}
 }
 
+func (w *byteBufferWAL) WriteSync(m WALMessage) {
+	w.Write(m)
+}
+
 func (w *byteBufferWAL) Group() *auto.Group {
 	panic("not implemented")
 }
@@ -47,7 +47,7 @@ func TestWALSearchForEndHeight(t *testing.T) {
 	}
 	walFile := tempWALWithData(walBody)
 
-	wal, err := NewWAL(walFile, false)
+	wal, err := NewWAL(walFile)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -8,9 +8,9 @@ services:
       - "46656-46657:46656-46657"
     environment:
       - ID=0
-      - LOG=${LOG:-tendermint.log}
+      - LOG=$${LOG:-tendermint.log}
     volumes:
-      - ${FOLDER:-./build}:/tendermint:Z
+      - ./build:/tendermint:Z
     networks:
       localnet:
         ipv4_address: 192.167.10.2
@@ -22,9 +22,9 @@ services:
      - "46659-46660:46656-46657"
     environment:
       - ID=1
-      - LOG=${LOG:-tendermint.log}
+      - LOG=$${LOG:-tendermint.log}
     volumes:
-      - ${FOLDER:-./build}:/tendermint:Z
+      - ./build:/tendermint:Z
     networks:
       localnet:
         ipv4_address: 192.167.10.3
@@ -34,11 +34,11 @@ services:
     image: "tendermint/localnode"
     environment:
       - ID=2
-      - LOG=${LOG:-tendermint.log}
+      - LOG=$${LOG:-tendermint.log}
     ports:
       - "46661-46662:46656-46657"
     volumes:
-      - ${FOLDER:-./build}:/tendermint:Z
+      - ./build:/tendermint:Z
     networks:
       localnet:
         ipv4_address: 192.167.10.4
@@ -48,11 +48,11 @@ services:
     image: "tendermint/localnode"
     environment:
       - ID=3
-      - LOG=${LOG:-tendermint.log}
+      - LOG=$${LOG:-tendermint.log}
     ports:
       - "46663-46664:46656-46657"
     volumes:
-      - ${FOLDER:-./build}:/tendermint:Z
+      - ./build:/tendermint:Z
     networks:
       localnet:
         ipv4_address: 192.167.10.5
@@ -1,40 +0,0 @@
-localnode
-=========
-
-It is assumed that you have already `setup docker <https://docs.docker.com/engine/installation/>`__.
-
-Description
------------
-Image for local testnets.
-
-Add the tendermint binary to the image by attaching it in a folder to the `/tendermint` mount point.
-
-It assumes that the configuration was created by the `tendermint testnet` command and it is also attached to the `/tendermint` mount point.
-
-Example:
-This example builds a linux tendermint binary under the `build/` folder, creates tendermint configuration for a single-node validator and runs the node:
-```
-cd $GOPATH/src/github.com/tendermint/tendermint
-
-#Build binary
-make build-linux
-
-#Create configuration
-docker run -e LOG="stdout" -v `pwd`/build:/tendermint tendermint/localnode testnet --o . --v 1
-
-#Run the node
-docker run -v `pwd`/build:/tendermint tendermint/localnode
-```
-
-Logging
--------
-Log is saved under the attached volume, in the `tendermint.log` file. If the `LOG` environment variable is set to `stdout` at start, the log is not saved, but printed on the screen.
-
-Special binaries
-----------------
-If you have multiple binaries with different names, you can specify which one to run with the BINARY environment variable. The path of the binary is relative to the attached volume.
-
-docker-compose.yml
-==================
-This file creates a 4-node network using the localnode image. The nodes of the network are exposed to the host machine on ports 46656-46657, 46659-46660, 46661-46662, 46663-46664 respectively.
@@ -1,33 +0,0 @@
-#!/usr/bin/env sh
-
-##
-## Input parameters
-##
-BINARY=/tendermint/${BINARY:-tendermint}
-ID=${ID:-0}
-LOG=${LOG:-tendermint.log}
-
-##
-## Assert linux binary
-##
-if ! [ -f "${BINARY}" ]; then
-	echo "The binary `basename ${BINARY}` cannot be found. Please add the binary to the shared folder. Please use the BINARY environment variable if the name of the binary is not 'tendermint' E.g.: -e BINARY=tendermint_my_test_version"
-	exit 1
-fi
-BINARY_CHECK="`file $BINARY | grep 'ELF 64-bit LSB executable, x86-64'`"
-if [ -z "${BINARY_CHECK}" ]; then
-	echo "Binary needs to be OS linux, ARCH amd64"
-	exit 1
-fi
-
-##
-## Run binary with all parameters
-##
-export TMHOME="/tendermint/node${ID}"
-
-if [ -d "${TMHOME}/${LOG}" ]; then
-  "$BINARY" $@ | tee "${TMHOME}/${LOG}"
-else
-  "$BINARY" $@
-fi
@@ -183,6 +183,7 @@ Try running these commands:
 
     > commit
     -> code: OK
+    -> data.hex: 0x0000000000000000
 
     > deliver_tx "abc"
     -> code: OK
@@ -194,7 +195,7 @@ Try running these commands:
 
     > commit
     -> code: OK
-    -> data.hex: 0x49DFD15CCDACDEAE9728CB01FBB5E8688CA58B91
+    -> data.hex: 0x0200000000000000
 
     > query "abc"
     -> code: OK
@@ -208,7 +209,7 @@ Try running these commands:
 
    > commit
     -> code: OK
-    -> data.hex: 0x70102DB32280373FBF3F9F89DA2A20CE2CD62B0B
+    -> data.hex: 0x0400000000000000
 
     > query "def"
     -> code: OK
@@ -301,6 +302,7 @@ In another window, start the ``abci-cli console``:
 
     > set_option serial on
     -> code: OK
+    -> log: OK (SetOption doesn't return anything.)
 
     > check_tx 0x00
     -> code: OK
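The expected ``data.hex`` values above change from 20-byte hashes to 8-byte values. Those new values are shaped like a small counter encoded as 8 little-endian bytes; the standalone Go snippet below only demonstrates that encoding and is inferred from the expected output, not taken from the example application's code in this diff:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// appHash encodes a small counter as 8 little-endian bytes, which matches
// the shape of the new expected data.hex values. Illustration only; the
// example app's real Commit implementation is not shown in this diff.
func appHash(size uint64) string {
	b := make([]byte, 8)
	binary.LittleEndian.PutUint64(b, size)
	return fmt.Sprintf("0x%X", b)
}

func main() {
	fmt.Println(appHash(0)) // 0x0000000000000000
	fmt.Println(appHash(2)) // 0x0200000000000000
	fmt.Println(appHash(4)) // 0x0400000000000000
}
```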
@@ -66,15 +66,14 @@ and possibly await a response). And one method to query app-specific
 data from the ABCI application.
 
 Pros:
-* Server code already written
-* Access to block headers to validate merkle proofs (nice for light clients)
-* Basic read/write functionality is supported
+
+- Server code already written
+- Access to block headers to validate merkle proofs (nice for light clients)
+- Basic read/write functionality is supported
 
 Cons:
-* Limited interface to app. All queries must be serialized into
-  []byte (less expressive than JSON over HTTP) and there is no way to push
-  data from ABCI app to the client (eg. notify me if account X receives a
-  transaction)
+
+- Limited interface to app. All queries must be serialized into []byte (less expressive than JSON over HTTP) and there is no way to push data from ABCI app to the client (eg. notify me if account X receives a transaction)
 
 Custom ABCI server
 ~~~~~~~~~~~~~~~~~~
@@ -92,14 +91,19 @@ store. For "reads", we can do any queries we wish that are supported by
 our architecture, using any web technology that is useful. The general
 architecture is shown in the following diagram:
 
-Pros: \* Separates application logic from blockchain logic \* Allows
-much richer, more flexible client-facing API \* Allows pub-sub, watching
-certain fields, etc.
+.. figure:: assets/tm-application-example.png
+
+Pros:
+
+- Separates application logic from blockchain logic
+- Allows much richer, more flexible client-facing API
+- Allows pub-sub, watching certain fields, etc.
 
-Cons: \* Access to ABCI app can be dangerous (be VERY careful not to
-write unless it comes from the validator node) \* No direct access to
-the blockchain headers to verify tx \* You must write your own API (but
-maybe that's a pro...)
+Cons:
+
+- Access to ABCI app can be dangerous (be VERY careful not to write unless it comes from the validator node)
+- No direct access to the blockchain headers to verify tx
+- You must write your own API (but maybe that's a pro...)
 
 Hybrid solutions
 ~~~~~~~~~~~~~~~~
@@ -108,9 +112,13 @@ Likely the least secure but most versatile. The client can access both
 the tendermint node for all blockchain info, as well as a custom app
 server, for complex queries and pub-sub on the abci app.
 
-Pros: All from both above solutions
+Pros:
+
+- All from both above solutions
 
-Cons: Even more complexity; even more attack vectors (less
+Cons:
+
+- Even more complexity; even more attack vectors (less
   security)
 
 Scalability
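The "Limited interface" point above is about the ABCI query connection only carrying raw bytes. A small, self-contained Go sketch of what that serialization step looks like from a client's point of view; the `QueryAccount` type, the address value, and the "/account" path are made up for illustration and are not part of this diff:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// QueryAccount is an invented query shape: whatever the client wants to
// ask the app has to be flattened into []byte before it can travel over
// the ABCI query connection.
type QueryAccount struct {
	Address string `json:"address"`
}

func main() {
	q := QueryAccount{Address: "example-address"}
	data, err := json.Marshal(q) // serialized into []byte, as the doc notes
	if err != nil {
		panic(err)
	}
	fmt.Printf("abci_query path=%q data=%X\n", "/account", data)
}
```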
@@ -178,21 +178,22 @@ connection, to query the local state of the app.
 Mempool Connection
 ~~~~~~~~~~~~~~~~~~
 
-The mempool connection is used *only* for CheckTx requests. Transactions
-are run using CheckTx in the same order they were received by the
-validator. If the CheckTx returns ``OK``, the transaction is kept in
-memory and relayed to other peers in the same order it was received.
-Otherwise, it is discarded.
+The mempool connection is used *only* for CheckTx requests.
+Transactions are run using CheckTx in the same order they were
+received by the validator. If the CheckTx returns ``OK``, the
+transaction is kept in memory and relayed to other peers in the same
+order it was received. Otherwise, it is discarded.
 
-CheckTx requests run concurrently with block processing; so they should
-run against a copy of the main application state which is reset after
-every block. This copy is necessary to track transitions made by a
-sequence of CheckTx requests before they are included in a block. When a
-block is committed, the application must ensure to reset the mempool
-state to the latest committed state. Tendermint Core will then filter
-through all transactions in the mempool, removing any that were included
-in the block, and re-run the rest using CheckTx against the post-Commit
-mempool state.
+CheckTx requests run concurrently with block processing; so they
+should run against a copy of the main application state which is reset
+after every block. This copy is necessary to track transitions made by
+a sequence of CheckTx requests before they are included in a block.
+When a block is committed, the application must ensure to reset the
+mempool state to the latest committed state. Tendermint Core will then
+filter through all transactions in the mempool, removing any that were
+included in the block, and re-run the rest using CheckTx against the
+post-Commit mempool state (this behaviour can be turned off with
+``[mempool] recheck = false``).
 
 .. container:: toggle
 
@@ -226,6 +227,23 @@ mempool state.
 	}
 }
 
+Replay Protection
+^^^^^^^^^^^^^^^^^
+
+To prevent old transactions from being replayed, CheckTx must
+implement replay protection.
+
+Tendermint provides the first defence layer by keeping a lightweight
+in-memory cache of 100k (``[mempool] cache_size``) last transactions in
+the mempool. If Tendermint is just started or the clients sent more
+than 100k transactions, old transactions may be sent to the
+application. So it is important CheckTx implements some logic to
+handle them.
+
+There are cases where a transaction will (or may) become valid in some
+future state, in which case you probably want to disable Tendermint's
+cache. You can do that by setting ``[mempool] cache_size = 0`` in the
+config.
+
 Consensus Connection
 ~~~~~~~~~~~~~~~~~~~~
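The Replay Protection text added above describes behaviour the application has to implement itself. A minimal, self-contained sketch of what that could look like on the app side, assuming a per-sender nonce scheme; the "sender:nonce" tx format and every name below are invented for the example and are not Tendermint APIs or part of this diff:

```go
package main

import (
	"errors"
	"fmt"
	"strconv"
	"strings"
)

// ReplayGuard sketches app-side replay protection for CheckTx.
type ReplayGuard struct {
	lastNonce map[string]uint64 // highest nonce accepted per sender
}

func NewReplayGuard() *ReplayGuard {
	return &ReplayGuard{lastNonce: make(map[string]uint64)}
}

// CheckTx rejects any tx whose nonce is not strictly greater than the last
// one seen for that sender, so an old tx replayed after Tendermint's bounded
// mempool cache has forgotten it is still refused by the application.
func (g *ReplayGuard) CheckTx(tx []byte) error {
	parts := strings.SplitN(string(tx), ":", 2)
	if len(parts) != 2 {
		return errors.New("cannot decode tx")
	}
	nonce, err := strconv.ParseUint(parts[1], 10, 64)
	if err != nil {
		return errors.New("bad nonce")
	}
	if nonce <= g.lastNonce[parts[0]] {
		return errors.New("stale nonce: possible replay")
	}
	g.lastNonce[parts[0]] = nonce
	return nil
}

func main() {
	g := NewReplayGuard()
	fmt.Println(g.CheckTx([]byte("alice:1"))) // <nil>
	fmt.Println(g.CheckTx([]byte("alice:1"))) // stale nonce: possible replay
}
```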
|
BIN
docs/assets/a_plus_t.png
Normal file
BIN
docs/assets/a_plus_t.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 14 KiB |
BIN
docs/assets/tm-application-example.png
Normal file
BIN
docs/assets/tm-application-example.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 26 KiB |
14
docs/conf.py
14
docs/conf.py
@@ -71,7 +71,7 @@ language = None
 # List of patterns, relative to source directory, that match files and
 # directories to ignore when looking for source files.
 # This patterns also effect to html_static_path and html_extra_path
-exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'architecture', 'specification/new-spec', 'examples']
+exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'architecture', 'spec', 'examples']
 
 # The name of the Pygments (syntax highlighting) style to use.
 pygments_style = 'sphinx'
@@ -184,20 +184,10 @@ if os.path.isdir(tools_dir) != True:
 if os.path.isdir(assets_dir) != True:
     os.mkdir(assets_dir)
 
-urllib.urlretrieve(tools_repo+tools_branch+'/ansible/README.rst', filename=tools_dir+'/ansible.rst')
-urllib.urlretrieve(tools_repo+tools_branch+'/ansible/assets/a_plus_t.png', filename=assets_dir+'/a_plus_t.png')
-
 urllib.urlretrieve(tools_repo+tools_branch+'/docker/README.rst', filename=tools_dir+'/docker.rst')
-
-urllib.urlretrieve(tools_repo+tools_branch+'/mintnet-kubernetes/README.rst', filename=tools_dir+'/mintnet-kubernetes.rst')
-urllib.urlretrieve(tools_repo+tools_branch+'/mintnet-kubernetes/assets/gce1.png', filename=assets_dir+'/gce1.png')
-urllib.urlretrieve(tools_repo+tools_branch+'/mintnet-kubernetes/assets/gce2.png', filename=assets_dir+'/gce2.png')
-urllib.urlretrieve(tools_repo+tools_branch+'/mintnet-kubernetes/assets/statefulset.png', filename=assets_dir+'/statefulset.png')
-urllib.urlretrieve(tools_repo+tools_branch+'/mintnet-kubernetes/assets/t_plus_k.png', filename=assets_dir+'/t_plus_k.png')
-
-urllib.urlretrieve(tools_repo+tools_branch+'/terraform-digitalocean/README.rst', filename=tools_dir+'/terraform-digitalocean.rst')
 urllib.urlretrieve(tools_repo+tools_branch+'/tm-bench/README.rst', filename=tools_dir+'/benchmarking.rst')
-urllib.urlretrieve('https://raw.githubusercontent.com/tendermint/tools/master/tm-monitor/README.rst', filename='tools/monitoring.rst')
+urllib.urlretrieve(tools_repo+tools_branch+'/tm-monitor/README.rst', filename='tools/monitoring.rst')
 
 #### abci spec #################################
 
@@ -3,8 +3,7 @@ Deploy a Testnet
 
 Now that we've seen how ABCI works, and even played with a few
 applications on a single validator node, it's time to deploy a test
-network to four validator nodes. For this deployment, we'll use the
-``basecoin`` application.
+network to four validator nodes.
 
 Manual Deployments
 ------------------
@@ -24,67 +23,42 @@ Here are the steps to setting up a testnet manually:
    ``tendermint init``
 4) Compile a list of public keys for each validator into a
    ``genesis.json`` file and replace the existing file with it.
-5) Run ``tendermint node --p2p.persistent_peers=< peer addresses >`` on each node,
+5) Run ``tendermint node --proxy_app=kvstore --p2p.persistent_peers=< peer addresses >`` on each node,
    where ``< peer addresses >`` is a comma separated list of the IP:PORT
   combination for each node. The default port for Tendermint is
    ``46656``. Thus, if the IP addresses of your nodes were
    ``192.168.0.1, 192.168.0.2, 192.168.0.3, 192.168.0.4``, the command
    would look like:
-   ``tendermint node --p2p.persistent_peers=96663a3dd0d7b9d17d4c8211b191af259621c693@192.168.0.1:46656, 429fcf25974313b95673f58d77eacdd434402665@192.168.0.2:46656, 0491d373a8e0fcf1023aaf18c51d6a1d0d4f31bd@192.168.0.3:46656, f9baeaa15fedf5e1ef7448dd60f46c01f1a9e9c4@192.168.0.4:46656``.
+
+::
+
+   tendermint node --proxy_app=kvstore --p2p.persistent_peers=96663a3dd0d7b9d17d4c8211b191af259621c693@192.168.0.1:46656, 429fcf25974313b95673f58d77eacdd434402665@192.168.0.2:46656, 0491d373a8e0fcf1023aaf18c51d6a1d0d4f31bd@192.168.0.3:46656, f9baeaa15fedf5e1ef7448dd60f46c01f1a9e9c4@192.168.0.4:46656
 
 After a few seconds, all the nodes should connect to each other and start
 making blocks! For more information, see the Tendermint Networks section
 of `the guide to using Tendermint <using-tendermint.html>`__.
 
+But wait! Steps 3 and 4 are quite manual. Instead, use `this script <https://github.com/tendermint/tendermint/blob/develop/docs/examples/init_testnet.sh>`__, which does the heavy lifting for you. And it gets better.
+
+Instead of the previously linked script to initialize the files required for a testnet, we have the ``tendermint testnet`` command. By default, running ``tendermint testnet`` will create all the required files, just like the script. Of course, you'll still need to manually edit some fields in the ``config.toml``. Alternatively, see the available flags to auto-populate the ``config.toml`` with the fields that would otherwise be passed in via flags when running ``tendermint node``. As you might imagine, this command is useful for manual or automated deployments.
+
 Automated Deployments
 ---------------------
 
-While the manual deployment is easy enough, an automated deployment is
-usually quicker. The below examples show different tools that can be used
-for automated deployments.
+The easiest and fastest way to get a testnet up in less than 5 minutes.
 
-Automated Deployment using Kubernetes
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Local
+^^^^^
 
-The `mintnet-kubernetes tool <https://github.com/tendermint/tools/tree/master/mintnet-kubernetes>`__
-allows automating the deployment of a Tendermint network on an already
-provisioned Kubernetes cluster. For simple provisioning of a Kubernetes
-cluster, check out the `Google Cloud Platform <https://cloud.google.com/>`__.
+With ``docker`` and ``docker-compose`` installed, run the command:
 
-Automated Deployment using Terraform and Ansible
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-The `terraform-digitalocean tool <https://github.com/tendermint/tools/tree/master/terraform-digitalocean>`__
-allows creating a set of servers on the DigitalOcean cloud.
-
-The `ansible playbooks <https://github.com/tendermint/tools/tree/master/ansible>`__
-allow creating and managing a ``basecoin`` or ``ethermint`` testnet on provisioned servers.
-
-Package Deployment on Linux for developers
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-The ``tendermint`` and ``basecoin`` applications can be installed from RPM or DEB packages on
-Linux machines for development purposes. The packages are configured to be validators on the
-one-node network that the machine represents. The services are not started after installation,
-this way giving an opportunity to reconfigure the applications before starting.
-
-The Ansible playbooks in the previous section use this repository to install ``basecoin``.
-After installation, additional steps are executed to make sure that the multi-node testnet has
-the right configuration before start.
-
-Install from the CentOS/RedHat repository:
-
 ::
 
-    rpm --import https://tendermint-packages.interblock.io/centos/7/os/x86_64/RPM-GPG-KEY-Tendermint
-    wget -O /etc/yum.repos.d/tendermint.repo https://tendermint-packages.interblock.io/centos/7/os/x86_64/tendermint.repo
-    yum install basecoin
-
-Install from the Debian/Ubuntu repository:
+    make localnet-start
 
-::
+from the root of the tendermint repository. This will spin up a 4-node local testnet.
 
-    wget -O - https://tendermint-packages.interblock.io/centos/7/os/x86_64/RPM-GPG-KEY-Tendermint | apt-key add -
-    wget -O /etc/apt/sources.list.d/tendermint.list https://tendermint-packages.interblock.io/debian/tendermint.list
-    apt-get update && apt-get install basecoin
+Cloud
+^^^^^
 
+See the `next section <./terraform-and-ansible.html>`__ for details.
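The ``persistent_peers`` value described above is just a comma-separated list of ``<node ID>@<IP>:<port>`` entries. A small runnable Go helper that builds one from the node IDs and IPs shown in the doc; the helper itself is an illustration, not a Tendermint API:

```go
package main

import (
	"fmt"
	"strings"
)

// persistentPeers joins "<node ID>@<IP>:<port>" entries with commas,
// matching the flag format documented above. Illustration only.
func persistentPeers(ids, ips []string, port int) string {
	peers := make([]string, len(ids))
	for i := range ids {
		peers[i] = fmt.Sprintf("%s@%s:%d", ids[i], ips[i], port)
	}
	return strings.Join(peers, ",")
}

func main() {
	ids := []string{
		"96663a3dd0d7b9d17d4c8211b191af259621c693",
		"429fcf25974313b95673f58d77eacdd434402665",
	}
	ips := []string{"192.168.0.1", "192.168.0.2"}
	fmt.Println("--p2p.persistent_peers=" + persistentPeers(ids, ips, 46656))
}
```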
@@ -10,10 +10,10 @@ documentation](http://tendermint.readthedocs.io/en/master/).
 
 ### Quick Install
 
-On a fresh Ubuntu 16.04 machine can be done with [this script](https://git.io/vNLfY), like so:
+On a fresh Ubuntu 16.04 machine can be done with [this script](https://git.io/vpgEI), like so:
 
 ```
-curl -L https://git.io/vxWlX | bash
+curl -L https://git.io/vpgEI | bash
 source ~/.profile
 ```
 
@@ -24,7 +24,7 @@ The script is also used to facilitate cluster deployment below.
 ### Manual Install
 
 Requires:
-- `go` minimum version 1.9
+- `go` minimum version 1.10
 - `$GOPATH` environment variable must be set
 - `$GOPATH/bin` must be on your `$PATH` (see https://github.com/tendermint/tendermint/wiki/Setting-GOPATH)
 
@@ -125,7 +125,7 @@ addresses below as IP1, IP2, IP3, IP4.
 Then, `ssh` into each machine, and execute [this script](https://git.io/vNLfY):
 
 ```
-curl -L https://git.io/vNLfY | bash
+curl -L https://git.io/vpgEI | bash
 source ~/.profile
 ```
 
@@ -134,10 +134,10 @@ This will install `go` and other dependencies, get the Tendermint source code, t
 Next, `cd` into `docs/examples`. Each command below should be run from each node, in sequence:
 
 ```
-tendermint node --home ./node1 --proxy_app=kvstore --p2p.persistent_peers="3a558bd6f8c97453aa6c2372bb800e8b6ed8e6db@IP1:46656,ccf30d873fddda10a495f42687c8f33472a6569f@IP2:46656,9a4c3de5d6788a76c6ee3cd9ff41e3b45b4cfd14@IP3:46656,58e6f2ab297b3ceae107ba4c8c2898da5c009ff4@IP4:46656"
-tendermint node --home ./node2 --proxy_app=kvstore --p2p.persistent_peers="3a558bd6f8c97453aa6c2372bb800e8b6ed8e6db@IP1:46656,ccf30d873fddda10a495f42687c8f33472a6569f@IP2:46656,9a4c3de5d6788a76c6ee3cd9ff41e3b45b4cfd14@IP3:46656,58e6f2ab297b3ceae107ba4c8c2898da5c009ff4@IP4:46656"
-tendermint node --home ./node3 --proxy_app=kvstore --p2p.persistent_peers="3a558bd6f8c97453aa6c2372bb800e8b6ed8e6db@IP1:46656,ccf30d873fddda10a495f42687c8f33472a6569f@IP2:46656,9a4c3de5d6788a76c6ee3cd9ff41e3b45b4cfd14@IP3:46656,58e6f2ab297b3ceae107ba4c8c2898da5c009ff4@IP4:46656"
-tendermint node --home ./node4 --proxy_app=kvstore --p2p.persistent_peers="3a558bd6f8c97453aa6c2372bb800e8b6ed8e6db@IP1:46656,ccf30d873fddda10a495f42687c8f33472a6569f@IP2:46656,9a4c3de5d6788a76c6ee3cd9ff41e3b45b4cfd14@IP3:46656,58e6f2ab297b3ceae107ba4c8c2898da5c009ff4@IP4:46656"
+tendermint node --home ./node0 --proxy_app=kvstore --p2p.persistent_peers="167b80242c300bf0ccfb3ced3dec60dc2a81776e@IP1:46656,3c7a5920811550c04bf7a0b2f1e02ab52317b5e6@IP2:46656,303a1a4312c30525c99ba66522dd81cca56a361a@IP3:46656,b686c2a7f4b1b46dca96af3a0f31a6a7beae0be4@IP4:46656"
+tendermint node --home ./node1 --proxy_app=kvstore --p2p.persistent_peers="167b80242c300bf0ccfb3ced3dec60dc2a81776e@IP1:46656,3c7a5920811550c04bf7a0b2f1e02ab52317b5e6@IP2:46656,303a1a4312c30525c99ba66522dd81cca56a361a@IP3:46656,b686c2a7f4b1b46dca96af3a0f31a6a7beae0be4@IP4:46656"
+tendermint node --home ./node2 --proxy_app=kvstore --p2p.persistent_peers="167b80242c300bf0ccfb3ced3dec60dc2a81776e@IP1:46656,3c7a5920811550c04bf7a0b2f1e02ab52317b5e6@IP2:46656,303a1a4312c30525c99ba66522dd81cca56a361a@IP3:46656,b686c2a7f4b1b46dca96af3a0f31a6a7beae0be4@IP4:46656"
+tendermint node --home ./node3 --proxy_app=kvstore --p2p.persistent_peers="167b80242c300bf0ccfb3ced3dec60dc2a81776e@IP1:46656,3c7a5920811550c04bf7a0b2f1e02ab52317b5e6@IP2:46656,303a1a4312c30525c99ba66522dd81cca56a361a@IP3:46656,b686c2a7f4b1b46dca96af3a0f31a6a7beae0be4@IP4:46656"
 ```
 
 Note that after the third node is started, blocks will start to stream in
69   docs/examples/init_testnet.sh (new file)
@@ -0,0 +1,69 @@
+#!/usr/bin/env bash
+
+# make all the files
+tendermint init --home ./tester/node0
+tendermint init --home ./tester/node1
+tendermint init --home ./tester/node2
+tendermint init --home ./tester/node3
+
+file0=./tester/node0/config/genesis.json
+file1=./tester/node1/config/genesis.json
+file2=./tester/node2/config/genesis.json
+file3=./tester/node3/config/genesis.json
+
+genesis_time=`cat $file0 | jq '.genesis_time'`
+chain_id=`cat $file0 | jq '.chain_id'`
+
+value0=`cat $file0 | jq '.validators[0].pub_key.value'`
+value1=`cat $file1 | jq '.validators[0].pub_key.value'`
+value2=`cat $file2 | jq '.validators[0].pub_key.value'`
+value3=`cat $file3 | jq '.validators[0].pub_key.value'`
+
+rm $file0
+rm $file1
+rm $file2
+rm $file3
+
+echo "{
+  \"genesis_time\": $genesis_time,
+  \"chain_id\": $chain_id,
+  \"validators\": [
+    {
+      \"pub_key\": {
+        \"type\": \"AC26791624DE60\",
+        \"value\": $value0
+      },
+      \"power:\": 10,
+      \"name\":, \"\"
+    },
+    {
+      \"pub_key\": {
+        \"type\": \"AC26791624DE60\",
+        \"value\": $value1
+      },
+      \"power:\": 10,
+      \"name\":, \"\"
+    },
+    {
+      \"pub_key\": {
+        \"type\": \"AC26791624DE60\",
+        \"value\": $value2
+      },
+      \"power:\": 10,
+      \"name\":, \"\"
+    },
+    {
+      \"pub_key\": {
+        \"type\": \"AC26791624DE60\",
+        \"value\": $value3
+      },
+      \"power:\": 10,
+      \"name\":, \"\"
+    }
+  ],
+  \"app_hash\": \"\"
+}" >> $file0
+
+cp $file0 $file1
+cp $file0 $file2
+cp $file2 $file3
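The script above initializes four nodes, pulls each node's validator public key out of its generated ``genesis.json`` with ``jq``, and writes one merged genesis file that every node shares. A hedged Go sketch of the same merge step, useful mainly to make the data flow explicit; the paths mirror the script, but this program is an illustration and not part of the diff:

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// genesisDoc carries only the fields the script touches.
type genesisDoc struct {
	GenesisTime string            `json:"genesis_time"`
	ChainID     string            `json:"chain_id"`
	Validators  []json.RawMessage `json:"validators"`
	AppHash     string            `json:"app_hash"`
}

func main() {
	var merged genesisDoc
	// Collect the first validator entry from each node's generated genesis.
	for i := 0; i < 4; i++ {
		path := fmt.Sprintf("./tester/node%d/config/genesis.json", i)
		raw, err := os.ReadFile(path)
		if err != nil {
			panic(err)
		}
		var g genesisDoc
		if err := json.Unmarshal(raw, &g); err != nil {
			panic(err)
		}
		if i == 0 {
			merged.GenesisTime, merged.ChainID = g.GenesisTime, g.ChainID
		}
		merged.Validators = append(merged.Validators, g.Validators[0])
	}
	out, err := json.MarshalIndent(merged, "", "  ")
	if err != nil {
		panic(err)
	}
	// Every node gets the same merged genesis file.
	for i := 0; i < 4; i++ {
		path := fmt.Sprintf("./tester/node%d/config/genesis.json", i)
		if err := os.WriteFile(path, out, 0644); err != nil {
			panic(err)
		}
	}
}
```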
@@ -26,7 +26,7 @@ go get $REPO
 cd $GOPATH/src/$REPO
 
 ## build
-git checkout v0.18.0
+git checkout master
 make get_tools
 make get_vendor_deps
 make install
169
docs/examples/node0/config/config.toml
Normal file
169
docs/examples/node0/config/config.toml
Normal file
@@ -0,0 +1,169 @@
|
|||||||
|
# This is a TOML config file.
|
||||||
|
# For more information, see https://github.com/toml-lang/toml
|
||||||
|
|
||||||
|
##### main base config options #####
|
||||||
|
|
||||||
|
# TCP or UNIX socket address of the ABCI application,
|
||||||
|
# or the name of an ABCI application compiled in with the Tendermint binary
|
||||||
|
proxy_app = "tcp://127.0.0.1:46658"
|
||||||
|
|
||||||
|
# A custom human readable name for this node
|
||||||
|
moniker = "alpha"
|
||||||
|
|
||||||
|
# If this node is many blocks behind the tip of the chain, FastSync
|
||||||
|
# allows them to catchup quickly by downloading blocks in parallel
|
||||||
|
# and verifying their commits
|
||||||
|
fast_sync = true
|
||||||
|
|
||||||
|
# Database backend: leveldb | memdb
|
||||||
|
db_backend = "leveldb"
|
||||||
|
|
||||||
|
# Database directory
|
||||||
|
db_path = "data"
|
||||||
|
|
||||||
|
# Output level for logging, including package level options
|
||||||
|
log_level = "main:info,state:info,*:error"
|
||||||
|
|
||||||
|
##### additional base config options #####
|
||||||
|
|
||||||
|
# Path to the JSON file containing the initial validator set and other meta data
|
||||||
|
genesis_file = "config/genesis.json"
|
||||||
|
|
||||||
|
# Path to the JSON file containing the private key to use as a validator in the consensus protocol
|
||||||
|
priv_validator_file = "config/priv_validator.json"
|
||||||
|
|
||||||
|
# Path to the JSON file containing the private key to use for node authentication in the p2p protocol
|
||||||
|
node_key_file = "config/node_key.json"
|
||||||
|
|
||||||
|
# Mechanism to connect to the ABCI application: socket | grpc
|
||||||
|
abci = "socket"
|
||||||
|
|
||||||
|
# TCP or UNIX socket address for the profiling server to listen on
|
||||||
|
prof_laddr = ""
|
||||||
|
|
||||||
|
# If true, query the ABCI app on connecting to a new peer
|
||||||
|
# so the app can decide if we should keep the connection or not
|
||||||
|
filter_peers = false
|
||||||
|
|
||||||
|
##### advanced configuration options #####
|
||||||
|
|
||||||
|
##### rpc server configuration options #####
|
||||||
|
[rpc]
|
||||||
|
|
||||||
|
# TCP or UNIX socket address for the RPC server to listen on
|
||||||
|
laddr = "tcp://0.0.0.0:46657"
|
||||||
|
|
||||||
|
# TCP or UNIX socket address for the gRPC server to listen on
|
||||||
|
# NOTE: This server only supports /broadcast_tx_commit
|
||||||
|
grpc_laddr = ""
|
||||||
|
|
||||||
|
# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool
|
||||||
|
unsafe = false
|
||||||
|
|
||||||
|
##### peer to peer configuration options #####
|
||||||
|
[p2p]
|
||||||
|
|
||||||
|
# Address to listen for incoming connections
|
||||||
|
laddr = "tcp://0.0.0.0:46656"
|
||||||
|
|
||||||
|
# Comma separated list of seed nodes to connect to
|
||||||
|
seeds = ""
|
||||||
|
|
||||||
|
# Comma separated list of nodes to keep persistent connections to
|
||||||
|
# Do not add private peers to this list if you don't want them advertised
|
||||||
|
persistent_peers = ""
|
||||||
|
|
||||||
|
# Path to address book
|
||||||
|
addr_book_file = "config/addrbook.json"
|
||||||
|
|
||||||
|
# Set true for strict address routability rules
|
||||||
|
addr_book_strict = true
|
||||||
|
|
||||||
|
# Time to wait before flushing messages out on the connection, in ms
|
||||||
|
flush_throttle_timeout = 100
|
||||||
|
|
||||||
|
# Maximum number of peers to connect to
|
||||||
|
max_num_peers = 50
|
||||||
|
|
||||||
|
# Maximum size of a message packet payload, in bytes
|
||||||
|
max_packet_msg_payload_size = 1024
|
||||||
|
|
||||||
|
# Rate at which packets can be sent, in bytes/second
|
||||||
|
send_rate = 512000
|
||||||
|
|
||||||
|
# Rate at which packets can be received, in bytes/second
|
||||||
|
recv_rate = 512000
|
||||||
|
|
||||||
|
# Set true to enable the peer-exchange reactor
|
||||||
|
pex = true
|
||||||
|
|
||||||
|
# Seed mode, in which node constantly crawls the network and looks for
|
||||||
|
# peers. If another node asks it for addresses, it responds and disconnects.
|
||||||
|
#
|
||||||
|
# Does not work if the peer-exchange reactor is disabled.
|
||||||
|
seed_mode = false
|
||||||
|
|
||||||
|
# Authenticated encryption
|
||||||
|
auth_enc = true
|
||||||
|
|
||||||
|
# Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
|
||||||
|
private_peer_ids = ""
|
||||||
|
|
||||||
|
##### mempool configuration options #####
|
||||||
|
[mempool]
|
||||||
|
|
||||||
|
recheck = true
|
||||||
|
recheck_empty = true
|
||||||
|
broadcast = true
|
||||||
|
wal_dir = "data/mempool.wal"
|
||||||
|
|
||||||
|
##### consensus configuration options #####
|
||||||
|
[consensus]
|
||||||
|
|
||||||
|
wal_file = "data/cs.wal/wal"
|
||||||
|
|
||||||
|
# All timeouts are in milliseconds
|
||||||
|
timeout_propose = 3000
|
||||||
|
timeout_propose_delta = 500
|
||||||
|
timeout_prevote = 1000
|
||||||
|
timeout_prevote_delta = 500
|
||||||
|
timeout_precommit = 1000
|
||||||
|
timeout_precommit_delta = 500
|
||||||
|
timeout_commit = 1000
|
||||||
|
|
||||||
|
# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
|
||||||
|
skip_timeout_commit = false
|
||||||
|
|
||||||
|
# BlockSize
|
||||||
|
max_block_size_txs = 10000
|
||||||
|
max_block_size_bytes = 1
|
||||||
|
|
||||||
|
# EmptyBlocks mode and possible interval between empty blocks in seconds
|
||||||
|
create_empty_blocks = true
|
||||||
|
create_empty_blocks_interval = 0
|
||||||
|
|
||||||
|
# Reactor sleep duration parameters are in milliseconds
|
||||||
|
peer_gossip_sleep_duration = 100
|
||||||
|
peer_query_maj23_sleep_duration = 2000
|
||||||
|
|
||||||
|
##### transactions indexer configuration options #####
|
||||||
|
[tx_index]
|
||||||
|
|
||||||
|
# What indexer to use for transactions
|
||||||
|
#
|
||||||
|
# Options:
|
||||||
|
# 1) "null" (default)
|
||||||
|
# 2) "kv" - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
|
||||||
|
indexer = "kv"
|
||||||
|
|
||||||
|
# Comma-separated list of tags to index (by default the only tag is tx hash)
|
||||||
|
#
|
||||||
|
# It's recommended to index only a subset of tags due to possible memory
|
||||||
|
# bloat. This is, of course, depends on the indexer's DB and the volume of
|
||||||
|
# transactions.
|
||||||
|
index_tags = ""
|
||||||
|
|
||||||
|
# When set to true, tells indexer to index all tags. Note this may be not
|
||||||
|
# desirable (see the comment above). IndexTags has a precedence over
|
||||||
|
# IndexAllTags (i.e. when given both, IndexTags will be indexed).
|
||||||
|
index_all_tags = false
|
39
docs/examples/node0/config/genesis.json
Normal file
39
docs/examples/node0/config/genesis.json
Normal file
@@ -0,0 +1,39 @@
|
|||||||
|
{
|
||||||
|
"genesis_time": "0001-01-01T00:00:00Z",
|
||||||
|
"chain_id": "test-chain-A2i3OZ",
|
||||||
|
"validators": [
|
||||||
|
{
|
||||||
|
"pub_key": {
|
||||||
|
"type": "AC26791624DE60",
|
||||||
|
"value": "D+k4AdjnYPWbB9wmad137Bdpo/kAulOoTRQrLy/Qc4k="
|
||||||
|
},
|
||||||
|
"power": 10,
|
||||||
|
"name": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"pub_key": {
|
||||||
|
"type": "AC26791624DE60",
|
||||||
|
"value": "b56N5GCR1adcVRuENjfKw/mrm2dkhT7wNZXV/SDsKsU="
|
||||||
|
},
|
||||||
|
"power": 10,
|
||||||
|
"name": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"pub_key": {
|
||||||
|
"type": "AC26791624DE60",
|
||||||
|
"value": "IgZDpJvGA0TAamicA8ircy+RX/BkUlj6DXwM791ywIU="
|
||||||
|
},
|
||||||
|
"power": 10,
|
||||||
|
"name": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"pub_key": {
|
||||||
|
"type": "AC26791624DE60",
|
||||||
|
"value": "KGAZfxZvIZ7abbeIQ85U1ECG6+I62KSdaH8ulc0+OiU="
|
||||||
|
},
|
||||||
|
"power": 10,
|
||||||
|
"name": ""
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"app_hash": ""
|
||||||
|
}
|
1
docs/examples/node0/config/node_key.json
Normal file
1
docs/examples/node0/config/node_key.json
Normal file
@@ -0,0 +1 @@
|
|||||||
|
{"priv_key":{"type":"954568A3288910","value":"7lY+k6EDllG8Q9gVbF5313t/ag2YGkBVKdVa0YHJ9xO5k0w3Q/hke0Z7UFT1KgVDGRUEKzwAwwjwFQUvgF0ZWg=="}}
|
14
docs/examples/node0/config/priv_validator.json
Normal file
14
docs/examples/node0/config/priv_validator.json
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
{
|
||||||
|
"address": "122A9414774A2FCAD026201DA477EF3F41970EF0",
|
||||||
|
"pub_key": {
|
||||||
|
"type": "AC26791624DE60",
|
||||||
|
"value": "D+k4AdjnYPWbB9wmad137Bdpo/kAulOoTRQrLy/Qc4k="
|
||||||
|
},
|
||||||
|
"last_height": 0,
|
||||||
|
"last_round": 0,
|
||||||
|
"last_step": 0,
|
||||||
|
"priv_key": {
|
||||||
|
"type": "954568A3288910",
|
||||||
|
"value": "YLxp3ho+kySgAnzjBptbxDzSGw2ntGZLsIHQsaVxY/cP6TgB2Odg9ZsH3CZp3XfsF2mj+QC6U6hNFCsvL9BziQ=="
|
||||||
|
}
|
||||||
|
}
|
@@ -1,15 +0,0 @@
|
|||||||
# This is a TOML config file.
|
|
||||||
# For more information, see https://github.com/toml-lang/toml
|
|
||||||
|
|
||||||
proxy_app = "tcp://127.0.0.1:46658"
|
|
||||||
moniker = "penguin"
|
|
||||||
fast_sync = true
|
|
||||||
db_backend = "leveldb"
|
|
||||||
log_level = "state:info,*:error"
|
|
||||||
|
|
||||||
[rpc]
|
|
||||||
laddr = "tcp://0.0.0.0:46657"
|
|
||||||
|
|
||||||
[p2p]
|
|
||||||
laddr = "tcp://0.0.0.0:46656"
|
|
||||||
seeds = ""
|
|
169
docs/examples/node1/config/config.toml
Normal file
169
docs/examples/node1/config/config.toml
Normal file
@@ -0,0 +1,169 @@
|
|||||||
|
# This is a TOML config file.
|
||||||
|
# For more information, see https://github.com/toml-lang/toml
|
||||||
|
|
||||||
|
##### main base config options #####
|
||||||
|
|
||||||
|
# TCP or UNIX socket address of the ABCI application,
|
||||||
|
# or the name of an ABCI application compiled in with the Tendermint binary
|
||||||
|
proxy_app = "tcp://127.0.0.1:46658"
|
||||||
|
|
||||||
|
# A custom human readable name for this node
|
||||||
|
moniker = "bravo"
|
||||||
|
|
||||||
|
# If this node is many blocks behind the tip of the chain, FastSync
|
||||||
|
# allows them to catchup quickly by downloading blocks in parallel
|
||||||
|
# and verifying their commits
|
||||||
|
fast_sync = true
|
||||||
|
|
||||||
|
# Database backend: leveldb | memdb
|
||||||
|
db_backend = "leveldb"
|
||||||
|
|
||||||
|
# Database directory
|
||||||
|
db_path = "data"
|
||||||
|
|
||||||
|
# Output level for logging, including package level options
|
||||||
|
log_level = "main:info,state:info,*:error"
|
||||||
|
|
||||||
|
##### additional base config options #####
|
||||||
|
|
||||||
|
# Path to the JSON file containing the initial validator set and other meta data
|
||||||
|
genesis_file = "config/genesis.json"
|
||||||
|
|
||||||
|
# Path to the JSON file containing the private key to use as a validator in the consensus protocol
|
||||||
|
priv_validator_file = "config/priv_validator.json"
|
||||||
|
|
||||||
|
# Path to the JSON file containing the private key to use for node authentication in the p2p protocol
|
||||||
|
node_key_file = "config/node_key.json"
|
||||||
|
|
||||||
|
# Mechanism to connect to the ABCI application: socket | grpc
|
||||||
|
abci = "socket"
|
||||||
|
|
||||||
|
# TCP or UNIX socket address for the profiling server to listen on
|
||||||
|
prof_laddr = ""
|
||||||
|
|
||||||
|
# If true, query the ABCI app on connecting to a new peer
|
||||||
|
# so the app can decide if we should keep the connection or not
|
||||||
|
filter_peers = false
|
||||||
|
|
||||||
|
##### advanced configuration options #####
|
||||||
|
|
||||||
|
##### rpc server configuration options #####
|
||||||
|
[rpc]
|
||||||
|
|
||||||
|
# TCP or UNIX socket address for the RPC server to listen on
|
||||||
|
laddr = "tcp://0.0.0.0:46657"
|
||||||
|
|
||||||
|
# TCP or UNIX socket address for the gRPC server to listen on
|
||||||
|
# NOTE: This server only supports /broadcast_tx_commit
|
||||||
|
grpc_laddr = ""
|
||||||
|
|
||||||
|
# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool
|
||||||
|
unsafe = false
|
||||||
|
|
||||||
|
##### peer to peer configuration options #####
|
||||||
|
[p2p]
|
||||||
|
|
||||||
|
# Address to listen for incoming connections
|
||||||
|
laddr = "tcp://0.0.0.0:46656"
|
||||||
|
|
||||||
|
# Comma separated list of seed nodes to connect to
|
||||||
|
seeds = ""
|
||||||
|
|
||||||
|
# Comma separated list of nodes to keep persistent connections to
|
||||||
|
# Do not add private peers to this list if you don't want them advertised
|
||||||
|
persistent_peers = ""
|
||||||
|
|
||||||
|
# Path to address book
|
||||||
|
addr_book_file = "config/addrbook.json"
|
||||||
|
|
||||||
|
# Set true for strict address routability rules
|
||||||
|
addr_book_strict = true
|
||||||
|
|
||||||
|
# Time to wait before flushing messages out on the connection, in ms
|
||||||
|
flush_throttle_timeout = 100
|
||||||
|
|
||||||
|
# Maximum number of peers to connect to
|
||||||
|
max_num_peers = 50
|
||||||
|
|
||||||
|
# Maximum size of a message packet payload, in bytes
|
||||||
|
max_packet_msg_payload_size = 1024
|
||||||
|
|
||||||
|
# Rate at which packets can be sent, in bytes/second
|
||||||
|
send_rate = 512000
|
||||||
|
|
||||||
|
# Rate at which packets can be received, in bytes/second
|
||||||
|
recv_rate = 512000
|
||||||
|
|
||||||
|
# Set true to enable the peer-exchange reactor
|
||||||
|
pex = true
|
||||||
|
|
||||||
|
# Seed mode, in which node constantly crawls the network and looks for
|
||||||
|
# peers. If another node asks it for addresses, it responds and disconnects.
|
||||||
|
#
|
||||||
|
# Does not work if the peer-exchange reactor is disabled.
|
||||||
|
seed_mode = false
|
||||||
|
|
||||||
|
# Authenticated encryption
|
||||||
|
auth_enc = true
|
||||||
|
|
||||||
|
# Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
|
||||||
|
private_peer_ids = ""
|
||||||
|
|
||||||
|
##### mempool configuration options #####
|
||||||
|
[mempool]
|
||||||
|
|
||||||
|
recheck = true
|
||||||
|
recheck_empty = true
|
||||||
|
broadcast = true
|
||||||
|
wal_dir = "data/mempool.wal"
|
||||||
|
|
||||||
|
##### consensus configuration options #####
|
||||||
|
[consensus]
|
||||||
|
|
||||||
|
wal_file = "data/cs.wal/wal"
|
||||||
|
|
||||||
|
# All timeouts are in milliseconds
|
||||||
|
timeout_propose = 3000
|
||||||
|
timeout_propose_delta = 500
|
||||||
|
timeout_prevote = 1000
|
||||||
|
timeout_prevote_delta = 500
|
||||||
|
timeout_precommit = 1000
|
||||||
|
timeout_precommit_delta = 500
|
||||||
|
timeout_commit = 1000
|
||||||
|
|
||||||
|
# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
|
||||||
|
skip_timeout_commit = false
|
||||||
|
|
||||||
|
# BlockSize
|
||||||
|
max_block_size_txs = 10000
|
||||||
|
max_block_size_bytes = 1
|
||||||
|
|
||||||
|
# EmptyBlocks mode and possible interval between empty blocks in seconds
|
||||||
|
create_empty_blocks = true
|
||||||
|
create_empty_blocks_interval = 0
|
||||||
|
|
||||||
|
# Reactor sleep duration parameters are in milliseconds
|
||||||
|
peer_gossip_sleep_duration = 100
|
||||||
|
peer_query_maj23_sleep_duration = 2000
|
||||||
|
|
||||||
|
##### transactions indexer configuration options #####
|
||||||
|
[tx_index]
|
||||||
|
|
||||||
|
# What indexer to use for transactions
|
||||||
|
#
|
||||||
|
# Options:
|
||||||
|
# 1) "null" (default)
|
||||||
|
# 2) "kv" - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
|
||||||
|
indexer = "kv"
|
||||||
|
|
||||||
|
# Comma-separated list of tags to index (by default the only tag is tx hash)
|
||||||
|
#
|
||||||
|
# It's recommended to index only a subset of tags due to possible memory
|
||||||
|
# bloat. This is, of course, depends on the indexer's DB and the volume of
|
||||||
|
# transactions.
|
||||||
|
index_tags = ""
|
||||||
|
|
||||||
|
# When set to true, tells indexer to index all tags. Note this may be not
|
||||||
|
# desirable (see the comment above). IndexTags has a precedence over
|
||||||
|
# IndexAllTags (i.e. when given both, IndexTags will be indexed).
|
||||||
|
index_all_tags = false
|
39
docs/examples/node1/config/genesis.json
Normal file
39
docs/examples/node1/config/genesis.json
Normal file
@@ -0,0 +1,39 @@
|
|||||||
|
{
|
||||||
|
"genesis_time": "0001-01-01T00:00:00Z",
|
||||||
|
"chain_id": "test-chain-A2i3OZ",
|
||||||
|
"validators": [
|
||||||
|
{
|
||||||
|
"pub_key": {
|
||||||
|
"type": "AC26791624DE60",
|
||||||
|
"value": "D+k4AdjnYPWbB9wmad137Bdpo/kAulOoTRQrLy/Qc4k="
|
||||||
|
},
|
||||||
|
"power": 10,
|
||||||
|
"name": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"pub_key": {
|
||||||
|
"type": "AC26791624DE60",
|
||||||
|
"value": "b56N5GCR1adcVRuENjfKw/mrm2dkhT7wNZXV/SDsKsU="
|
||||||
|
},
|
||||||
|
"power": 10,
|
||||||
|
"name": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"pub_key": {
|
||||||
|
"type": "AC26791624DE60",
|
||||||
|
"value": "IgZDpJvGA0TAamicA8ircy+RX/BkUlj6DXwM791ywIU="
|
||||||
|
},
|
||||||
|
"power": 10,
|
||||||
|
"name": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"pub_key": {
|
||||||
|
"type": "AC26791624DE60",
|
||||||
|
"value": "KGAZfxZvIZ7abbeIQ85U1ECG6+I62KSdaH8ulc0+OiU="
|
||||||
|
},
|
||||||
|
"power": 10,
|
||||||
|
"name": ""
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"app_hash": ""
|
||||||
|
}
|
1
docs/examples/node1/config/node_key.json
Normal file
1
docs/examples/node1/config/node_key.json
Normal file
@@ -0,0 +1 @@
|
|||||||
|
{"priv_key":{"type":"954568A3288910","value":"H71dc/TIG7nTselfa9nG0WRArXLKYnm7P5eFCk2lk8ASKQ3sIHpbdxCSHQD/RcdHe7TiabJeuOssNPvPWiyQEQ=="}}
|
14
docs/examples/node1/config/priv_validator.json
Normal file
14
docs/examples/node1/config/priv_validator.json
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
{
|
||||||
|
"address": "BEA1B57F5806CF9AC4D54C8CF806DED5C0F102E1",
|
||||||
|
"pub_key": {
|
||||||
|
"type": "AC26791624DE60",
|
||||||
|
"value": "b56N5GCR1adcVRuENjfKw/mrm2dkhT7wNZXV/SDsKsU="
|
||||||
|
},
|
||||||
|
"last_height": 0,
|
||||||
|
"last_round": 0,
|
||||||
|
"last_step": 0,
|
||||||
|
"priv_key": {
|
||||||
|
"type": "954568A3288910",
|
||||||
|
"value": "o0IqrHSPtd5YqGefodWxpJuRzvuVBjgbH785vbMgk7Vvno3kYJHVp1xVG4Q2N8rD+aubZ2SFPvA1ldX9IOwqxQ=="
|
||||||
|
}
|
||||||
|
}
|
@@ -1,42 +0,0 @@
|
|||||||
{
|
|
||||||
"genesis_time":"0001-01-01T00:00:00Z",
|
|
||||||
"chain_id":"test-chain-wt7apy",
|
|
||||||
"validators":[
|
|
||||||
{
|
|
||||||
"pub_key":{
|
|
||||||
"type":"ed25519",
|
|
||||||
"data":"F08446C80A33E10D620E21450821B58D053778528F2B583D423B3E46EC647D30"
|
|
||||||
},
|
|
||||||
"power":10,
|
|
||||||
"name":"node1"
|
|
||||||
}
|
|
||||||
,
|
|
||||||
{
|
|
||||||
"pub_key":{
|
|
||||||
"type":"ed25519",
|
|
||||||
"data": "A8423F70A9E512643B4B00F7C3701ECAD1F31B0A1FAA45852C41046353B9A07F"
|
|
||||||
},
|
|
||||||
"power":10,
|
|
||||||
"name":"node2"
|
|
||||||
}
|
|
||||||
,
|
|
||||||
{
|
|
||||||
"pub_key":{
|
|
||||||
"type":"ed25519",
|
|
||||||
"data": "E52EFFAEDFE1D618ECDA71DE3B23592B3612CAABA0C10826E4C3120B2198C29A"
|
|
||||||
},
|
|
||||||
"power":10,
|
|
||||||
"name":"node3"
|
|
||||||
}
|
|
||||||
,
|
|
||||||
{
|
|
||||||
"pub_key":{
|
|
||||||
"type":"ed25519",
|
|
||||||
"data": "2B8FC09C07955A02998DFE5AF1AAD1C44115ECA7635FF51A867CF4265D347C07"
|
|
||||||
},
|
|
||||||
"power":10,
|
|
||||||
"name":"node4"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"app_hash":""
|
|
||||||
}
|
|
@@ -1,6 +0,0 @@
|
|||||||
{
|
|
||||||
"priv_key" : {
|
|
||||||
"data" : "DA9BAABEA7211A6D93D9A1986B4279EAB3021FAA1653D459D53E6AB4D1CFB4C69BF7D52E48CF00AC5779AA0A6D3C368955D5636A677F72370B8ED19989714CFC",
|
|
||||||
"type" : "ed25519"
|
|
||||||
}
|
|
||||||
}
|
|
@@ -1,15 +0,0 @@
|
|||||||
{
|
|
||||||
"address":"4DC2756029CE0D8F8C6C3E4C3CE6EE8C30AF352F",
|
|
||||||
"pub_key":{
|
|
||||||
"type":"ed25519",
|
|
||||||
"data":"F08446C80A33E10D620E21450821B58D053778528F2B583D423B3E46EC647D30"
|
|
||||||
},
|
|
||||||
"last_height":0,
|
|
||||||
"last_round":0,
|
|
||||||
"last_step":0,
|
|
||||||
"last_signature":null,
|
|
||||||
"priv_key":{
|
|
||||||
"type":"ed25519",
|
|
||||||
"data":"4D3648E1D93C8703E436BFF814728B6BD270CFDFD686DF5385E8ACBEB7BE2D7DF08446C80A33E10D620E21450821B58D053778528F2B583D423B3E46EC647D30"
|
|
||||||
}
|
|
||||||
}
|
|
@@ -1,15 +0,0 @@
|
|||||||
# This is a TOML config file.
|
|
||||||
# For more information, see https://github.com/toml-lang/toml
|
|
||||||
|
|
||||||
proxy_app = "tcp://127.0.0.1:46658"
|
|
||||||
moniker = "penguin"
|
|
||||||
fast_sync = true
|
|
||||||
db_backend = "leveldb"
|
|
||||||
log_level = "state:info,*:error"
|
|
||||||
|
|
||||||
[rpc]
|
|
||||||
laddr = "tcp://0.0.0.0:46657"
|
|
||||||
|
|
||||||
[p2p]
|
|
||||||
laddr = "tcp://0.0.0.0:46656"
|
|
||||||
seeds = ""
|
|
169
docs/examples/node2/config/config.toml
Normal file
169
docs/examples/node2/config/config.toml
Normal file
@@ -0,0 +1,169 @@
|
|||||||
|
# This is a TOML config file.
|
||||||
|
# For more information, see https://github.com/toml-lang/toml
|
||||||
|
|
||||||
|
##### main base config options #####
|
||||||
|
|
||||||
|
# TCP or UNIX socket address of the ABCI application,
|
||||||
|
# or the name of an ABCI application compiled in with the Tendermint binary
|
||||||
|
proxy_app = "tcp://127.0.0.1:46658"
|
||||||
|
|
||||||
|
# A custom human readable name for this node
|
||||||
|
moniker = "charlie"
|
||||||
|
|
||||||
|
# If this node is many blocks behind the tip of the chain, FastSync
|
||||||
|
# allows it to catch up quickly by downloading blocks in parallel
|
||||||
|
# and verifying their commits
|
||||||
|
fast_sync = true
|
||||||
|
|
||||||
|
# Database backend: leveldb | memdb
|
||||||
|
db_backend = "leveldb"
|
||||||
|
|
||||||
|
# Database directory
|
||||||
|
db_path = "data"
|
||||||
|
|
||||||
|
# Output level for logging, including package level options
|
||||||
|
log_level = "main:info,state:info,*:error"
|
||||||
|
|
||||||
|
##### additional base config options #####
|
||||||
|
|
||||||
|
# Path to the JSON file containing the initial validator set and other meta data
|
||||||
|
genesis_file = "config/genesis.json"
|
||||||
|
|
||||||
|
# Path to the JSON file containing the private key to use as a validator in the consensus protocol
|
||||||
|
priv_validator_file = "config/priv_validator.json"
|
||||||
|
|
||||||
|
# Path to the JSON file containing the private key to use for node authentication in the p2p protocol
|
||||||
|
node_key_file = "config/node_key.json"
|
||||||
|
|
||||||
|
# Mechanism to connect to the ABCI application: socket | grpc
|
||||||
|
abci = "socket"
|
||||||
|
|
||||||
|
# TCP or UNIX socket address for the profiling server to listen on
|
||||||
|
prof_laddr = ""
|
||||||
|
|
||||||
|
# If true, query the ABCI app on connecting to a new peer
|
||||||
|
# so the app can decide if we should keep the connection or not
|
||||||
|
filter_peers = false
|
||||||
|
|
||||||
|
##### advanced configuration options #####
|
||||||
|
|
||||||
|
##### rpc server configuration options #####
|
||||||
|
[rpc]
|
||||||
|
|
||||||
|
# TCP or UNIX socket address for the RPC server to listen on
|
||||||
|
laddr = "tcp://0.0.0.0:46657"
|
||||||
|
|
||||||
|
# TCP or UNIX socket address for the gRPC server to listen on
|
||||||
|
# NOTE: This server only supports /broadcast_tx_commit
|
||||||
|
grpc_laddr = ""
|
||||||
|
|
||||||
|
# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool
|
||||||
|
unsafe = false
|
||||||
|
|
||||||
|
##### peer to peer configuration options #####
|
||||||
|
[p2p]
|
||||||
|
|
||||||
|
# Address to listen for incoming connections
|
||||||
|
laddr = "tcp://0.0.0.0:46656"
|
||||||
|
|
||||||
|
# Comma separated list of seed nodes to connect to
|
||||||
|
seeds = ""
|
||||||
|
|
||||||
|
# Comma separated list of nodes to keep persistent connections to
|
||||||
|
# Do not add private peers to this list if you don't want them advertised
|
||||||
|
persistent_peers = ""
|
||||||
|
|
||||||
|
# Path to address book
|
||||||
|
addr_book_file = "config/addrbook.json"
|
||||||
|
|
||||||
|
# Set true for strict address routability rules
|
||||||
|
addr_book_strict = true
|
||||||
|
|
||||||
|
# Time to wait before flushing messages out on the connection, in ms
|
||||||
|
flush_throttle_timeout = 100
|
||||||
|
|
||||||
|
# Maximum number of peers to connect to
|
||||||
|
max_num_peers = 50
|
||||||
|
|
||||||
|
# Maximum size of a message packet payload, in bytes
|
||||||
|
max_packet_msg_payload_size = 1024
|
||||||
|
|
||||||
|
# Rate at which packets can be sent, in bytes/second
|
||||||
|
send_rate = 512000
|
||||||
|
|
||||||
|
# Rate at which packets can be received, in bytes/second
|
||||||
|
recv_rate = 512000
|
||||||
|
|
||||||
|
# Set true to enable the peer-exchange reactor
|
||||||
|
pex = true
|
||||||
|
|
||||||
|
# Seed mode, in which node constantly crawls the network and looks for
|
||||||
|
# peers. If another node asks it for addresses, it responds and disconnects.
|
||||||
|
#
|
||||||
|
# Does not work if the peer-exchange reactor is disabled.
|
||||||
|
seed_mode = false
|
||||||
|
|
||||||
|
# Authenticated encryption
|
||||||
|
auth_enc = true
|
||||||
|
|
||||||
|
# Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
|
||||||
|
private_peer_ids = ""
|
||||||
|
|
||||||
|
##### mempool configuration options #####
|
||||||
|
[mempool]
|
||||||
|
|
||||||
|
recheck = true
|
||||||
|
recheck_empty = true
|
||||||
|
broadcast = true
|
||||||
|
wal_dir = "data/mempool.wal"
|
||||||
|
|
||||||
|
##### consensus configuration options #####
|
||||||
|
[consensus]
|
||||||
|
|
||||||
|
wal_file = "data/cs.wal/wal"
|
||||||
|
|
||||||
|
# All timeouts are in milliseconds
|
||||||
|
timeout_propose = 3000
|
||||||
|
timeout_propose_delta = 500
|
||||||
|
timeout_prevote = 1000
|
||||||
|
timeout_prevote_delta = 500
|
||||||
|
timeout_precommit = 1000
|
||||||
|
timeout_precommit_delta = 500
|
||||||
|
timeout_commit = 1000
|
||||||
|
|
||||||
|
# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
|
||||||
|
skip_timeout_commit = false
|
||||||
|
|
||||||
|
# BlockSize
|
||||||
|
max_block_size_txs = 10000
|
||||||
|
max_block_size_bytes = 1
|
||||||
|
|
||||||
|
# EmptyBlocks mode and possible interval between empty blocks in seconds
|
||||||
|
create_empty_blocks = true
|
||||||
|
create_empty_blocks_interval = 0
|
||||||
|
|
||||||
|
# Reactor sleep duration parameters are in milliseconds
|
||||||
|
peer_gossip_sleep_duration = 100
|
||||||
|
peer_query_maj23_sleep_duration = 2000
|
||||||
|
|
||||||
|
##### transactions indexer configuration options #####
|
||||||
|
[tx_index]
|
||||||
|
|
||||||
|
# What indexer to use for transactions
|
||||||
|
#
|
||||||
|
# Options:
|
||||||
|
# 1) "null" (default)
|
||||||
|
# 2) "kv" - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
|
||||||
|
indexer = "kv"
|
||||||
|
|
||||||
|
# Comma-separated list of tags to index (by default the only tag is tx hash)
|
||||||
|
#
|
||||||
|
# It's recommended to index only a subset of tags due to possible memory
|
||||||
|
# bloat. This, of course, depends on the indexer's DB and the volume of
|
||||||
|
# transactions.
|
||||||
|
index_tags = ""
|
||||||
|
|
||||||
|
# When set to true, tells indexer to index all tags. Note this may be not
|
||||||
|
# desirable (see the comment above). IndexTags has precedence over
|
||||||
|
# IndexAllTags (i.e. when given both, IndexTags will be indexed).
|
||||||
|
index_all_tags = false
|
39
docs/examples/node2/config/genesis.json
Normal file
39
docs/examples/node2/config/genesis.json
Normal file
@@ -0,0 +1,39 @@
|
|||||||
|
{
|
||||||
|
"genesis_time": "0001-01-01T00:00:00Z",
|
||||||
|
"chain_id": "test-chain-A2i3OZ",
|
||||||
|
"validators": [
|
||||||
|
{
|
||||||
|
"pub_key": {
|
||||||
|
"type": "AC26791624DE60",
|
||||||
|
"value": "D+k4AdjnYPWbB9wmad137Bdpo/kAulOoTRQrLy/Qc4k="
|
||||||
|
},
|
||||||
|
"power": 10,
|
||||||
|
"name": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"pub_key": {
|
||||||
|
"type": "AC26791624DE60",
|
||||||
|
"value": "b56N5GCR1adcVRuENjfKw/mrm2dkhT7wNZXV/SDsKsU="
|
||||||
|
},
|
||||||
|
"power": 10,
|
||||||
|
"name": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"pub_key": {
|
||||||
|
"type": "AC26791624DE60",
|
||||||
|
"value": "IgZDpJvGA0TAamicA8ircy+RX/BkUlj6DXwM791ywIU="
|
||||||
|
},
|
||||||
|
"power": 10,
|
||||||
|
"name": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"pub_key": {
|
||||||
|
"type": "AC26791624DE60",
|
||||||
|
"value": "KGAZfxZvIZ7abbeIQ85U1ECG6+I62KSdaH8ulc0+OiU="
|
||||||
|
},
|
||||||
|
"power": 10,
|
||||||
|
"name": ""
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"app_hash": ""
|
||||||
|
}
|
1
docs/examples/node2/config/node_key.json
Normal file
1
docs/examples/node2/config/node_key.json
Normal file
@@ -0,0 +1 @@
|
|||||||
|
{"priv_key":{"type":"954568A3288910","value":"COHZ/Y2cWGWxJNkRwtpQBt5sYvOnb6Gpz0lO46XERRJFBIdSWD5x1UMGRSTmnvW1ec5G4bMdg6zUZKOZD+vVPg=="}}
|
14
docs/examples/node2/config/priv_validator.json
Normal file
14
docs/examples/node2/config/priv_validator.json
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
{
|
||||||
|
"address": "F0AA266949FB29ADA0B679C27889ED930BD1BDA1",
|
||||||
|
"pub_key": {
|
||||||
|
"type": "AC26791624DE60",
|
||||||
|
"value": "IgZDpJvGA0TAamicA8ircy+RX/BkUlj6DXwM791ywIU="
|
||||||
|
},
|
||||||
|
"last_height": 0,
|
||||||
|
"last_round": 0,
|
||||||
|
"last_step": 0,
|
||||||
|
"priv_key": {
|
||||||
|
"type": "954568A3288910",
|
||||||
|
"value": "khADeZ5K/8u/L99DFaZNRq8V5g+EHWbwfqFjhCrppaAiBkOkm8YDRMBqaJwDyKtzL5Ff8GRSWPoNfAzv3XLAhQ=="
|
||||||
|
}
|
||||||
|
}
|
@@ -1,42 +0,0 @@
|
|||||||
{
|
|
||||||
"genesis_time":"0001-01-01T00:00:00Z",
|
|
||||||
"chain_id":"test-chain-wt7apy",
|
|
||||||
"validators":[
|
|
||||||
{
|
|
||||||
"pub_key":{
|
|
||||||
"type":"ed25519",
|
|
||||||
"data":"F08446C80A33E10D620E21450821B58D053778528F2B583D423B3E46EC647D30"
|
|
||||||
},
|
|
||||||
"power":10,
|
|
||||||
"name":"node1"
|
|
||||||
}
|
|
||||||
,
|
|
||||||
{
|
|
||||||
"pub_key":{
|
|
||||||
"type":"ed25519",
|
|
||||||
"data": "A8423F70A9E512643B4B00F7C3701ECAD1F31B0A1FAA45852C41046353B9A07F"
|
|
||||||
},
|
|
||||||
"power":10,
|
|
||||||
"name":"node2"
|
|
||||||
}
|
|
||||||
,
|
|
||||||
{
|
|
||||||
"pub_key":{
|
|
||||||
"type":"ed25519",
|
|
||||||
"data": "E52EFFAEDFE1D618ECDA71DE3B23592B3612CAABA0C10826E4C3120B2198C29A"
|
|
||||||
},
|
|
||||||
"power":10,
|
|
||||||
"name":"node3"
|
|
||||||
}
|
|
||||||
,
|
|
||||||
{
|
|
||||||
"pub_key":{
|
|
||||||
"type":"ed25519",
|
|
||||||
"data": "2B8FC09C07955A02998DFE5AF1AAD1C44115ECA7635FF51A867CF4265D347C07"
|
|
||||||
},
|
|
||||||
"power":10,
|
|
||||||
"name":"node4"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"app_hash":""
|
|
||||||
}
|
|
@@ -1,6 +0,0 @@
|
|||||||
{
|
|
||||||
"priv_key" : {
|
|
||||||
"data" : "F7BCABA165DFC0DDD50AE563EFB285BAA236EA805D35612504238A36EFA105958756442B1D9F942D7ABD259F2D59671657B6378E9C7194342A7AAA47A66D1E95",
|
|
||||||
"type" : "ed25519"
|
|
||||||
}
|
|
||||||
}
|
|
@@ -1,15 +0,0 @@
|
|||||||
{
|
|
||||||
"address": "DD6C63A762608A9DDD4A845657743777F63121D6",
|
|
||||||
"pub_key": {
|
|
||||||
"type": "ed25519",
|
|
||||||
"data": "A8423F70A9E512643B4B00F7C3701ECAD1F31B0A1FAA45852C41046353B9A07F"
|
|
||||||
},
|
|
||||||
"last_height": 0,
|
|
||||||
"last_round": 0,
|
|
||||||
"last_step": 0,
|
|
||||||
"last_signature": null,
|
|
||||||
"priv_key": {
|
|
||||||
"type": "ed25519",
|
|
||||||
"data": "7B0DE666FF5E9B437D284BCE767F612381890C018B93B0A105D2E829A568DA6FA8423F70A9E512643B4B00F7C3701ECAD1F31B0A1FAA45852C41046353B9A07F"
|
|
||||||
}
|
|
||||||
}
|
|
@@ -1,15 +0,0 @@
|
|||||||
# This is a TOML config file.
|
|
||||||
# For more information, see https://github.com/toml-lang/toml
|
|
||||||
|
|
||||||
proxy_app = "tcp://127.0.0.1:46658"
|
|
||||||
moniker = "penguin"
|
|
||||||
fast_sync = true
|
|
||||||
db_backend = "leveldb"
|
|
||||||
log_level = "state:info,*:error"
|
|
||||||
|
|
||||||
[rpc]
|
|
||||||
laddr = "tcp://0.0.0.0:46657"
|
|
||||||
|
|
||||||
[p2p]
|
|
||||||
laddr = "tcp://0.0.0.0:46656"
|
|
||||||
seeds = ""
|
|
169
docs/examples/node3/config/config.toml
Normal file
169
docs/examples/node3/config/config.toml
Normal file
@@ -0,0 +1,169 @@
|
|||||||
|
# This is a TOML config file.
|
||||||
|
# For more information, see https://github.com/toml-lang/toml
|
||||||
|
|
||||||
|
##### main base config options #####
|
||||||
|
|
||||||
|
# TCP or UNIX socket address of the ABCI application,
|
||||||
|
# or the name of an ABCI application compiled in with the Tendermint binary
|
||||||
|
proxy_app = "tcp://127.0.0.1:46658"
|
||||||
|
|
||||||
|
# A custom human readable name for this node
|
||||||
|
moniker = "delta"
|
||||||
|
|
||||||
|
# If this node is many blocks behind the tip of the chain, FastSync
|
||||||
|
# allows it to catch up quickly by downloading blocks in parallel
|
||||||
|
# and verifying their commits
|
||||||
|
fast_sync = true
|
||||||
|
|
||||||
|
# Database backend: leveldb | memdb
|
||||||
|
db_backend = "leveldb"
|
||||||
|
|
||||||
|
# Database directory
|
||||||
|
db_path = "data"
|
||||||
|
|
||||||
|
# Output level for logging, including package level options
|
||||||
|
log_level = "main:info,state:info,*:error"
|
||||||
|
|
||||||
|
##### additional base config options #####
|
||||||
|
|
||||||
|
# Path to the JSON file containing the initial validator set and other meta data
|
||||||
|
genesis_file = "config/genesis.json"
|
||||||
|
|
||||||
|
# Path to the JSON file containing the private key to use as a validator in the consensus protocol
|
||||||
|
priv_validator_file = "config/priv_validator.json"
|
||||||
|
|
||||||
|
# Path to the JSON file containing the private key to use for node authentication in the p2p protocol
|
||||||
|
node_key_file = "config/node_key.json"
|
||||||
|
|
||||||
|
# Mechanism to connect to the ABCI application: socket | grpc
|
||||||
|
abci = "socket"
|
||||||
|
|
||||||
|
# TCP or UNIX socket address for the profiling server to listen on
|
||||||
|
prof_laddr = ""
|
||||||
|
|
||||||
|
# If true, query the ABCI app on connecting to a new peer
|
||||||
|
# so the app can decide if we should keep the connection or not
|
||||||
|
filter_peers = false
|
||||||
|
|
||||||
|
##### advanced configuration options #####
|
||||||
|
|
||||||
|
##### rpc server configuration options #####
|
||||||
|
[rpc]
|
||||||
|
|
||||||
|
# TCP or UNIX socket address for the RPC server to listen on
|
||||||
|
laddr = "tcp://0.0.0.0:46657"
|
||||||
|
|
||||||
|
# TCP or UNIX socket address for the gRPC server to listen on
|
||||||
|
# NOTE: This server only supports /broadcast_tx_commit
|
||||||
|
grpc_laddr = ""
|
||||||
|
|
||||||
|
# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool
|
||||||
|
unsafe = false
|
||||||
|
|
||||||
|
##### peer to peer configuration options #####
|
||||||
|
[p2p]
|
||||||
|
|
||||||
|
# Address to listen for incoming connections
|
||||||
|
laddr = "tcp://0.0.0.0:46656"
|
||||||
|
|
||||||
|
# Comma separated list of seed nodes to connect to
|
||||||
|
seeds = ""
|
||||||
|
|
||||||
|
# Comma separated list of nodes to keep persistent connections to
|
||||||
|
# Do not add private peers to this list if you don't want them advertised
|
||||||
|
persistent_peers = ""
|
||||||
|
|
||||||
|
# Path to address book
|
||||||
|
addr_book_file = "config/addrbook.json"
|
||||||
|
|
||||||
|
# Set true for strict address routability rules
|
||||||
|
addr_book_strict = true
|
||||||
|
|
||||||
|
# Time to wait before flushing messages out on the connection, in ms
|
||||||
|
flush_throttle_timeout = 100
|
||||||
|
|
||||||
|
# Maximum number of peers to connect to
|
||||||
|
max_num_peers = 50
|
||||||
|
|
||||||
|
# Maximum size of a message packet payload, in bytes
|
||||||
|
max_packet_msg_payload_size = 1024
|
||||||
|
|
||||||
|
# Rate at which packets can be sent, in bytes/second
|
||||||
|
send_rate = 512000
|
||||||
|
|
||||||
|
# Rate at which packets can be received, in bytes/second
|
||||||
|
recv_rate = 512000
|
||||||
|
|
||||||
|
# Set true to enable the peer-exchange reactor
|
||||||
|
pex = true
|
||||||
|
|
||||||
|
# Seed mode, in which node constantly crawls the network and looks for
|
||||||
|
# peers. If another node asks it for addresses, it responds and disconnects.
|
||||||
|
#
|
||||||
|
# Does not work if the peer-exchange reactor is disabled.
|
||||||
|
seed_mode = false
|
||||||
|
|
||||||
|
# Authenticated encryption
|
||||||
|
auth_enc = true
|
||||||
|
|
||||||
|
# Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
|
||||||
|
private_peer_ids = ""
|
||||||
|
|
||||||
|
##### mempool configuration options #####
|
||||||
|
[mempool]
|
||||||
|
|
||||||
|
recheck = true
|
||||||
|
recheck_empty = true
|
||||||
|
broadcast = true
|
||||||
|
wal_dir = "data/mempool.wal"
|
||||||
|
|
||||||
|
##### consensus configuration options #####
|
||||||
|
[consensus]
|
||||||
|
|
||||||
|
wal_file = "data/cs.wal/wal"
|
||||||
|
|
||||||
|
# All timeouts are in milliseconds
|
||||||
|
timeout_propose = 3000
|
||||||
|
timeout_propose_delta = 500
|
||||||
|
timeout_prevote = 1000
|
||||||
|
timeout_prevote_delta = 500
|
||||||
|
timeout_precommit = 1000
|
||||||
|
timeout_precommit_delta = 500
|
||||||
|
timeout_commit = 1000
|
||||||
|
|
||||||
|
# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
|
||||||
|
skip_timeout_commit = false
|
||||||
|
|
||||||
|
# BlockSize
|
||||||
|
max_block_size_txs = 10000
|
||||||
|
max_block_size_bytes = 1
|
||||||
|
|
||||||
|
# EmptyBlocks mode and possible interval between empty blocks in seconds
|
||||||
|
create_empty_blocks = true
|
||||||
|
create_empty_blocks_interval = 0
|
||||||
|
|
||||||
|
# Reactor sleep duration parameters are in milliseconds
|
||||||
|
peer_gossip_sleep_duration = 100
|
||||||
|
peer_query_maj23_sleep_duration = 2000
|
||||||
|
|
||||||
|
##### transactions indexer configuration options #####
|
||||||
|
[tx_index]
|
||||||
|
|
||||||
|
# What indexer to use for transactions
|
||||||
|
#
|
||||||
|
# Options:
|
||||||
|
# 1) "null" (default)
|
||||||
|
# 2) "kv" - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
|
||||||
|
indexer = "kv"
|
||||||
|
|
||||||
|
# Comma-separated list of tags to index (by default the only tag is tx hash)
|
||||||
|
#
|
||||||
|
# It's recommended to index only a subset of tags due to possible memory
|
||||||
|
# bloat. This, of course, depends on the indexer's DB and the volume of
|
||||||
|
# transactions.
|
||||||
|
index_tags = ""
|
||||||
|
|
||||||
|
# When set to true, tells indexer to index all tags. Note this may be not
|
||||||
|
# desirable (see the comment above). IndexTags has precedence over
|
||||||
|
# IndexAllTags (i.e. when given both, IndexTags will be indexed).
|
||||||
|
index_all_tags = false
|
39
docs/examples/node3/config/genesis.json
Normal file
39
docs/examples/node3/config/genesis.json
Normal file
@@ -0,0 +1,39 @@
|
|||||||
|
{
|
||||||
|
"genesis_time": "0001-01-01T00:00:00Z",
|
||||||
|
"chain_id": "test-chain-A2i3OZ",
|
||||||
|
"validators": [
|
||||||
|
{
|
||||||
|
"pub_key": {
|
||||||
|
"type": "AC26791624DE60",
|
||||||
|
"value": "D+k4AdjnYPWbB9wmad137Bdpo/kAulOoTRQrLy/Qc4k="
|
||||||
|
},
|
||||||
|
"power": 10,
|
||||||
|
"name": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"pub_key": {
|
||||||
|
"type": "AC26791624DE60",
|
||||||
|
"value": "b56N5GCR1adcVRuENjfKw/mrm2dkhT7wNZXV/SDsKsU="
|
||||||
|
},
|
||||||
|
"power": 10,
|
||||||
|
"name": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"pub_key": {
|
||||||
|
"type": "AC26791624DE60",
|
||||||
|
"value": "IgZDpJvGA0TAamicA8ircy+RX/BkUlj6DXwM791ywIU="
|
||||||
|
},
|
||||||
|
"power": 10,
|
||||||
|
"name": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"pub_key": {
|
||||||
|
"type": "AC26791624DE60",
|
||||||
|
"value": "KGAZfxZvIZ7abbeIQ85U1ECG6+I62KSdaH8ulc0+OiU="
|
||||||
|
},
|
||||||
|
"power": 10,
|
||||||
|
"name": ""
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"app_hash": ""
|
||||||
|
}
|
1
docs/examples/node3/config/node_key.json
Normal file
1
docs/examples/node3/config/node_key.json
Normal file
@@ -0,0 +1 @@
|
|||||||
|
{"priv_key":{"type":"954568A3288910","value":"9Y9xp/tUJJ6pHTF5SUV0bGKYSdVbFtMHu+Lr8S0JBSZAwneaejnfOEU1LMKOnQ07skrDUaJcj5di3jAyjxJzqg=="}}
|
14
docs/examples/node3/config/priv_validator.json
Normal file
14
docs/examples/node3/config/priv_validator.json
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
{
|
||||||
|
"address": "9A1A6914EB5F4FF0269C7EEEE627C27310CC64F9",
|
||||||
|
"pub_key": {
|
||||||
|
"type": "AC26791624DE60",
|
||||||
|
"value": "KGAZfxZvIZ7abbeIQ85U1ECG6+I62KSdaH8ulc0+OiU="
|
||||||
|
},
|
||||||
|
"last_height": 0,
|
||||||
|
"last_round": 0,
|
||||||
|
"last_step": 0,
|
||||||
|
"priv_key": {
|
||||||
|
"type": "954568A3288910",
|
||||||
|
"value": "jb52LZ5gp+eQ8nJlFK1z06nBMp1gD8ICmyzdM1icGOgoYBl/Fm8hntptt4hDzlTUQIbr4jrYpJ1ofy6VzT46JQ=="
|
||||||
|
}
|
||||||
|
}
|
@@ -1,42 +0,0 @@
|
|||||||
{
|
|
||||||
"genesis_time":"0001-01-01T00:00:00Z",
|
|
||||||
"chain_id":"test-chain-wt7apy",
|
|
||||||
"validators":[
|
|
||||||
{
|
|
||||||
"pub_key":{
|
|
||||||
"type":"ed25519",
|
|
||||||
"data":"F08446C80A33E10D620E21450821B58D053778528F2B583D423B3E46EC647D30"
|
|
||||||
},
|
|
||||||
"power":10,
|
|
||||||
"name":"node1"
|
|
||||||
}
|
|
||||||
,
|
|
||||||
{
|
|
||||||
"pub_key":{
|
|
||||||
"type":"ed25519",
|
|
||||||
"data": "A8423F70A9E512643B4B00F7C3701ECAD1F31B0A1FAA45852C41046353B9A07F"
|
|
||||||
},
|
|
||||||
"power":10,
|
|
||||||
"name":"node2"
|
|
||||||
}
|
|
||||||
,
|
|
||||||
{
|
|
||||||
"pub_key":{
|
|
||||||
"type":"ed25519",
|
|
||||||
"data": "E52EFFAEDFE1D618ECDA71DE3B23592B3612CAABA0C10826E4C3120B2198C29A"
|
|
||||||
},
|
|
||||||
"power":10,
|
|
||||||
"name":"node3"
|
|
||||||
}
|
|
||||||
,
|
|
||||||
{
|
|
||||||
"pub_key":{
|
|
||||||
"type":"ed25519",
|
|
||||||
"data": "2B8FC09C07955A02998DFE5AF1AAD1C44115ECA7635FF51A867CF4265D347C07"
|
|
||||||
},
|
|
||||||
"power":10,
|
|
||||||
"name":"node4"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"app_hash":""
|
|
||||||
}
|
|
@@ -1,6 +0,0 @@
|
|||||||
{
|
|
||||||
"priv_key" : {
|
|
||||||
"data" : "95136FCC97E4446B3141EDF9841078107ECE755E99925D79CCBF91085492680B3CA1034D9917DF1DED4E4AB2D9BC225919F6CB2176F210D2368697CC339DF4E7",
|
|
||||||
"type" : "ed25519"
|
|
||||||
}
|
|
||||||
}
|
|
@@ -1,15 +0,0 @@
|
|||||||
{
|
|
||||||
"address": "6D6A1E313B407B5474106CA8759C976B777AB659",
|
|
||||||
"pub_key": {
|
|
||||||
"type": "ed25519",
|
|
||||||
"data": "E52EFFAEDFE1D618ECDA71DE3B23592B3612CAABA0C10826E4C3120B2198C29A"
|
|
||||||
},
|
|
||||||
"last_height": 0,
|
|
||||||
"last_round": 0,
|
|
||||||
"last_step": 0,
|
|
||||||
"last_signature": null,
|
|
||||||
"priv_key": {
|
|
||||||
"type": "ed25519",
|
|
||||||
"data": "622432A370111A5C25CFE121E163FE709C9D5C95F551EDBD7A2C69A8545C9B76E52EFFAEDFE1D618ECDA71DE3B23592B3612CAABA0C10826E4C3120B2198C29A"
|
|
||||||
}
|
|
||||||
}
|
|
@@ -1,15 +0,0 @@
|
|||||||
# This is a TOML config file.
|
|
||||||
# For more information, see https://github.com/toml-lang/toml
|
|
||||||
|
|
||||||
proxy_app = "tcp://127.0.0.1:46658"
|
|
||||||
moniker = "penguin"
|
|
||||||
fast_sync = true
|
|
||||||
db_backend = "leveldb"
|
|
||||||
log_level = "state:info,*:error"
|
|
||||||
|
|
||||||
[rpc]
|
|
||||||
laddr = "tcp://0.0.0.0:46657"
|
|
||||||
|
|
||||||
[p2p]
|
|
||||||
laddr = "tcp://0.0.0.0:46656"
|
|
||||||
seeds = ""
|
|
@@ -1,42 +0,0 @@
|
|||||||
{
|
|
||||||
"genesis_time":"0001-01-01T00:00:00Z",
|
|
||||||
"chain_id":"test-chain-wt7apy",
|
|
||||||
"validators":[
|
|
||||||
{
|
|
||||||
"pub_key":{
|
|
||||||
"type":"ed25519",
|
|
||||||
"data":"F08446C80A33E10D620E21450821B58D053778528F2B583D423B3E46EC647D30"
|
|
||||||
},
|
|
||||||
"power":10,
|
|
||||||
"name":"node1"
|
|
||||||
}
|
|
||||||
,
|
|
||||||
{
|
|
||||||
"pub_key":{
|
|
||||||
"type":"ed25519",
|
|
||||||
"data": "A8423F70A9E512643B4B00F7C3701ECAD1F31B0A1FAA45852C41046353B9A07F"
|
|
||||||
},
|
|
||||||
"power":10,
|
|
||||||
"name":"node2"
|
|
||||||
}
|
|
||||||
,
|
|
||||||
{
|
|
||||||
"pub_key":{
|
|
||||||
"type":"ed25519",
|
|
||||||
"data": "E52EFFAEDFE1D618ECDA71DE3B23592B3612CAABA0C10826E4C3120B2198C29A"
|
|
||||||
},
|
|
||||||
"power":10,
|
|
||||||
"name":"node3"
|
|
||||||
}
|
|
||||||
,
|
|
||||||
{
|
|
||||||
"pub_key":{
|
|
||||||
"type":"ed25519",
|
|
||||||
"data": "2B8FC09C07955A02998DFE5AF1AAD1C44115ECA7635FF51A867CF4265D347C07"
|
|
||||||
},
|
|
||||||
"power":10,
|
|
||||||
"name":"node4"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"app_hash":""
|
|
||||||
}
|
|
@@ -1,6 +0,0 @@
|
|||||||
{
|
|
||||||
"priv_key" : {
|
|
||||||
"data" : "8895D6C9A1B46AB83A8E2BAE2121B8C3E245B9E9126EBD797FEAC5058285F2F64FDE2E8182C88AD5185A49D837C581465D57BD478C41865A66D7D9742D8AEF57",
|
|
||||||
"type" : "ed25519"
|
|
||||||
}
|
|
||||||
}
|
|
@@ -1,15 +0,0 @@
|
|||||||
{
|
|
||||||
"address": "829A9663611D3DD88A3D84EA0249679D650A0755",
|
|
||||||
"pub_key": {
|
|
||||||
"type": "ed25519",
|
|
||||||
"data": "2B8FC09C07955A02998DFE5AF1AAD1C44115ECA7635FF51A867CF4265D347C07"
|
|
||||||
},
|
|
||||||
"last_height": 0,
|
|
||||||
"last_round": 0,
|
|
||||||
"last_step": 0,
|
|
||||||
"last_signature": null,
|
|
||||||
"priv_key": {
|
|
||||||
"type": "ed25519",
|
|
||||||
"data": "0A604D1C9AE94A50150BF39E603239092F9392E4773F4D8F4AC1D86E6438E89E2B8FC09C07955A02998DFE5AF1AAD1C44115ECA7635FF51A867CF4265D347C07"
|
|
||||||
}
|
|
||||||
}
|
|
@@ -40,10 +40,8 @@ Tendermint Tools
    :maxdepth: 2

    deploy-testnets.rst
-   tools/ansible.rst
+   terraform-and-ansible.rst
    tools/docker.rst
-   tools/mintnet-kubernetes.rst
-   tools/terraform-digitalocean.rst
    tools/benchmarking.rst
    tools/monitoring.rst
@@ -67,6 +65,7 @@ Tendermint 201

    specification.rst
    determinism.rst
+   transactional-semantics.rst

 * For a deeper dive, see `this thesis <https://atrium.lib.uoguelph.ca/xmlui/handle/10214/9769>`__.
 * There is also the `original whitepaper <https://tendermint.com/static/docs/tendermint.pdf>`__, though it is now quite outdated.
|
@@ -4,53 +4,48 @@ Install Tendermint
|
|||||||
From Binary
|
From Binary
|
||||||
-----------
|
-----------
|
||||||
|
|
||||||
To download pre-built binaries, see the `Download page <https://tendermint.com/downloads>`__.
|
To download pre-built binaries, see the `releases page <https://github.com/tendermint/tendermint/releases>`__.
|
||||||
|
|
||||||
From Source
|
From Source
|
||||||
-----------
|
-----------
|
||||||
|
|
||||||
You'll need ``go``, maybe `dep <https://github.com/golang/dep>`__, and the Tendermint source code.
|
You'll need ``go`` `installed <https://golang.org/doc/install>`__ and the required
|
||||||
|
`environment variables set <https://github.com/tendermint/tendermint/wiki/Setting-GOPATH>`__
|
||||||
Install Go
|
|
||||||
^^^^^^^^^^
|
|
||||||
|
|
||||||
Make sure you have `installed Go <https://golang.org/doc/install>`__ and
|
|
||||||
set the ``GOPATH``. You should also put ``GOPATH/bin`` on your ``PATH``.
|
|
||||||
|
|
||||||
Get Source Code
|
Get Source Code
|
||||||
^^^^^^^^^^^^^^^
|
^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
You should be able to install the latest with a simple
|
::
|
||||||
|
|
||||||
|
mkdir -p $GOPATH/src/github.com/tendermint
|
||||||
|
cd $GOPATH/src/github.com/tendermint
|
||||||
|
git clone https://github.com/tendermint/tendermint.git
|
||||||
|
cd tendermint
|
||||||
|
|
||||||
|
Get Tools & Dependencies
|
||||||
|
^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
::
|
::
|
||||||
|
|
||||||
go get github.com/tendermint/tendermint/cmd/tendermint
|
|
||||||
|
|
||||||
Run ``tendermint --help`` and ``tendermint version`` to ensure your
|
|
||||||
installation worked.
|
|
||||||
|
|
||||||
If the installation failed, a dependency may have been updated and become
|
|
||||||
incompatible with the latest Tendermint master branch. We solve this
|
|
||||||
using the ``dep`` tool for dependency management.
|
|
||||||
|
|
||||||
First, install ``dep``:
|
|
||||||
|
|
||||||
::
|
|
||||||
|
|
||||||
cd $GOPATH/src/github.com/tendermint/tendermint
|
|
||||||
make get_tools
|
make get_tools
|
||||||
|
make get_vendor_deps
|
||||||
|
|
||||||
Now we can fetch the correct versions of each dependency by running:
|
Compile
|
||||||
|
^^^^^^^
|
||||||
|
|
||||||
::
|
::
|
||||||
|
|
||||||
make get_vendor_deps
|
|
||||||
make install
|
make install
|
||||||
|
|
||||||
Note that even though ``go get`` originally failed, the repository was
|
to put the binary in ``$GOPATH/bin`` or use:
|
||||||
still cloned to the correct location in the ``$GOPATH``.
|
|
||||||
|
|
||||||
The latest Tendermint Core version is now installed.
|
::
|
||||||
|
|
||||||
|
make build
|
||||||
|
|
||||||
|
to put the binary in ``./build``.
|
||||||
|
|
||||||
|
The latest ``tendermint version`` is now installed.
|
||||||
|
|
||||||
Reinstall
|
Reinstall
|
||||||
---------
|
---------
|
||||||
@@ -86,20 +81,6 @@ do, use ``dep``, as above:
|
|||||||
Since the third option just uses ``dep`` right away, it should always
|
Since the third option just uses ``dep`` right away, it should always
|
||||||
work.
|
work.
|
||||||
|
|
||||||
Troubleshooting
|
|
||||||
---------------
|
|
||||||
|
|
||||||
If ``go get`` failing bothers you, fetch the code using ``git``:
|
|
||||||
|
|
||||||
::
|
|
||||||
|
|
||||||
mkdir -p $GOPATH/src/github.com/tendermint
|
|
||||||
git clone https://github.com/tendermint/tendermint $GOPATH/src/github.com/tendermint/tendermint
|
|
||||||
cd $GOPATH/src/github.com/tendermint/tendermint
|
|
||||||
make get_tools
|
|
||||||
make get_vendor_deps
|
|
||||||
make install
|
|
||||||
|
|
||||||
Run
|
Run
|
||||||
^^^
|
^^^
|
||||||
|
|
||||||
|
76 docs/spec/README.md Normal file
@@ -0,0 +1,76 @@
# Tendermint Specification

This is a markdown specification of the Tendermint blockchain.
It defines the base data structures, how they are validated,
and how they are communicated over the network.

If you find discrepancies between the spec and the code that
do not have an associated issue or pull request on github,
please submit them to our [bug bounty](https://tendermint.com/security)!

## Contents

- [Overview](#overview)

### Data Structures

- [Encoding and Digests](https://github.com/tendermint/tendermint/blob/master/docs/spec/blockchain/encoding.md)
- [Blockchain](https://github.com/tendermint/tendermint/blob/master/docs/spec/blockchain/blockchain.md)
- [State](https://github.com/tendermint/tendermint/blob/master/docs/spec/blockchain/state.md)

### Consensus Protocol

- TODO

### P2P and Network Protocols

- [The Base P2P Layer](https://github.com/tendermint/tendermint/tree/master/docs/spec/p2p): multiplex the protocols ("reactors") on authenticated and encrypted TCP connections
- [Peer Exchange (PEX)](https://github.com/tendermint/tendermint/tree/master/docs/spec/reactors/pex): gossip known peer addresses so peers can find each other
- [Block Sync](https://github.com/tendermint/tendermint/tree/master/docs/spec/reactors/block_sync): gossip blocks so peers can catch up quickly
- [Consensus](https://github.com/tendermint/tendermint/tree/master/docs/spec/reactors/consensus): gossip votes and block parts so new blocks can be committed
- [Mempool](https://github.com/tendermint/tendermint/tree/master/docs/spec/reactors/mempool): gossip transactions so they get included in blocks
- Evidence: TODO

### More

- Light Client: TODO
- Persistence: TODO

## Overview

Tendermint provides Byzantine Fault Tolerant State Machine Replication using
hash-linked batches of transactions. Such transaction batches are called "blocks".
Hence, Tendermint defines a "blockchain".

Each block in Tendermint has a unique index - its Height.
A block at `Height == H` can only be committed *after* the
block at `Height == H-1`.
Each block is committed by a known set of weighted Validators.
Membership and weighting within this set may change over time.
Tendermint guarantees the safety and liveness of the blockchain
so long as less than 1/3 of the total weight of the Validator set
is malicious or faulty.

A commit in Tendermint is a set of signed messages from more than 2/3 of
the total weight of the current Validator set. Validators take turns proposing
blocks and voting on them. Once enough votes are received, the block is considered
committed. These votes are included in the *next* block as proof that the previous block
was committed - they cannot be included in the current block, as that block has already been
created.

Once a block is committed, it can be executed against an application.
The application returns results for each of the transactions in the block.
The application can also return changes to be made to the validator set,
as well as a cryptographic digest of its latest state.

Tendermint is designed to enable efficient verification and authentication
of the latest state of the blockchain. To achieve this, it embeds
cryptographic commitments to certain information in the block "header".
This information includes the contents of the block (eg. the transactions),
the validator set committing the block, as well as the various results returned by the application.
Note, however, that block execution only occurs *after* a block is committed.
Thus, application results can only be included in the *next* block.

Also note that information like the transaction results and the validator set are never
directly included in the block - only their cryptographic digests (Merkle roots) are.
Hence, verification of a block requires a separate data structure to store this information.
We call this the `State`. Block verification also requires access to the previous block.
@@ -162,7 +162,7 @@ We refer to certain globally available objects:
 and `state` keeps track of the validator set, the consensus parameters
 and other results from the application.
 Elements of an object are accessed as expected,
-ie. `block.Header`. See [here](state.md) for the definition of `state`.
+ie. `block.Header`. See [here](https://github.com/tendermint/tendermint/blob/master/docs/spec/blockchain/state.md) for the definition of `state`.

 ### Header
321 docs/spec/blockchain/encoding.md Normal file
@@ -0,0 +1,321 @@
# Tendermint Encoding

## Amino

Tendermint uses the Protobuf3 derivative [Amino](https://github.com/tendermint/go-amino) for all data structures.
Think of Amino as an object-oriented Protobuf3 with native JSON support.
The goal of the Amino encoding protocol is to bring parity between application
logic objects and persistence objects.

Please see the [Amino
specification](https://github.com/tendermint/go-amino#amino-encoding-for-go) for
more details.

Notably, every object that satisfies an interface (eg. a particular kind of p2p message,
or a particular kind of pubkey) is registered with a global name, the hash of
which is included in the object's encoding as the so-called "prefix bytes".

We define the `func AminoEncode(obj interface{}) []byte` function to take an
arbitrary object and return the Amino encoded bytes.

## Byte Arrays

The encoding of a byte array is simply the raw-bytes prefixed with the length of
the array as a `UVarint` (what Protobuf calls a `Varint`).

For details on varints, see the [protobuf
spec](https://developers.google.com/protocol-buffers/docs/encoding#varints).

For example, the byte-array `[0xA, 0xB]` would be encoded as `0x020A0B`,
while a byte-array containing 300 entries beginning with `[0xA, 0xB, ...]` would
be encoded as `0xAC020A0B...` where `0xAC02` is the UVarint encoding of 300.
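To make the length-prefix rule concrete, here is a small Go sketch (not from the spec) that reproduces the two examples above with the standard library's varint encoder; the helper name `encodeByteSlice` is ours, chosen to echo the name used later in the Merkle section.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// encodeByteSlice prefixes bz with its length as a UVarint,
// exactly the rule described above.
func encodeByteSlice(bz []byte) []byte {
	buf := make([]byte, binary.MaxVarintLen64)
	n := binary.PutUvarint(buf, uint64(len(bz)))
	return append(buf[:n], bz...)
}

func main() {
	fmt.Printf("%X\n", encodeByteSlice([]byte{0x0A, 0x0B})) // 020A0B

	long := make([]byte, 300)
	long[0], long[1] = 0x0A, 0x0B
	fmt.Printf("%X...\n", encodeByteSlice(long)[:4]) // AC020A0B...
}
```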
## Public Key Cryptography

Tendermint uses Amino to distinguish between different types of private keys,
public keys, and signatures. Additionally, for each public key, Tendermint
defines an Address function that can be used as a more compact identifier in
place of the public key. Here we list the concrete types, their names,
and prefix bytes for public keys and signatures, as well as the address schemes
for each PubKey. Note for brevity we don't
include details of the private keys beyond their type and name, as they can be
derived the same way as the others using Amino.

All registered objects are encoded by Amino using a 4-byte PrefixBytes that
uniquely identifies the object and includes information about its underlying
type. For details on how PrefixBytes are computed, see the [Amino
spec](https://github.com/tendermint/go-amino#computing-the-prefix-and-disambiguation-bytes).

In what follows, we provide the type names and prefix bytes directly.
Notice that when encoding byte-arrays, the length of the byte-array is appended
to the PrefixBytes. Thus the encoding of a byte array becomes `<PrefixBytes>
<Length> <ByteArray>`

NOTE: the remainder of this section on Public Key Cryptography can be generated
from [this script](https://github.com/tendermint/tendermint/blob/master/docs/spec/scripts/crypto.go)
### PubKeyEd25519

```
// Name: tendermint/PubKeyEd25519
// PrefixBytes: 0x1624DE62
// Length: 0x20
// Notes: raw 32-byte Ed25519 pubkey
type PubKeyEd25519 [32]byte

func (pubkey PubKeyEd25519) Address() []byte {
    // NOTE: hash of the Amino encoded bytes!
    return RIPEMD160(AminoEncode(pubkey))
}
```

For example, the 32-byte Ed25519 pubkey
`CCACD52F9B29D04393F01CD9AF6535455668115641F3D8BAEFD2295F24BAF60E` would be
encoded as
`1624DE6220CCACD52F9B29D04393F01CD9AF6535455668115641F3D8BAEFD2295F24BAF60E`.

The address would then be
`RIPEMD160(0x1624DE6220CCACD52F9B29D04393F01CD9AF6535455668115641F3D8BAEFD2295F24BAF60E)`
or `430FF75BAF1EC4B0D51BB3EEC2955479D0071605`
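As a cross-check of the address rule, the following Go sketch hashes the prefix bytes, length byte, and raw pubkey from the example above; the use of `golang.org/x/crypto/ripemd160` is our choice of tooling, not something the spec mandates.

```go
package main

import (
	"encoding/hex"
	"fmt"

	"golang.org/x/crypto/ripemd160"
)

func main() {
	pub, _ := hex.DecodeString("CCACD52F9B29D04393F01CD9AF6535455668115641F3D8BAEFD2295F24BAF60E")

	// PrefixBytes (0x1624DE62) + length byte (0x20) + raw pubkey, per the spec text.
	encoded := append([]byte{0x16, 0x24, 0xDE, 0x62, 0x20}, pub...)

	h := ripemd160.New()
	h.Write(encoded)
	fmt.Printf("%X\n", h.Sum(nil)) // expected: 430FF75BAF1EC4B0D51BB3EEC2955479D0071605
}
```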
### SignatureEd25519
|
||||||
|
|
||||||
|
```
|
||||||
|
// Name: tendermint/SignatureKeyEd25519
|
||||||
|
// PrefixBytes: 0x3DA1DB2A
|
||||||
|
// Length: 0x40
|
||||||
|
// Notes: raw 64-byte Ed25519 signature
|
||||||
|
type SignatureEd25519 [64]byte
|
||||||
|
```
|
||||||
|
|
||||||
|
For example, the 64-byte Ed25519 signature
|
||||||
|
`1B6034A8ED149D3C94FDA13EC03B26CC0FB264D9B0E47D3FA3DEF9FCDE658E49C80B35F9BE74949356401B15B18FB817D6E54495AD1C4A8401B248466CB0DB0B`
|
||||||
|
would be encoded as
|
||||||
|
`3DA1DB2A401B6034A8ED149D3C94FDA13EC03B26CC0FB264D9B0E47D3FA3DEF9FCDE658E49C80B35F9BE74949356401B15B18FB817D6E54495AD1C4A8401B248466CB0DB0B`
|
||||||
|
|
||||||
|
### PrivKeyEd25519
|
||||||
|
|
||||||
|
```
|
||||||
|
// Name: tendermint/PrivKeyEd25519
|
||||||
|
// Notes: raw 32-byte priv key concatenated to raw 32-byte pub key
|
||||||
|
type PrivKeyEd25519 [64]byte
|
||||||
|
```
|
||||||
|
|
||||||
|
### PubKeySecp256k1
|
||||||
|
|
||||||
|
```
|
||||||
|
// Name: tendermint/PubKeySecp256k1
|
||||||
|
// PrefixBytes: 0xEB5AE982
|
||||||
|
// Length: 0x21
|
||||||
|
// Notes: OpenSSL compressed pubkey prefixed with 0x02 or 0x03
|
||||||
|
type PubKeySecp256k1 [33]byte
|
||||||
|
|
||||||
|
func (pubkey PubKeySecp256k1) Address() []byte {
|
||||||
|
// NOTE: hash of the raw pubkey bytes (not Amino encoded!).
|
||||||
|
// Compatible with Bitcoin addresses.
|
||||||
|
return RIPEMD160(SHA256(pubkey[:]))
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
For example, the 33-byte Secp256k1 pubkey
|
||||||
|
`020BD40F225A57ED383B440CF073BC5539D0341F5767D2BF2D78406D00475A2EE9` would be
|
||||||
|
encoded as
|
||||||
|
`EB5AE98221020BD40F225A57ED383B440CF073BC5539D0341F5767D2BF2D78406D00475A2EE9`
|
||||||
|
|
||||||
|
The address would then be
|
||||||
|
`RIPEMD160(SHA256(0x020BD40F225A57ED383B440CF073BC5539D0341F5767D2BF2D78406D00475A2EE9))`
|
||||||
|
or `0AE5BEE929ABE51BAD345DB925EEA652680783FC`
|
||||||
|
|
||||||
|
### SignatureSecp256k1
|
||||||
|
|
||||||
|
```
|
||||||
|
// Name: tendermint/SignatureKeySecp256k1
|
||||||
|
// PrefixBytes: 0x16E1FEEA
|
||||||
|
// Length: Variable
|
||||||
|
// Encoding prefix: Variable
|
||||||
|
// Notes: raw bytes of the Secp256k1 signature
|
||||||
|
type SignatureSecp256k1 []byte
|
||||||
|
```
|
||||||
|
|
||||||
|
For example, the Secp256k1 signature
|
||||||
|
`304402201CD4B8C764D2FD8AF23ECFE6666CA8A53886D47754D951295D2D311E1FEA33BF02201E0F906BB1CF2C30EAACFFB032A7129358AFF96B9F79B06ACFFB18AC90C2ADD7`
|
||||||
|
would be encoded as
|
||||||
|
`16E1FEEA46304402201CD4B8C764D2FD8AF23ECFE6666CA8A53886D47754D951295D2D311E1FEA33BF02201E0F906BB1CF2C30EAACFFB032A7129358AFF96B9F79B06ACFFB18AC90C2ADD7`
|
||||||
|
|
||||||
|
### PrivKeySecp256k1
|
||||||
|
|
||||||
|
```
|
||||||
|
// Name: tendermint/PrivKeySecp256k1
|
||||||
|
// Notes: raw 32-byte priv key
|
||||||
|
type PrivKeySecp256k1 [32]byte
|
||||||
|
```
|
||||||
|
|
||||||
|
## Other Common Types
|
||||||
|
|
||||||
|
### BitArray
|
||||||
|
|
||||||
|
The BitArray is used in block headers and some consensus messages to signal
|
||||||
|
whether or not something was done by each validator. BitArray is represented
|
||||||
|
with a struct containing the number of bits (`Bits`) and the bit-array itself
|
||||||
|
encoded in base64 (`Elems`).
|
||||||
|
|
||||||
|
```go
|
||||||
|
type BitArray struct {
|
||||||
|
Bits int
|
||||||
|
Elems []uint64
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
This type is easily encoded directly by Amino.
|
||||||
|
|
||||||
|
Note BitArray receives a special JSON encoding in the form of `x` and `_`
|
||||||
|
representing `1` and `0`. Ie. the BitArray `10110` would be JSON encoded as
|
||||||
|
`"x_xx_"`
|
||||||
|
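A minimal sketch of how such a JSON form can be produced follows; this is our illustration of the rule above, not the tmlibs implementation.

```go
package main

import (
	"encoding/json"
	"fmt"
)

type BitArray struct {
	Bits  int
	Elems []uint64
}

// getIndex returns the i-th bit.
func (ba BitArray) getIndex(i int) bool {
	return ba.Elems[i/64]>>(uint(i)%64)&1 == 1
}

// MarshalJSON renders the bits as a string of 'x' (1) and '_' (0).
func (ba BitArray) MarshalJSON() ([]byte, error) {
	out := make([]byte, ba.Bits)
	for i := 0; i < ba.Bits; i++ {
		if ba.getIndex(i) {
			out[i] = 'x'
		} else {
			out[i] = '_'
		}
	}
	return json.Marshal(string(out))
}

func main() {
	ba := BitArray{Bits: 5, Elems: []uint64{0x0D}} // bits 10110, first bit first
	bz, _ := json.Marshal(ba)
	fmt.Println(string(bz)) // "x_xx_"
}
```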
|
||||||
|
### Part
|
||||||
|
|
||||||
|
Part is used to break up blocks into pieces that can be gossiped in parallel
|
||||||
|
and securely verified using a Merkle tree of the parts.
|
||||||
|
|
||||||
|
Part contains the index of the part in the larger set (`Index`), the actual
|
||||||
|
underlying data of the part (`Bytes`), and a simple Merkle proof that the part is contained in
|
||||||
|
the larger set (`Proof`).
|
||||||
|
|
||||||
|
```go
|
||||||
|
type Part struct {
|
||||||
|
Index int
|
||||||
|
Bytes byte[]
|
||||||
|
Proof byte[]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### MakeParts
|
||||||
|
|
||||||
|
Encode an object using Amino and slice it into parts.
|
||||||
|
|
||||||
|
```go
|
||||||
|
func MakeParts(obj interface{}, partSize int) []Part
|
||||||
|
```
|
||||||
|
|
||||||
|
## Merkle Trees
|
||||||
|
|
||||||
|
Simple Merkle trees are used in numerous places in Tendermint to compute a cryptographic digest of a data structure.
|
||||||
|
|
||||||
|
RIPEMD160 is always used as the hashing function.
|
||||||
|
|
||||||
|
### Simple Merkle Root
|
||||||
|
|
||||||
|
The function `SimpleMerkleRoot` is a simple recursive function defined as follows:
|
||||||
|
|
||||||
|
```go
|
||||||
|
func SimpleMerkleRoot(hashes [][]byte) []byte{
|
||||||
|
switch len(hashes) {
|
||||||
|
case 0:
|
||||||
|
return nil
|
||||||
|
case 1:
|
||||||
|
return hashes[0]
|
||||||
|
default:
|
||||||
|
left := SimpleMerkleRoot(hashes[:(len(hashes)+1)/2])
|
||||||
|
right := SimpleMerkleRoot(hashes[(len(hashes)+1)/2:])
|
||||||
|
return SimpleConcatHash(left, right)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func SimpleConcatHash(left, right []byte) []byte{
|
||||||
|
left = encodeByteSlice(left)
|
||||||
|
right = encodeByteSlice(right)
|
||||||
|
return RIPEMD160 (append(left, right))
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Note that the leaves are Amino encoded as byte-arrays (ie. simple Uvarint length
|
||||||
|
prefix) before being concatenated together and hashed.
|
||||||
|
|
||||||
|
Note: we will abuse notation and invoke `SimpleMerkleRoot` with arguments of type `struct` or type `[]struct`.
|
||||||
|
For `struct` arguments, we compute a `[][]byte` by sorting elements of the `struct` according to
|
||||||
|
field name and then hashing them.
|
||||||
|
For `[]struct` arguments, we compute a `[][]byte` by hashing the individual `struct` elements.
|
||||||
|
|
||||||
|
### Simple Merkle Proof
|
||||||
|
|
||||||
|
Proof that a leaf is in a Merkle tree consists of a simple structure:
|
||||||
|
|
||||||
|
|
||||||
|
```
|
||||||
|
type SimpleProof struct {
|
||||||
|
Aunts [][]byte
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Which is verified using the following:
|
||||||
|
|
||||||
|
```
|
||||||
|
func (proof SimpleProof) Verify(index, total int, leafHash, rootHash []byte) bool {
|
||||||
|
computedHash := computeHashFromAunts(index, total, leafHash, proof.Aunts)
|
||||||
|
return computedHash == rootHash
|
||||||
|
}
|
||||||
|
|
||||||
|
func computeHashFromAunts(index, total int, leafHash []byte, innerHashes [][]byte) []byte{
|
||||||
|
assert(index < total && index >= 0 && total > 0)
|
||||||
|
|
||||||
|
if total == 1{
|
||||||
|
assert(len(proof.Aunts) == 0)
|
||||||
|
return leafHash
|
||||||
|
}
|
||||||
|
|
||||||
|
assert(len(innerHashes) > 0)
|
||||||
|
|
||||||
|
numLeft := (total + 1) / 2
|
||||||
|
if index < numLeft {
|
||||||
|
leftHash := computeHashFromAunts(index, numLeft, leafHash, innerHashes[:len(innerHashes)-1])
|
||||||
|
assert(leftHash != nil)
|
||||||
|
return SimpleHashFromTwoHashes(leftHash, innerHashes[len(innerHashes)-1])
|
||||||
|
}
|
||||||
|
rightHash := computeHashFromAunts(index-numLeft, total-numLeft, leafHash, innerHashes[:len(innerHashes)-1])
|
||||||
|
assert(rightHash != nil)
|
||||||
|
return SimpleHashFromTwoHashes(innerHashes[len(innerHashes)-1], rightHash)
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## JSON
|
||||||
|
|
||||||
|
### Amino
|
||||||
|
|
||||||
|
TODO: improve this
|
||||||
|
|
||||||
|
Amino also supports JSON encoding - registered types are simply encoded as:
|
||||||
|
|
||||||
|
```
|
||||||
|
{
|
||||||
|
"type": "<DisfixBytes>",
|
||||||
|
"value": <JSON>
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
For instance, an ED25519 PubKey would look like:
|
||||||
|
|
||||||
|
```
|
||||||
|
{
|
||||||
|
"type": "AC26791624DE60",
|
||||||
|
"value": "uZ4h63OFWuQ36ZZ4Bd6NF+/w9fWUwrOncrQsackrsTk="
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Where the `"value"` is the base64 encoding of the raw pubkey bytes, and the
|
||||||
|
`"type"` is the full disfix bytes for Ed25519 pubkeys.
|
||||||
|
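For orientation, a sketch of producing such JSON with go-amino follows; the registration name and the exact shape of the `type` field vary across amino versions, so treat the details here as assumptions rather than as the canonical Tendermint wiring.

```go
package main

import (
	"fmt"

	amino "github.com/tendermint/go-amino"
)

type PubKey interface{}

type PubKeyEd25519 [32]byte

func main() {
	cdc := amino.NewCodec()
	cdc.RegisterInterface((*PubKey)(nil), nil)
	cdc.RegisterConcrete(PubKeyEd25519{}, "tendermint/PubKeyEd25519", nil)

	var pk PubKey = PubKeyEd25519{} // zero key, for illustration only
	bz, err := cdc.MarshalJSON(pk)
	if err != nil {
		panic(err)
	}
	// Expect a {"type": ..., "value": ...} wrapper for the registered type.
	fmt.Println(string(bz))
}
```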
|
||||||
|
|
||||||
|
### Signed Messages
|
||||||
|
|
||||||
|
Signed messages (eg. votes, proposals) in the consensus are encoded using Amino-JSON, rather than in the standard binary format.
|
||||||
|
|
||||||
|
When signing, the elements of a message are sorted by key and the sorted message is embedded in an
|
||||||
|
outer JSON that includes a `chain_id` field.
|
||||||
|
We call this encoding the CanonicalSignBytes. For instance, CanonicalSignBytes for a vote would look
|
||||||
|
like:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{"chain_id":"my-chain-id","vote":{"block_id":{"hash":DEADBEEF,"parts":{"hash":BEEFDEAD,"total":3}},"height":3,"round":2,"timestamp":1234567890,"type":2}}
|
||||||
|
```
|
||||||
|
|
||||||
|
Note how the fields within each level are sorted.
|
@@ -1,6 +1,46 @@
|
|||||||
# Tendermint Encoding
|
# Tendermint Encoding (Pre-Amino)
|
||||||
|
|
||||||
## Binary Serialization (TMBIN)
|
## PubKeys and Addresses
|
||||||
|
|
||||||
|
PubKeys are prefixed with a type-byte, followed by the raw bytes of the public
|
||||||
|
key.
|
||||||
|
|
||||||
|
Two keys are supported with the following type bytes:
|
||||||
|
|
||||||
|
```
|
||||||
|
TypeByteEd25519 = 0x1
|
||||||
|
TypeByteSecp256k1 = 0x2
|
||||||
|
```
|
||||||
|
|
||||||
|
```
|
||||||
|
// TypeByte: 0x1
|
||||||
|
type PubKeyEd25519 [32]byte
|
||||||
|
|
||||||
|
func (pub PubKeyEd25519) Encode() []byte {
|
||||||
|
return 0x1 | pub
|
||||||
|
}
|
||||||
|
|
||||||
|
func (pub PubKeyEd25519) Address() []byte {
|
||||||
|
// NOTE: the length (0x0120) is also included
|
||||||
|
return RIPEMD160(0x1 | 0x0120 | pub)
|
||||||
|
}
|
||||||
|
|
||||||
|
// TypeByte: 0x2
|
||||||
|
// NOTE: OpenSSL compressed pubkey (x-cord with 0x2 or 0x3)
|
||||||
|
type PubKeySecp256k1 [33]byte
|
||||||
|
|
||||||
|
func (pub PubKeySecp256k1) Encode() []byte {
|
||||||
|
return 0x2 | pub
|
||||||
|
}
|
||||||
|
|
||||||
|
func (pub PubKeySecp256k1) Address() []byte {
|
||||||
|
return RIPEMD160(SHA256(pub))
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
See https://github.com/tendermint/go-crypto/blob/v0.5.0/pub_key.go for more.
|
||||||
|
|
||||||
|
## Binary Serialization (go-wire)
|
||||||
|
|
||||||
Tendermint aims to encode data structures in a manner similar to how the corresponding Go structs
|
Tendermint aims to encode data structures in a manner similar to how the corresponding Go structs
|
||||||
are laid out in memory.
|
are laid out in memory.
|
@@ -74,11 +74,7 @@ func TotalVotingPower(vals []Validators) int64{
|
|||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
### PubKey
|
|
||||||
|
|
||||||
TODO:
|
|
||||||
|
|
||||||
### ConsensusParams
|
### ConsensusParams
|
||||||
|
|
||||||
TODO:
|
TODO
|
||||||
|
|
166
docs/spec/consensus/abci.md
Normal file
166
docs/spec/consensus/abci.md
Normal file
@@ -0,0 +1,166 @@
|
|||||||
|
# Application Blockchain Interface (ABCI)
|
||||||
|
|
||||||
|
ABCI is the interface between Tendermint (a state-machine replication engine)
|
||||||
|
and an application (the actual state machine).
|
||||||
|
|
||||||
|
The ABCI message types are defined in a [protobuf
|
||||||
|
file](https://github.com/tendermint/abci/blob/master/types/types.proto).
|
||||||
|
|
||||||
|
For full details on the ABCI message types and protocol, see the [ABCI
|
||||||
|
specification](https://github.com/tendermint/abci/blob/master/specification.rst).
|
||||||
|
Be sure to read the specification if you're trying to build an ABCI app!
|
||||||
|
|
||||||
|
For additional details on server implementation, see the [ABCI
|
||||||
|
readme](https://github.com/tendermint/abci#implementation).
|
||||||
|
|
||||||
|
Here we provide some more details around the use of ABCI by Tendermint and
|
||||||
|
clarify common "gotchas".
|
||||||
|
|
||||||
|
## ABCI connections
|
||||||
|
|
||||||
|
Tendermint opens 3 ABCI connections to the app: one for Consensus, one for
|
||||||
|
Mempool, one for Queries.
|
||||||
|
|
||||||
|
## Async vs Sync
|
||||||
|
|
||||||
|
The main ABCI server (ie. non-GRPC) provides ordered asynchronous messages.
|
||||||
|
This is useful for DeliverTx and CheckTx, since it allows Tendermint to forward
|
||||||
|
transactions to the app before it's finished processing previous ones.
|
||||||
|
|
||||||
|
Thus, DeliverTx and CheckTx messages are sent asynchronously, while all other
|
||||||
|
messages are sent synchronously.
|
||||||
|
|
||||||
|
## CheckTx and Commit
|
||||||
|
|
||||||
|
It is typical to hold three distinct states in an ABCI app: CheckTxState, DeliverTxState,
|
||||||
|
QueryState. The QueryState contains the latest committed state for a block.
|
||||||
|
The CheckTxState and DeliverTxState may be updated concurrently with one another.
|
||||||
|
Before Commit is called, Tendermint locks and flushes the mempool so that no new changes will happen
|
||||||
|
to CheckTxState. When Commit completes, it unlocks the mempool.
|
||||||
|
|
||||||
|
Thus, during Commit, it is safe to reset the QueryState and the CheckTxState to the latest DeliverTxState
|
||||||
|
(ie. the new state from executing all the txs in the block).
|
||||||
|
|
||||||
|
Note, however, that it is not possible to send transactions to Tendermint during Commit - if your app
|
||||||
|
tries to send a `/broadcast_tx` to Tendermint during Commit, it will deadlock.
|
||||||
|
|
||||||
|
|
||||||
|
## EndBlock Validator Updates
|
||||||
|
|
||||||
|
Updates to the Tendermint validator set can be made by returning `Validator`
|
||||||
|
objects in the `ResponseEndBlock`:
|
||||||
|
|
||||||
|
```
|
||||||
|
message Validator {
|
||||||
|
bytes pub_key = 1;
|
||||||
|
int64 power = 2;
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
The `pub_key` is the Amino encoded public key for the validator. For details on
|
||||||
|
Amino encoded public keys, see the [section of the encoding spec](https://github.com/tendermint/tendermint/blob/master/docs/spec/blockchain/encoding.md#public-key-cryptography).
|
||||||
|
|
||||||
|
For Ed25519 pubkeys, the Amino prefix is always "1624DE6220". For example, the 32-byte Ed25519 pubkey
|
||||||
|
`76852933A4686A721442E931A8415F62F5F1AEDF4910F1F252FB393F74C40C85` would be
|
||||||
|
Amino encoded as
|
||||||
|
`1624DE622076852933A4686A721442E931A8415F62F5F1AEDF4910F1F252FB393F74C40C85`
|
||||||
|
|
||||||
|
(Note: in old versions of Tendermint (pre-v0.19.0), the pubkey is just prefixed with a
|
||||||
|
single type byte, so for ED25519 we'd have `pub_key = 0x1 | pub`)
|
||||||
|
|
||||||
|
The `power` is the new voting power for the validator, with the
|
||||||
|
following rules:
|
||||||
|
|
||||||
|
- power must be non-negative
|
||||||
|
- if power is 0, the validator must already exist, and will be removed from the
|
||||||
|
validator set
|
||||||
|
- if power is non-0:
|
||||||
|
- if the validator does not already exist, it will be added to the validator
|
||||||
|
set with the given power
|
||||||
|
- if the validator does already exist, its power will be adjusted to the given power
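
A hedged Go sketch of an app returning these updates is below; the package path and field names follow the ABCI Go bindings of roughly this era (`github.com/tendermint/abci/types`), but verify them against the generated types for your version.

```
package myapp

import (
	"github.com/tendermint/abci/types"
)

// MyApp embeds BaseApplication so only the methods we care about are overridden.
type MyApp struct {
	types.BaseApplication
	pendingUpdates []types.Validator // accumulated while delivering txs
}

func (app *MyApp) EndBlock(req types.RequestEndBlock) types.ResponseEndBlock {
	// power 0 removes a validator; a non-zero power adds or updates one,
	// following the rules listed above.
	updates := app.pendingUpdates
	app.pendingUpdates = nil
	return types.ResponseEndBlock{ValidatorUpdates: updates}
}
```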
|
||||||
|
|
||||||
|
## Query
|
||||||
|
|
||||||
|
Query is a generic message type with lots of flexibility to enable diverse sets
|
||||||
|
of queries from applications. Tendermint has no requirements from the Query
|
||||||
|
message for normal operation - that is, the ABCI app developer need not implement Query functionality if they do not wish to.
|
||||||
|
That said, Tendermint makes a number of queries to support some optional
|
||||||
|
features. These are:
|
||||||
|
|
||||||
|
### Peer Filtering
|
||||||
|
|
||||||
|
When Tendermint connects to a peer, it sends two queries to the ABCI application
|
||||||
|
using the following paths, with no additional data:
|
||||||
|
|
||||||
|
- `/p2p/filter/addr/<IP:PORT>`, where `<IP:PORT>` denote the IP address and
|
||||||
|
the port of the connection
|
||||||
|
- `p2p/filter/pubkey/<ID>`, where `<ID>` is the peer node ID (ie. the
|
||||||
|
pubkey.Address() for the peer's PubKey)
|
||||||
|
|
||||||
|
If either of these queries returns a non-zero ABCI code, Tendermint will refuse
|
||||||
|
to connect to the peer.
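
A sketch of answering these filter queries from a Go ABCI app follows; the ban lists and response codes are illustrative, only the query paths come from the text above, and the `types` package path is an assumption about this era's ABCI bindings.

```
package myapp

import (
	"strings"

	"github.com/tendermint/abci/types"
)

type FilterApp struct {
	types.BaseApplication
	bannedAddrs map[string]bool // "IP:PORT" -> refuse connection
	bannedIDs   map[string]bool // node ID -> refuse connection
}

func (app *FilterApp) Query(req types.RequestQuery) types.ResponseQuery {
	switch {
	case strings.HasPrefix(req.Path, "/p2p/filter/addr/"):
		addr := strings.TrimPrefix(req.Path, "/p2p/filter/addr/")
		if app.bannedAddrs[addr] {
			return types.ResponseQuery{Code: 1, Log: "address is banned"}
		}
	case strings.HasPrefix(req.Path, "p2p/filter/pubkey/"):
		id := strings.TrimPrefix(req.Path, "p2p/filter/pubkey/")
		if app.bannedIDs[id] {
			return types.ResponseQuery{Code: 1, Log: "node ID is banned"}
		}
	}
	// A zero code tells Tendermint the peer is acceptable.
	return types.ResponseQuery{Code: 0}
}
```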
|
||||||
|
|
||||||
|
## Info and the Handshake/Replay
|
||||||
|
|
||||||
|
On startup, Tendermint calls Info on the Query connection to get the latest
|
||||||
|
committed state of the app. The app MUST return information consistent with the
|
||||||
|
last block it successfully completed Commit for.
|
||||||
|
|
||||||
|
If the app successfully committed block H but not H+1, then `last_block_height =
|
||||||
|
H` and `last_block_app_hash = <hash returned by Commit for block H>`. If the app
|
||||||
|
failed during the Commit of block H, then `last_block_height = H-1` and
|
||||||
|
`last_block_app_hash = <hash returned by Commit for block H-1, which is the hash
|
||||||
|
in the header of block H>`.
|
||||||
|
|
||||||
|
We now distinguish three heights, and describe how Tendermint syncs itself with
|
||||||
|
the app.
|
||||||
|
|
||||||
|
```
|
||||||
|
storeBlockHeight = height of the last block Tendermint saw a commit for
|
||||||
|
stateBlockHeight = height of the last block for which Tendermint completed all
|
||||||
|
block processing and saved all ABCI results to disk
|
||||||
|
appBlockHeight = height of the last block for which the ABCI app successfully
|
||||||
|
completed Commit
|
||||||
|
```
|
||||||
|
|
||||||
|
Note we always have `storeBlockHeight >= stateBlockHeight` and `storeBlockHeight >= appBlockHeight`
|
||||||
|
Note also we never call Commit on an ABCI app twice for the same height.
|
||||||
|
|
||||||
|
The procedure is as follows.
|
||||||
|
|
||||||
|
First, some simple start conditions:
|
||||||
|
|
||||||
|
If `appBlockHeight == 0`, then call InitChain.
|
||||||
|
|
||||||
|
If `storeBlockHeight == 0`, we're done.
|
||||||
|
|
||||||
|
Now, some sanity checks:
|
||||||
|
|
||||||
|
If `storeBlockHeight < appBlockHeight`, error
|
||||||
|
If `storeBlockHeight < stateBlockHeight`, panic
|
||||||
|
If `storeBlockHeight > stateBlockHeight+1`, panic
|
||||||
|
|
||||||
|
Now, the meat:
|
||||||
|
|
||||||
|
If `storeBlockHeight == stateBlockHeight && appBlockHeight < storeBlockHeight`,
|
||||||
|
replay all blocks in full from `appBlockHeight` to `storeBlockHeight`.
|
||||||
|
This happens if we completed processing the block, but the app forgot its height.
|
||||||
|
|
||||||
|
If `storeBlockHeight == stateBlockHeight && appBlockHeight == storeBlockHeight`, we're done
|
||||||
|
This happens if we crashed at an opportune spot.
|
||||||
|
|
||||||
|
If `storeBlockHeight == stateBlockHeight+1`
|
||||||
|
This happens if we started processing the block but didn't finish.
|
||||||
|
|
||||||
|
If `appBlockHeight < stateBlockHeight`
|
||||||
|
replay all blocks in full from `appBlockHeight` to `storeBlockHeight-1`,
|
||||||
|
and replay the block at `storeBlockHeight` using the WAL.
|
||||||
|
This happens if the app forgot the last block it committed.
|
||||||
|
|
||||||
|
If `appBlockHeight == stateBlockHeight`,
|
||||||
|
replay the last block (storeBlockHeight) in full.
|
||||||
|
This happens if we crashed before the app finished Commit
|
||||||
|
|
||||||
|
If `appBlockHeight == storeBlockHeight`,
|
||||||
|
update the state using the saved ABCI responses but don't run the block against the real app.
|
||||||
|
This happens if we crashed after the app finished Commit but before Tendermint saved the state.
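
As a reading aid, the branches above can be collapsed into a single decision function; this Go sketch is ours and is not the actual handshake code in the consensus package.

```
// replayAction restates the rules above (the InitChain and
// storeBlockHeight == 0 start conditions are handled before this is called).
func replayAction(storeH, stateH, appH int64) string {
	switch {
	case storeH < appH:
		return "error: app is ahead of the block store"
	case storeH < stateH, storeH > stateH+1:
		return "panic: inconsistent store and state heights"
	case storeH == stateH && appH < storeH:
		return "replay all blocks from appH to storeH in full"
	case storeH == stateH && appH == storeH:
		return "done: everything is in sync"
	// From here on, storeH == stateH+1: the block was being processed
	// when we stopped.
	case appH < stateH:
		return "replay appH..storeH-1 in full, then replay storeH using the WAL"
	case appH == stateH:
		return "replay the last block (storeH) in full"
	default: // appH == storeH
		return "update state from the saved ABCI responses without re-running the block"
	}
}
```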
|
Some files were not shown because too many files have changed in this diff.