Mirror of https://github.com/fluencelabs/tendermint (synced 2025-07-16 04:41:59 +00:00)

Compare commits: 474 commits, v0.16.0 ... v0.19.5-rc
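If you have the mirror cloned, the same comparison can be reproduced with plain git; a minimal sketch, assuming both tags are fetched locally:

```
git clone https://github.com/fluencelabs/tendermint
cd tendermint
git rev-list --count v0.16.0..v0.19.5-rc   # number of commits in this range (474)
git diff --stat v0.16.0 v0.19.5-rc         # summary of the file changes shown below
```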
.circleci/config.yml (new file, 201 lines)
@@ -0,0 +1,201 @@
version: 2

defaults: &defaults
  working_directory: /go/src/github.com/tendermint/tendermint
  docker:
    - image: circleci/golang:1.10.0
  environment:
    GOBIN: /tmp/workspace/bin

jobs:
  setup_dependencies:
    <<: *defaults
    steps:
      - run: mkdir -p /tmp/workspace/bin
      - run: mkdir -p /tmp/workspace/profiles
      - checkout
      - restore_cache:
          keys:
            - v1-pkg-cache
      - run:
          name: tools
          command: |
            export PATH="$GOBIN:$PATH"
            make get_tools
      - run:
          name: dependencies
          command: |
            export PATH="$GOBIN:$PATH"
            make get_vendor_deps
      - run:
          name: binaries
          command: |
            export PATH="$GOBIN:$PATH"
            make install
      - persist_to_workspace:
          root: /tmp/workspace
          paths:
            - bin
            - profiles
      - save_cache:
          key: v1-pkg-cache
          paths:
            - /go/pkg
      - save_cache:
          key: v1-tree-{{ .Environment.CIRCLE_SHA1 }}
          paths:
            - /go/src/github.com/tendermint/tendermint

  setup_abci:
    <<: *defaults
    steps:
      - attach_workspace:
          at: /tmp/workspace
      - restore_cache:
          key: v1-pkg-cache
      - restore_cache:
          key: v1-tree-{{ .Environment.CIRCLE_SHA1 }}
      - run:
          name: Checkout abci
          command: |
            commit=$(bash scripts/dep_utils/parse.sh abci)
            go get -v -u -d github.com/tendermint/abci/...
            cd /go/src/github.com/tendermint/abci
            git checkout "$commit"
      - run:
          working_directory: /go/src/github.com/tendermint/abci
          name: Install abci
          command: |
            set -ex
            export PATH="$GOBIN:$PATH"
            make get_tools
            make get_vendor_deps
            make install
      - run: ls -lah /tmp/workspace/bin
      - persist_to_workspace:
          root: /tmp/workspace
          paths:
            - "bin/abci*"

  lint:
    <<: *defaults
    steps:
      - attach_workspace:
          at: /tmp/workspace
      - restore_cache:
          key: v1-pkg-cache
      - restore_cache:
          key: v1-tree-{{ .Environment.CIRCLE_SHA1 }}
      - run:
          name: metalinter
          command: |
            set -ex
            export PATH="$GOBIN:$PATH"
            make metalinter

  test_apps:
    <<: *defaults
    steps:
      - attach_workspace:
          at: /tmp/workspace
      - restore_cache:
          key: v1-pkg-cache
      - restore_cache:
          key: v1-tree-{{ .Environment.CIRCLE_SHA1 }}
      - run: sudo apt-get update && sudo apt-get install -y --no-install-recommends bsdmainutils
      - run:
          name: Run tests
          command: bash test/app/test.sh

  test_cover:
    <<: *defaults
    parallelism: 4
    steps:
      - attach_workspace:
          at: /tmp/workspace
      - restore_cache:
          key: v1-pkg-cache
      - restore_cache:
          key: v1-tree-{{ .Environment.CIRCLE_SHA1 }}
      - run:
          name: Run tests
          command: |
            for pkg in $(go list github.com/tendermint/tendermint/... | grep -v /vendor/ | circleci tests split --split-by=timings); do
              id=$(basename "$pkg")

              go test -timeout 5m -race -coverprofile=/tmp/workspace/profiles/$id.out -covermode=atomic "$pkg"
            done
      - persist_to_workspace:
          root: /tmp/workspace
          paths:
            - "profiles/*"

  test_persistence:
    <<: *defaults
    steps:
      - attach_workspace:
          at: /tmp/workspace
      - restore_cache:
          key: v1-pkg-cache
      - restore_cache:
          key: v1-tree-{{ .Environment.CIRCLE_SHA1 }}
      - run:
          name: Run tests
          command: bash test/persist/test_failure_indices.sh

  test_p2p:
    environment:
      GOBIN: /home/circleci/.go_workspace/bin
      GOPATH: /home/circleci/.go_workspace
    machine:
      image: circleci/classic:latest
    steps:
      - checkout
      - run: mkdir -p $GOPATH/src/github.com/tendermint
      - run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint
      - run: bash test/p2p/circleci.sh

  upload_coverage:
    <<: *defaults
    steps:
      - attach_workspace:
          at: /tmp/workspace
      - restore_cache:
          key: v1-tree-{{ .Environment.CIRCLE_SHA1 }}
      - run:
          name: gather
          command: |
            set -ex

            echo "mode: atomic" > coverage.txt
            for prof in $(ls /tmp/workspace/profiles/); do
              tail -n +2 /tmp/workspace/profiles/"$prof" >> coverage.txt
            done
      - run:
          name: upload
          command: bash <(curl -s https://codecov.io/bash) -f coverage.txt

workflows:
  version: 2
  test-suite:
    jobs:
      - setup_dependencies
      - setup_abci:
          requires:
            - setup_dependencies
      - lint:
          requires:
            - setup_dependencies
      - test_apps:
          requires:
            - setup_abci
      - test_cover:
          requires:
            - setup_dependencies
      - test_persistence:
          requires:
            - setup_abci
      - test_p2p
      - upload_coverage:
          requires:
            - test_cover
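The new pipeline can be sanity-checked before pushing; a minimal sketch, assuming the CircleCI local CLI is installed (the `lint` job name comes from the config above):

```
# Check that .circleci/config.yml parses.
circleci config validate

# Run a single job from the config locally in Docker.
circleci local execute --job lint
```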
.codecov.yml (deleted file, 26 lines)
@@ -1,26 +0,0 @@
#
# This codecov.yml is the default configuration for
# all repositories on Codecov. You may adjust the settings
# below in your own codecov.yml in your repository.
#
coverage:
  precision: 2
  round: down
  range: 70...100

  status:
    # Learn more at https://codecov.io/docs#yaml_default_commit_status
    project:
      default:
        threshold: 1% # allow this much decrease on project
    changes: false

comment:
  layout: "header, diff"
  behavior: default # update if exists else create new

ignore:
  - "docs"
  - "*.md"
  - "*.rst"
.github/ISSUE_TEMPLATE (vendored, 3 lines changed)
@@ -37,5 +37,8 @@ in a case of bug.

**How to reproduce it** (as minimally and precisely as possible):

**Logs (you can paste a part showing an error or attach the whole file)**:

**`/dump_consensus_state` output for consensus bugs**

**Anything else do we need to know**:
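The template asks for `/dump_consensus_state` output on consensus bugs; one way to capture it, assuming a local node with the RPC on the default 46657 port of this release line:

```
curl -s http://localhost:46657/dump_consensus_state > dump_consensus_state.json
```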
.gitignore (vendored, 4 lines added)
@@ -17,7 +17,11 @@ test/logs
coverage.txt
docs/_build
docs/tools
docs/abci-spec.rst
*.log

scripts/wal2json/wal2json
scripts/cutWALUntil/cutWALUntil

.idea/
*.iml
CHANGELOG.md (195 lines changed)
@@ -1,31 +1,190 @@
# Changelog

-## Roadmap
-
-BREAKING CHANGES:
-- Better support for injecting randomness
-- Upgrade consensus for more real-time use of evidence
-
-FEATURES:
-- Peer reputation management
-- Use the chain as its own CA for nodes and validators
-- Tooling to run multiple blockchains/apps, possibly in a single process
-- State syncing (without transaction replay)
-- Add authentication and rate-limitting to the RPC
-
-IMPROVEMENTS:
-- Improve subtleties around mempool caching and logic
-- Consensus optimizations:
-  - cache block parts for faster agreement after round changes
-  - propagate block parts rarest first
-- Better testing of the consensus state machine (ie. use a DSL)
-- Auto compiled serialization/deserialization code instead of go-wire reflection
-
-BUG FIXES:
-- Graceful handling/recovery for apps that have non-determinism or fail to halt
-- Graceful handling/recovery for violations of safety, or liveness
-
-## 0.16.0 (February 20th, 2017)
+## 0.19.5
+
+*May 20th, 2018*
+
+BREAKING CHANGES
+
+- [rpc/client] TxSearch and UnconfirmedTxs have new arguments (see below)
+- [version] Breaking changes to Go APIs will not be reflected in breaking
+  version change, but will be included in changelog.
+
+FEATURES
+
+- [rpc] `/tx_search` takes `page` and `per_page` args to paginate results
+- [rpc] `/unconfirmed_txs` takes `limit` arg to limit the output
+- [config] `mempool.size` and `mempool.cache_size` options
+
+IMPROVEMENTS
+
+- [docs] Lots of updates
+
+BUG FIXES
+
+- [mempool] Enforce upper bound on number of transactions
+
+## 0.19.4 (May 17th, 2018)
+
+IMPROVEMENTS
+
+- [state] Improve tx indexing by using batches
+- [consensus, state] Improve logging (more consensus logs, fewer tx logs)
+- [spec] Moved to `docs/spec` (TODO cleanup the rest of the docs ...)
+
+BUG FIXES
+
+- [consensus] Fix issue #1575 where a late proposer can get stuck
+
+BREAKING:
+
+- [rpc] `/tx_search` now outputs maximum `?per_page` txs (you can provide custom `?per_page` up to 100; defaults to 30). You can set the `?page` (starts at 1).
+- [rpc] `/unconfirmed_txs` now outputs maximum `?limit` txs (you can provide custom `?limit` up to 100; defaults to 30)
+
+## 0.19.3 (May 14th, 2018)
+
+FEATURES
+
+- [rpc] New `/consensus_state` returns just the votes seen at the current height
+
+IMPROVEMENTS
+
+- [rpc] Add stringified votes and fraction of power voted to `/dump_consensus_state`
+- [rpc] Add PeerStateStats to `/dump_consensus_state`
+
+BUG FIXES
+
+- [cmd] Set GenesisTime during `tendermint init`
+- [consensus] fix ValidBlock rules
+
+## 0.19.2 (April 30th, 2018)
+
+FEATURES:
+
+- [p2p] Allow peers with different Minor versions to connect
+- [rpc] `/net_info` includes `n_peers`
+
+IMPROVEMENTS:
+
+- [p2p] Various code comments, cleanup, error types
+- [p2p] Change some Error logs to Debug
+
+BUG FIXES:
+
+- [p2p] Fix reconnect to persistent peer when first dial fails
+- [p2p] Validate NodeInfo.ListenAddr
+- [p2p] Only allow (MaxNumPeers - MaxNumOutboundPeers) inbound peers
+- [p2p/pex] Limit max msg size to 64kB
+- [p2p] Fix panic when pex=false
+- [p2p] Allow multiple IPs per ID in AddrBook
+- [p2p] Fix before/after bugs in addrbook isBad()
+
+## 0.19.1 (April 27th, 2018)
+
+Note this release includes some small breaking changes in the RPC and one in the
+config that are really bug fixes. v0.19.1 will work with existing chains, and make Tendermint
+easier to use and debug. With <3
+
+BREAKING (MINOR)
+
+- [config] Removed `wal_light` setting. If you really needed this, let us know
+
+FEATURES:
+
+- [networks] moved in tooling from devops repo: terraform and ansible scripts for deploying testnets !
+- [cmd] Added `gen_node_key` command
+
+BUG FIXES
+
+Some of these are breaking in the RPC response, but they're really bugs!
+
+- [spec] Document address format and pubkey encoding pre and post Amino
+- [rpc] Lower case JSON field names
+- [rpc] Fix missing entries, improve, and lower case the fields in `/dump_consensus_state`
+- [rpc] Fix NodeInfo.Channels format to hex
+- [rpc] Add Validator address to `/status`
+- [rpc] Fix `prove` in ABCIQuery
+- [cmd] MarshalJSONIndent on init
+
+## 0.19.0 (April 13th, 2018)
+
+BREAKING:
+- [cmd] improved `testnet` command; now it can fill in `persistent_peers` for you in the config file and much more (see `tendermint testnet --help` for details)
+- [cmd] `show_node_id` now returns an error if there is no node key
+- [rpc]: changed the output format for the `/status` endpoint (see https://godoc.org/github.com/tendermint/tendermint/rpc/core#Status)
+
+Upgrade from go-wire to go-amino. This is a sweeping change that breaks everything that is
+serialized to disk or over the network.
+
+See github.com/tendermint/go-amino for details on the new format.
+
+See `scripts/wire2amino.go` for a tool to upgrade
+genesis/priv_validator/node_key JSON files.
+
+FEATURES
+
+- [test] docker-compose for local testnet setup (thanks Greg!)
+
+## 0.18.0 (April 6th, 2018)
+
+BREAKING:
+
+- [types] Merkle tree uses different encoding for varints (see tmlibs v0.8.0)
+- [types] ValidtorSet.GetByAddress returns -1 if no validator found
+- [p2p] require all addresses come with an ID no matter what
+- [rpc] Listening address must contain tcp:// or unix:// prefix
+
+FEATURES:
+
+- [rpc] StartHTTPAndTLSServer (not used yet)
+- [rpc] Include validator's voting power in `/status`
+- [rpc] `/tx` and `/tx_search` responses now include the transaction hash
+- [rpc] Include peer NodeIDs in `/net_info`
+
+IMPROVEMENTS:
+- [config] trim whitespace from elements of lists (like `persistent_peers`)
+- [rpc] `/tx_search` results are sorted by height
+- [p2p] do not try to connect to ourselves (ok, maybe only once)
+- [p2p] seeds respond with a bias towards good peers
+
+BUG FIXES:
+- [rpc] fix subscribing using an abci.ResponseDeliverTx tag
+- [rpc] fix tx_indexers matchRange
+- [rpc] fix unsubscribing (see tmlibs v0.8.0)
+
+## 0.17.1 (March 27th, 2018)
+
+BUG FIXES:
+- [types] Actually support `app_state` in genesis as `AppStateJSON`
+
+## 0.17.0 (March 27th, 2018)
+
+BREAKING:
+- [types] WriteSignBytes -> SignBytes
+
+IMPROVEMENTS:
+- [all] renamed `dummy` (`persistent_dummy`) to `kvstore` (`persistent_kvstore`) (name "dummy" is deprecated and will not work in the next breaking release)
+- [docs] note on determinism (docs/determinism.rst)
+- [genesis] `app_options` field is deprecated. please rename it to `app_state` in your genesis file(s). `app_options` will not work in the next breaking release
+- [p2p] dial seeds directly without potential peers
+- [p2p] exponential backoff for addrs in the address book
+- [p2p] mark peer as good if it contributed enough votes or block parts
+- [p2p] stop peer if it sends incorrect data, msg to unknown channel, msg we did not expect
+- [p2p] when `auth_enc` is true, all dialed peers must have a node ID in their address
+- [spec] various improvements
+- switched from glide to dep internally for package management
+- [wire] prep work for upgrading to new go-wire (which is now called go-amino)
+
+FEATURES:
+- [config] exposed `auth_enc` flag to enable/disable encryption
+- [config] added the `--p2p.private_peer_ids` flag and `PrivatePeerIDs` config variable (see config for description)
+- [rpc] added `/health` endpoint, which returns empty result for now
+- [types/priv_validator] new format and socket client, allowing for remote signing
+
+BUG FIXES:
+- [consensus] fix liveness bug by introducing ValidBlock mechanism
+
+## 0.16.0 (February 20th, 2018)

BREAKING CHANGES:
- [config] use $TMHOME/config for all config and json files
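The 0.19.5 pagination changes can be exercised directly against a node's RPC; a sketch, assuming the RPC listens on the default 46657 port (the `tx.height` query is illustrative and depends on the node's tx indexer configuration):

```
# Page through /tx_search results; per_page defaults to 30 and is capped at 100.
curl -s 'http://localhost:46657/tx_search?query="tx.height>5"&page=1&per_page=50'

# Cap the number of transactions returned by /unconfirmed_txs.
curl -s 'http://localhost:46657/unconfirmed_txs?limit=10'
```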
CONTRIBUTING.md
@@ -34,15 +34,26 @@ Please don't make Pull Requests to `master`.

## Dependencies

-We use [glide](https://github.com/masterminds/glide) to manage dependencies.
-That said, the master branch of every Tendermint repository should just build with `go get`, which means they should be kept up-to-date with their dependencies so we can get away with telling people they can just `go get` our software.
-Since some dependencies are not under our control, a third party may break our build, in which case we can fall back on `glide install`. Even for dependencies under our control, glide helps us keeps multiple repos in sync as they evolve. Anything with an executable, such as apps, tools, and the core, should use glide.
-
-Run `bash scripts/glide/status.sh` to get a list of vendored dependencies that may not be up-to-date.
+We use [dep](https://github.com/golang/dep) to manage dependencies.
+
+That said, the master branch of every Tendermint repository should just build
+with `go get`, which means they should be kept up-to-date with their
+dependencies so we can get away with telling people they can just `go get` our
+software.
+
+Since some dependencies are not under our control, a third party may break our
+build, in which case we can fall back on `dep ensure` (or `make
+get_vendor_deps`). Even for dependencies under our control, dep helps us to
+keep multiple repos in sync as they evolve. Anything with an executable, such
+as apps, tools, and the core, should use dep.
+
+Run `dep status` to get a list of vendored dependencies that may not be
+up-to-date.

## Vagrant

-If you are a [Vagrant](https://www.vagrantup.com/) user, you can get started hacking Tendermint with the commands below.
+If you are a [Vagrant](https://www.vagrantup.com/) user, you can get started
+hacking Tendermint with the commands below.

NOTE: In case you installed Vagrant in 2017, you might need to run
`vagrant box update` to upgrade to the latest `ubuntu/xenial64`.

@@ -53,11 +64,14 @@ vagrant ssh
make test
```

## Testing

-All repos should be hooked up to circle.
-If they have `.go` files in the root directory, they will be automatically tested by circle using `go test -v -race ./...`. If not, they will need a `circle.yml`. Ideally, every repo has a `Makefile` that defines `make test` and includes its continuous integration status using a badge in the `README.md`.
+All repos should be hooked up to [CircleCI](https://circleci.com/).
+
+If they have `.go` files in the root directory, they will be automatically
+tested by circle using `go test -v -race ./...`. If not, they will need a
+`circle.yml`. Ideally, every repo has a `Makefile` that defines `make test` and
+includes its continuous integration status using a badge in the `README.md`.

## Branching Model and Release
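The glide-to-dep switch changes the day-to-day dependency workflow; a sketch of the common commands, assuming dep is installed (e.g. via `make get_tools`):

```
# List vendored dependencies that are out of sync with Gopkg.toml/Gopkg.lock.
dep status

# Locally: resolve and re-vendor dependencies (what `make ensure_deps` wraps).
dep ensure

# In CI: vendor exactly what Gopkg.lock pins (what `make get_vendor_deps` wraps).
dep ensure -vendor-only
```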
DOCKER/.gitignore (vendored, new file, 1 line)
@@ -0,0 +1 @@
tendermint
DOCKER/Dockerfile
@@ -1,45 +1,39 @@
-FROM alpine:3.6
+FROM alpine:3.7
MAINTAINER Greg Szabo <greg@tendermint.com>

-# This is the release of tendermint to pull in.
-ENV TM_VERSION 0.15.0
-ENV TM_SHA256SUM 71cc271c67eca506ca492c8b90b090132f104bf5dbfe0af2702a50886e88de17
-
-# Tendermint will be looking for genesis file in /tendermint (unless you change
-# `genesis_file` in config.toml). You can put your config.toml and private
-# validator file into /tendermint.
+# Tendermint will be looking for the genesis file in /tendermint/config/genesis.json
+# (unless you change `genesis_file` in config.toml). You can put your config.toml and
+# private validator file into /tendermint/config.
#
# The /tendermint/data dir is used by tendermint to store state.
-ENV DATA_ROOT /tendermint
-ENV TMHOME $DATA_ROOT
-
-# Set user right away for determinism
-RUN addgroup tmuser && \
-    adduser -S -G tmuser tmuser
-
-# Create directory for persistence and give our user ownership
-RUN mkdir -p $DATA_ROOT && \
-    chown -R tmuser:tmuser $DATA_ROOT
+ENV TMHOME /tendermint

# OS environment setup
+# Set user right away for determinism, create directory for persistence and give our user ownership
# jq and curl used for extracting `pub_key` from private validator while
# deploying tendermint with Kubernetes. It is nice to have bash so the users
# could execute bash commands.
-RUN apk add --no-cache bash curl jq
+RUN apk update && \
+    apk upgrade && \
+    apk --no-cache add curl jq bash && \
+    addgroup tmuser && \
+    adduser -S -G tmuser tmuser -h "$TMHOME"

-RUN apk add --no-cache openssl && \
-    wget https://s3-us-west-2.amazonaws.com/tendermint/binaries/tendermint/v${TM_VERSION}/tendermint_${TM_VERSION}_linux_amd64.zip && \
-    echo "${TM_SHA256SUM} tendermint_${TM_VERSION}_linux_amd64.zip" | sha256sum -c && \
-    unzip -d /bin tendermint_${TM_VERSION}_linux_amd64.zip && \
-    apk del openssl && \
-    rm -f tendermint_${TM_VERSION}_linux_amd64.zip
+# Run the container with tmuser by default. (UID=100, GID=1000)
USER tmuser

# Expose the data directory as a volume since there's mutable state in there
-VOLUME $DATA_ROOT
+VOLUME [ $TMHOME ]

-# p2p port
-EXPOSE 46656
-# rpc port
-EXPOSE 46657
+WORKDIR $TMHOME

-ENTRYPOINT ["tendermint"]
+# p2p and rpc port
+EXPOSE 46656 46657

+ENTRYPOINT ["/usr/bin/tendermint"]
CMD ["node", "--moniker=`hostname`"]
+STOPSIGNAL SIGTERM
+
+ARG BINARY=tendermint
+COPY $BINARY /usr/bin/tendermint
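The rewritten Dockerfile copies a pre-built binary into the image instead of downloading a release archive, which is what the top-level `build-docker` target (shown later in this compare) relies on; a sketch, assuming Docker is available:

```
# Build a Linux binary and package it into the image.
make build-linux
make build-docker

# Run a single node; config and data live under /tendermint inside the container.
docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint init
docker run -it --rm -v "/tmp:/tendermint" -p 46656-46657:46656-46657 tendermint/tendermint node --proxy_app=kvstore
```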
DOCKER/Dockerfile.develop
@@ -1,4 +1,4 @@
-FROM alpine:3.6
+FROM alpine:3.7

ENV DATA_ROOT /tendermint
ENV TMHOME $DATA_ROOT
@@ -18,9 +18,9 @@ RUN mkdir -p /go/src/github.com/tendermint/tendermint && \
    cd /go/src/github.com/tendermint/tendermint && \
    git clone https://github.com/tendermint/tendermint . && \
    git checkout develop && \
+   make get_tools && \
    make get_vendor_deps && \
    make install && \
-   glide cc && \
    cd - && \
    rm -rf /go/src/github.com/tendermint/tendermint && \
    apk del go build-base git
@@ -32,4 +32,4 @@ EXPOSE 46657

ENTRYPOINT ["tendermint"]

-CMD ["node", "--moniker=`hostname`", "--proxy_app=dummy"]
+CMD ["node", "--moniker=`hostname`", "--proxy_app=kvstore"]
DOCKER/Dockerfile.testing (new file, 18 lines)
@@ -0,0 +1,18 @@
FROM golang:1.10.1


# Grab deps (jq, hexdump, xxd, killall)
RUN apt-get update && \
    apt-get install -y --no-install-recommends \
    jq bsdmainutils vim-common psmisc netcat

# Add testing deps for curl
RUN echo 'deb http://httpredir.debian.org/debian testing main non-free contrib' >> /etc/apt/sources.list && \
    apt-get update && \
    apt-get install -y --no-install-recommends curl

VOLUME /go

EXPOSE 46656
EXPOSE 46657
DOCKER/Makefile
@@ -7,6 +7,9 @@ push:
build_develop:
	docker build -t "tendermint/tendermint:develop" -f Dockerfile.develop .

+build_testing:
+	docker build --tag tendermint/testing -f ./Dockerfile.testing .
+
push_develop:
	docker push "tendermint/tendermint:develop"
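The new `build_testing` target sits next to the existing image targets; a sketch of how they are invoked from the DOCKER directory:

```
cd DOCKER
make build_develop   # image built from the develop branch
make build_testing   # image with the extra test tooling from Dockerfile.testing
make push_develop    # requires push access to the tendermint org on Docker Hub
```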
DOCKER/README.md
@@ -1,6 +1,7 @@
# Supported tags and respective `Dockerfile` links

-- `0.15.0`, `latest` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/170777300ea92dc21a8aec1abc16cb51812513a4/DOCKER/Dockerfile)
+- `0.17.1`, `latest` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/208ac32fa266657bd6c304e84ec828aa252bb0b8/DOCKER/Dockerfile)
+- `0.15.0` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/170777300ea92dc21a8aec1abc16cb51812513a4/DOCKER/Dockerfile)
- `0.13.0` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/a28b3fff49dce2fb31f90abb2fc693834e0029c2/DOCKER/Dockerfile)
- `0.12.1` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/457c688346b565e90735431619ca3ca597ef9007/DOCKER/Dockerfile)
- `0.12.0` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/70d8afa6e952e24c573ece345560a5971bf2cc0e/DOCKER/Dockerfile)
@@ -34,13 +35,13 @@ To get started developing applications, see the [application developers guide](h

# How to use this image

-## Start one instance of the Tendermint core with the `dummy` app
+## Start one instance of the Tendermint core with the `kvstore` app

A very simple example of a built-in app and Tendermint core in one container.

```
docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint init
-docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint node --proxy_app=dummy
+docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint node --proxy_app=kvstore
```

## mintnet-kubernetes
Gopkg.lock (generated, new file, 389 lines)
@@ -0,0 +1,389 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
Gopkg.toml (new file, 94 lines)
@@ -0,0 +1,94 @@
# Gopkg.toml example
#
# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
# for detailed Gopkg.toml documentation.
#
# required = ["github.com/user/thing/cmd/thing"]
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
#
# [[constraint]]
#   name = "github.com/user/project"
#   version = "1.0.0"
#
# [[constraint]]
#   name = "github.com/user/project2"
#   branch = "dev"
#   source = "github.com/myfork/project2"
#
# [[override]]
#   name = "github.com/x/y"
#   version = "2.4.0"
#
# [prune]
#   non-go = false
#   go-tests = true
#   unused-packages = true


[[constraint]]
  name = "github.com/ebuchman/fail-test"
  branch = "master"

[[constraint]]
  name = "github.com/fortytw2/leaktest"
  branch = "master"

[[constraint]]
  name = "github.com/go-kit/kit"
  version = "~0.6.0"

[[constraint]]
  name = "github.com/gogo/protobuf"
  version = "~1.0.0"

[[constraint]]
  name = "github.com/golang/protobuf"
  version = "~1.0.0"

[[constraint]]
  name = "github.com/gorilla/websocket"
  version = "~1.2.0"

[[constraint]]
  name = "github.com/pkg/errors"
  version = "~0.8.0"

[[constraint]]
  name = "github.com/rcrowley/go-metrics"
  branch = "master"

[[constraint]]
  name = "github.com/spf13/cobra"
  version = "~0.0.1"

[[constraint]]
  name = "github.com/spf13/viper"
  version = "~1.0.0"

[[constraint]]
  name = "github.com/stretchr/testify"
  version = "~1.2.1"

[[constraint]]
  name = "github.com/tendermint/abci"
  version = "~0.10.3"

[[constraint]]
  name = "github.com/tendermint/go-crypto"
  version = "~0.6.2"

[[constraint]]
  name = "github.com/tendermint/go-amino"
  version = "0.9.9"

[[override]]
  name = "github.com/tendermint/tmlibs"
  version = "~0.8.3-rc0"

[[constraint]]
  name = "google.golang.org/grpc"
  version = "~1.7.3"

[prune]
  go-tests = true
  unused-packages = true
Makefile (mode changed: normal file to executable, 143 lines changed)
@@ -1,26 +1,26 @@
GOTOOLS = \
-	github.com/tendermint/glide \
-	# gopkg.in/alecthomas/gometalinter.v2
+	github.com/golang/dep/cmd/dep \
+	gopkg.in/alecthomas/gometalinter.v2
PACKAGES=$(shell go list ./... | grep -v '/vendor/')
BUILD_TAGS?=tendermint
BUILD_FLAGS = -ldflags "-X github.com/tendermint/tendermint/version.GitCommit=`git rev-parse --short=8 HEAD`"

all: check build test install

-check: check_tools get_vendor_deps
+check: check_tools ensure_deps


########################################
### Build

build:
-	go build $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' -o build/tendermint ./cmd/tendermint/
+	CGO_ENABLED=0 go build $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' -o build/tendermint ./cmd/tendermint/

build_race:
-	go build -race $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' -o build/tendermint ./cmd/tendermint
+	CGO_ENABLED=0 go build -race $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' -o build/tendermint ./cmd/tendermint

install:
-	go install $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' ./cmd/tendermint
+	CGO_ENABLED=0 go install $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' ./cmd/tendermint

########################################
### Distribution

@@ -40,36 +40,84 @@ check_tools:
get_tools:
	@echo "--> Installing tools"
	go get -u -v $(GOTOOLS)
-	# @gometalinter.v2 --install
+	@gometalinter.v2 --install

update_tools:
	@echo "--> Updating tools"
	@go get -u $(GOTOOLS)

+#Run this from CI
get_vendor_deps:
	@rm -rf vendor/
-	@echo "--> Running glide install"
-	@glide install
+	@echo "--> Running dep"
+	@dep ensure -vendor-only
+
+#Run this locally.
+ensure_deps:
+	@rm -rf vendor/
+	@echo "--> Running dep"
+	@dep ensure

draw_deps:
	@# requires brew install graphviz or apt-get install graphviz
	go get github.com/RobotsAndPencils/goviz
	@goviz -i github.com/tendermint/tendermint/cmd/tendermint -d 3 | dot -Tpng -o dependency-graph.png

+get_deps_bin_size:
+	@# Copy of build recipe with additional flags to perform binary size analysis
+	$(eval $(shell go build -work -a $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' -o build/tendermint ./cmd/tendermint/ 2>&1))
+	@find $(WORK) -type f -name "*.a" | xargs -I{} du -hxs "{}" | sort -rh | sed -e s:${WORK}/::g > deps_bin_size.log
+	@echo "Results can be found here: $(CURDIR)/deps_bin_size.log"

########################################
### Testing

-test:
-	@echo "--> Running go test"
-	@go test $(PACKAGES)
+## required to be run first by most tests
+build_docker_test_image:
+	docker build -t tester -f ./test/docker/Dockerfile .

-test_race:
-	@echo "--> Running go test --race"
-	@go test -v -race $(PACKAGES)
+### coverage, app, persistence, and libs tests
+test_cover:
+	# run the go unit tests with coverage
+	bash test/test_cover.sh
+
+test_apps:
+	# run the app tests using bash
+	# requires `abci-cli` and `tendermint` binaries installed
+	bash test/app/test.sh
+
+test_persistence:
+	# run the persistence tests using bash
+	# requires `abci-cli` installed
+	docker run --name run_persistence -t tester bash test/persist/test_failure_indices.sh
+
+	# TODO undockerize
+	# bash test/persist/test_failure_indices.sh
+
+test_p2p:
+	docker rm -f rsyslog || true
+	rm -rf test/logs || true
+	mkdir test/logs
+	cd test/
+	docker run -d -v "logs:/var/log/" -p 127.0.0.1:5514:514/udp --name rsyslog voxxit/rsyslog
+	cd ..
+	# requires 'tester' the image from above
+	bash test/p2p/test.sh tester
+
+need_abci:
+	bash scripts/install_abci_apps.sh

test_integrations:
-	@bash ./test/test.sh
+	make build_docker_test_image
+	make get_tools
+	make get_vendor_deps
+	make install
+	make need_abci
+	make test_cover
+	make test_apps
+	make test_persistence
+	make test_p2p

test_release:
	@go test -tags release $(PACKAGES)

@@ -79,10 +127,17 @@ test100:

vagrant_test:
	vagrant up
	vagrant ssh -c 'make install'
-	vagrant ssh -c 'make test_race'
	vagrant ssh -c 'make test_integrations'

+### go tests
+test:
+	@echo "--> Running go test"
+	@go test $(PACKAGES)
+
+test_race:
+	@echo "--> Running go test --race"
+	@go test -v -race $(PACKAGES)


########################################
### Formatting, linting, and vetting

@@ -92,7 +147,7 @@ fmt:

metalinter:
	@echo "--> Running linter"
-	gometalinter.v2 --vendor --deadline=600s --disable-all \
+	@gometalinter.v2 --vendor --deadline=600s --disable-all \
	--enable=deadcode \
	--enable=gosimple \
	--enable=misspell \

@@ -123,8 +178,56 @@ metalinter_all:
	@echo "--> Running linter (all)"
	gometalinter.v2 --vendor --deadline=600s --enable-all --disable=lll ./...

+###########################################################
+### Docker image
+
+build-docker:
+	cp build/tendermint DOCKER/tendermint
+	docker build --label=tendermint --tag="tendermint/tendermint" DOCKER
+	rm -rf DOCKER/tendermint
+
+###########################################################
+### Local testnet using docker
+
+# Build linux binary on other platforms
+build-linux:
+	GOOS=linux GOARCH=amd64 $(MAKE) build
+
+build-docker-localnode:
+	cd networks/local
+	make
+
+# Run a 4-node testnet locally
+localnet-start: localnet-stop
+	@if ! [ -f build/node0/config/genesis.json ]; then docker run --rm -v $(CURDIR)/build:/tendermint:Z tendermint/localnode testnet --v 4 --o . --populate-persistent-peers --starting-ip-address 192.167.10.2 ; fi
+	docker-compose up
+
+# Stop testnet
+localnet-stop:
+	docker-compose down
+
+###########################################################
+### Remote full-nodes (sentry) using terraform and ansible
+
+# Server management
+sentry-start:
+	@if [ -z "$(DO_API_TOKEN)" ]; then echo "DO_API_TOKEN environment variable not set." ; false ; fi
+	@if ! [ -f $(HOME)/.ssh/id_rsa.pub ]; then ssh-keygen ; fi
+	cd networks/remote/terraform && terraform init && terraform apply -var DO_API_TOKEN="$(DO_API_TOKEN)" -var SSH_KEY_FILE="$(HOME)/.ssh/id_rsa.pub"
+	@if ! [ -f $(CURDIR)/build/node0/config/genesis.json ]; then docker run --rm -v $(CURDIR)/build:/tendermint:Z tendermint/localnode testnet --v 0 --n 4 --o . ; fi
+	cd networks/remote/ansible && ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook -i inventory/digital_ocean.py -l sentrynet install.yml
+	@echo "Next step: Add your validator setup in the genesis.json and config.tml files and run \"make sentry-config\". (Public key of validator, chain ID, peer IP and node ID.)"
+
+# Configuration management
+sentry-config:
+	cd networks/remote/ansible && ansible-playbook -i inventory/digital_ocean.py -l sentrynet config.yml -e BINARY=$(CURDIR)/build/tendermint -e CONFIGDIR=$(CURDIR)/build
+
+sentry-stop:
+	@if [ -z "$(DO_API_TOKEN)" ]; then echo "DO_API_TOKEN environment variable not set." ; false ; fi
+	cd networks/remote/terraform && terraform destroy -var DO_API_TOKEN="$(DO_API_TOKEN)" -var SSH_KEY_FILE="$(HOME)/.ssh/id_rsa.pub"
+
# To avoid unintended conflicts with file names, always add to .PHONY
# unless there is a reason not to.
# https://www.gnu.org/software/make/manual/html_node/Phony-Targets.html
-.PHONY: check build build_race dist install check_tools get_tools update_tools get_vendor_deps draw_deps test test_race test_integrations test_release test100 vagrant_test fmt metalinter metalinter_all
+.PHONY: check build build_race dist install check_tools get_tools update_tools get_vendor_deps draw_deps test_cover test_apps test_persistence test_p2p test test_race test_integrations test_release test100 vagrant_test fmt build-linux localnet-start localnet-stop build-docker build-docker-localnode sentry-start sentry-config sentry-stop
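The new localnet targets are the quickest way to try the docker-compose testnet added in 0.19.x; a sketch, assuming Docker and docker-compose are installed:

```
make build-linux              # linux binary for the containers
make build-docker-localnode   # build the tendermint/localnode image
make localnet-start           # generate testnet files and start 4 nodes
make localnet-stop            # tear the testnet down
```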
30
README.md
30
README.md
@@ -24,7 +24,14 @@ _NOTE: This is alpha software. Please contact us if you intend to run it in prod
Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine - written in any programming language -
and securely replicates it on many machines.

For more information, from introduction to install to application development, [Read The Docs](https://tendermint.readthedocs.io/en/master/).
For protocol details, see [the specification](/docs/spec).

## Security

To report a security vulnerability, see our [bug bounty
program](https://tendermint.com/security).

For examples of the kinds of bugs we're looking for, see [SECURITY.md](SECURITY.md)

## Minimum requirements

@@ -34,19 +41,20 @@ Go version | Go1.9 or higher

## Install

To download pre-built binaries, see our [downloads page](https://tendermint.com/downloads).
See the [install instructions](/docs/install.rst)

To install from source, you should be able to:
## Quick Start

`go get -u github.com/tendermint/tendermint/cmd/tendermint`

For more details (or if it fails), [read the docs](https://tendermint.readthedocs.io/en/master/install.html).
- [Single node](/docs/using-tendermint.rst)
- [Local cluster using docker-compose](/networks/local)
- [Remote cluster using terraform and ansible](/networks/remote)

## Resources

### Tendermint Core

All resources involving the use of, building application on, or developing for, tendermint, can be found at [Read The Docs](https://tendermint.readthedocs.io/en/master/). Additional information about some - and eventually all - of the sub-projects below, can be found at Read The Docs.
For more, [Read The Docs](https://tendermint.readthedocs.io/en/master/).
Additional information about some - and eventually all - of the sub-projects below, can be found at Read The Docs.

### Sub-projects

@@ -61,8 +69,8 @@ All resources involving the use of, building application on, or developing for,

### Applications

* [Ethermint](http://github.com/tendermint/ethermint); Ethereum on Tendermint
* [Cosmos SDK](http://github.com/cosmos/cosmos-sdk); a cryptocurrency application framework
* [Ethermint](http://github.com/tendermint/ethermint); Ethereum on Tendermint
* [Many more](https://tendermint.readthedocs.io/en/master/ecosystem.html#abci-applications)

### More
@@ -85,7 +93,11 @@ According to SemVer, anything in the public API can change at any time before ve

To provide some stability to Tendermint users in these 0.X.X days, the MINOR version is used
to signal breaking changes across a subset of the total public API. This subset includes all
interfaces exposed to other processes (cli, rpc, p2p, etc.), as well as parts of the following packages:
interfaces exposed to other processes (cli, rpc, p2p, etc.), but does not
include the in-process Go APIs.

That said, breaking changes in the following packages will be documented in the
CHANGELOG even if they don't lead to MINOR version bumps:

- types
- rpc/client
23	ROADMAP.md	Normal file
@@ -0,0 +1,23 @@
# Roadmap

BREAKING CHANGES:
- Better support for injecting randomness
- Upgrade consensus for more real-time use of evidence

FEATURES:
- Use the chain as its own CA for nodes and validators
- Tooling to run multiple blockchains/apps, possibly in a single process
- State syncing (without transaction replay)
- Add authentication and rate-limiting to the RPC

IMPROVEMENTS:
- Improve subtleties around mempool caching and logic
- Consensus optimizations:
  - cache block parts for faster agreement after round changes
  - propagate block parts rarest first
- Better testing of the consensus state machine (i.e. use a DSL)
- Auto-compiled serialization/deserialization code instead of go-wire reflection

BUG FIXES:
- Graceful handling/recovery for apps that have non-determinism or fail to halt
- Graceful handling/recovery for violations of safety or liveness
71	SECURITY.md	Normal file
@@ -0,0 +1,71 @@
# Security

As part of our [Coordinated Vulnerability Disclosure
Policy](https://tendermint.com/security), we operate a bug bounty.
See the policy for more details on submissions and rewards.

Here is a list of examples of the kinds of bugs we're most interested in:

## Specification

- Conceptual flaws
- Ambiguities, inconsistencies, or incorrect statements
- Mis-match between specification and implementation of any component

## Consensus

Assuming less than 1/3 of the voting power is Byzantine (malicious):

- Validation of blockchain data structures, including blocks, block parts,
  votes, and so on
- Execution of blocks
- Validator set changes
- Proposer round robin
- Two nodes committing conflicting blocks for the same height (safety failure)
- A correct node signing conflicting votes
- A node halting (liveness failure)
- Syncing new and old nodes

## Networking

- Authenticated encryption (MITM, information leakage)
- Eclipse attacks
- Sybil attacks
- Long-range attacks
- Denial-of-Service

## RPC

- Write-access to anything besides sending transactions
- Denial-of-Service
- Leakage of secrets

## Denial-of-Service

Attacks may come through the P2P network or the RPC:

- Amplification attacks
- Resource abuse
- Deadlocks and race conditions
- Panics and unhandled errors

## Libraries

- Serialization (Amino)
- Reading/Writing files and databases
- Logging and monitoring

## Cryptography

- Elliptic curves for validator signatures
- Hash algorithms and Merkle trees for block validation
- Authenticated encryption for P2P connections

## Light Client

- Validation of blockchain data structures
- Correctly validating an incorrect proof
- Incorrectly validating a correct proof
- Syncing validator set changes
34	Vagrantfile	vendored
@@ -10,31 +10,37 @@ Vagrant.configure("2") do |config|
|
||||
end
|
||||
|
||||
config.vm.provision "shell", inline: <<-SHELL
|
||||
# add docker repo
|
||||
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
|
||||
add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu xenial stable"
|
||||
|
||||
# and golang 1.9 support
|
||||
# official repo doesn't have race detection runtime...
|
||||
# add-apt-repository ppa:gophers/archive
|
||||
add-apt-repository ppa:longsleep/golang-backports
|
||||
apt-get update
|
||||
|
||||
# install base requirements
|
||||
apt-get update
|
||||
apt-get install -y --no-install-recommends wget curl jq zip \
|
||||
make shellcheck bsdmainutils psmisc
|
||||
apt-get install -y docker-ce golang-1.9-go
|
||||
apt-get install -y language-pack-en
|
||||
|
||||
# install docker
|
||||
apt-get install -y --no-install-recommends apt-transport-https \
|
||||
ca-certificates curl software-properties-common
|
||||
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
|
||||
add-apt-repository \
|
||||
"deb [arch=amd64] https://download.docker.com/linux/ubuntu \
|
||||
$(lsb_release -cs) \
|
||||
stable"
|
||||
apt-get install -y docker-ce
|
||||
usermod -a -G docker vagrant
|
||||
|
||||
# install go
|
||||
wget -q https://dl.google.com/go/go1.10.1.linux-amd64.tar.gz
|
||||
tar -xvf go1.10.1.linux-amd64.tar.gz
|
||||
mv go /usr/local
|
||||
rm -f go1.10.1.linux-amd64.tar.gz
|
||||
|
||||
# cleanup
|
||||
apt-get autoremove -y
|
||||
|
||||
# needed for docker
|
||||
usermod -a -G docker vagrant
|
||||
|
||||
# set env variables
|
||||
echo 'export PATH=$PATH:/usr/lib/go-1.9/bin:/home/vagrant/go/bin' >> /home/vagrant/.bash_profile
|
||||
echo 'export GOROOT=/usr/local/go' >> /home/vagrant/.bash_profile
|
||||
echo 'export GOPATH=/home/vagrant/go' >> /home/vagrant/.bash_profile
|
||||
echo 'export PATH=$PATH:$GOROOT/bin:$GOPATH/bin' >> /home/vagrant/.bash_profile
|
||||
echo 'export LC_ALL=en_US.UTF-8' >> /home/vagrant/.bash_profile
|
||||
echo 'cd go/src/github.com/tendermint/tendermint' >> /home/vagrant/.bash_profile
|
||||
|
||||
|
@@ -14,7 +14,7 @@ if [ ! -d $DATA ]; then
|
||||
echo "starting node"
|
||||
tendermint node \
|
||||
--home $DATA \
|
||||
--proxy_app dummy \
|
||||
--proxy_app kvstore \
|
||||
--p2p.laddr tcp://127.0.0.1:56656 \
|
||||
--rpc.laddr tcp://127.0.0.1:56657 \
|
||||
--log_level error &
|
||||
@@ -35,7 +35,7 @@ cp -R $DATA $HOME1
|
||||
echo "starting validator node"
|
||||
tendermint node \
|
||||
--home $HOME1 \
|
||||
--proxy_app dummy \
|
||||
--proxy_app kvstore \
|
||||
--p2p.laddr tcp://127.0.0.1:56656 \
|
||||
--rpc.laddr tcp://127.0.0.1:56657 \
|
||||
--log_level error &
|
||||
@@ -48,7 +48,7 @@ cp $HOME1/genesis.json $HOME2
|
||||
printf "starting downloader node"
|
||||
tendermint node \
|
||||
--home $HOME2 \
|
||||
--proxy_app dummy \
|
||||
--proxy_app kvstore \
|
||||
--p2p.laddr tcp://127.0.0.1:56666 \
|
||||
--rpc.laddr tcp://127.0.0.1:56667 \
|
||||
--p2p.persistent_peers 127.0.0.1:56656 \
|
||||
|
@@ -4,8 +4,8 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/tendermint/go-amino"
|
||||
"github.com/tendermint/go-crypto"
|
||||
"github.com/tendermint/go-wire"
|
||||
|
||||
proto "github.com/tendermint/tendermint/benchmarks/proto"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
@@ -14,26 +14,35 @@ import (
|
||||
|
||||
func BenchmarkEncodeStatusWire(b *testing.B) {
|
||||
b.StopTimer()
|
||||
pubKey := crypto.GenPrivKeyEd25519().PubKey()
|
||||
cdc := amino.NewCodec()
|
||||
ctypes.RegisterAmino(cdc)
|
||||
nodeKey := p2p.NodeKey{PrivKey: crypto.GenPrivKeyEd25519()}
|
||||
status := &ctypes.ResultStatus{
|
||||
NodeInfo: p2p.NodeInfo{
|
||||
PubKey: pubKey,
|
||||
ID: nodeKey.ID(),
|
||||
Moniker: "SOMENAME",
|
||||
Network: "SOMENAME",
|
||||
ListenAddr: "SOMEADDR",
|
||||
Version: "SOMEVER",
|
||||
Other: []string{"SOMESTRING", "OTHERSTRING"},
|
||||
},
|
||||
PubKey: pubKey,
|
||||
LatestBlockHash: []byte("SOMEBYTES"),
|
||||
LatestBlockHeight: 123,
|
||||
LatestBlockTime: time.Unix(0, 1234),
|
||||
SyncInfo: ctypes.SyncInfo{
|
||||
LatestBlockHash: []byte("SOMEBYTES"),
|
||||
LatestBlockHeight: 123,
|
||||
LatestBlockTime: time.Unix(0, 1234),
|
||||
},
|
||||
ValidatorInfo: ctypes.ValidatorInfo{
|
||||
PubKey: nodeKey.PubKey(),
|
||||
},
|
||||
}
|
||||
b.StartTimer()
|
||||
|
||||
counter := 0
|
||||
for i := 0; i < b.N; i++ {
|
||||
jsonBytes := wire.JSONBytes(status)
|
||||
jsonBytes, err := cdc.MarshalJSON(status)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
counter += len(jsonBytes)
|
||||
}
|
||||
|
||||
@@ -41,9 +50,11 @@ func BenchmarkEncodeStatusWire(b *testing.B) {
|
||||
|
||||
func BenchmarkEncodeNodeInfoWire(b *testing.B) {
|
||||
b.StopTimer()
|
||||
pubKey := crypto.GenPrivKeyEd25519().PubKey()
|
||||
cdc := amino.NewCodec()
|
||||
ctypes.RegisterAmino(cdc)
|
||||
nodeKey := p2p.NodeKey{PrivKey: crypto.GenPrivKeyEd25519()}
|
||||
nodeInfo := p2p.NodeInfo{
|
||||
PubKey: pubKey,
|
||||
ID: nodeKey.ID(),
|
||||
Moniker: "SOMENAME",
|
||||
Network: "SOMENAME",
|
||||
ListenAddr: "SOMEADDR",
|
||||
@@ -54,16 +65,21 @@ func BenchmarkEncodeNodeInfoWire(b *testing.B) {
|
||||
|
||||
counter := 0
|
||||
for i := 0; i < b.N; i++ {
|
||||
jsonBytes := wire.JSONBytes(nodeInfo)
|
||||
jsonBytes, err := cdc.MarshalJSON(nodeInfo)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
counter += len(jsonBytes)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkEncodeNodeInfoBinary(b *testing.B) {
|
||||
b.StopTimer()
|
||||
pubKey := crypto.GenPrivKeyEd25519().PubKey()
|
||||
cdc := amino.NewCodec()
|
||||
ctypes.RegisterAmino(cdc)
|
||||
nodeKey := p2p.NodeKey{PrivKey: crypto.GenPrivKeyEd25519()}
|
||||
nodeInfo := p2p.NodeInfo{
|
||||
PubKey: pubKey,
|
||||
ID: nodeKey.ID(),
|
||||
Moniker: "SOMENAME",
|
||||
Network: "SOMENAME",
|
||||
ListenAddr: "SOMEADDR",
|
||||
@@ -74,7 +90,7 @@ func BenchmarkEncodeNodeInfoBinary(b *testing.B) {
|
||||
|
||||
counter := 0
|
||||
for i := 0; i < b.N; i++ {
|
||||
jsonBytes := wire.BinaryBytes(nodeInfo)
|
||||
jsonBytes := cdc.MustMarshalBinaryBare(nodeInfo)
|
||||
counter += len(jsonBytes)
|
||||
}
|
||||
|
||||
@@ -82,15 +98,20 @@ func BenchmarkEncodeNodeInfoBinary(b *testing.B) {
|
||||
|
||||
func BenchmarkEncodeNodeInfoProto(b *testing.B) {
|
||||
b.StopTimer()
|
||||
pubKey := crypto.GenPrivKeyEd25519().PubKey().Unwrap().(crypto.PubKeyEd25519)
|
||||
pubKey2 := &proto.PubKey{Ed25519: &proto.PubKeyEd25519{Bytes: pubKey[:]}}
|
||||
nodeKey := p2p.NodeKey{PrivKey: crypto.GenPrivKeyEd25519()}
|
||||
nodeID := string(nodeKey.ID())
|
||||
someName := "SOMENAME"
|
||||
someAddr := "SOMEADDR"
|
||||
someVer := "SOMEVER"
|
||||
someString := "SOMESTRING"
|
||||
otherString := "OTHERSTRING"
|
||||
nodeInfo := proto.NodeInfo{
|
||||
PubKey: pubKey2,
|
||||
Moniker: "SOMENAME",
|
||||
Network: "SOMENAME",
|
||||
ListenAddr: "SOMEADDR",
|
||||
Version: "SOMEVER",
|
||||
Other: []string{"SOMESTRING", "OTHERSTRING"},
|
||||
Id: &proto.ID{Id: &nodeID},
|
||||
Moniker: &someName,
|
||||
Network: &someName,
|
||||
ListenAddr: &someAddr,
|
||||
Version: &someVer,
|
||||
Other: []string{someString, otherString},
|
||||
}
|
||||
b.StartTimer()
|
||||
|
||||
File diff suppressed because it is too large
@@ -7,7 +7,7 @@ message ResultStatus {
|
||||
}
|
||||
|
||||
message NodeInfo {
|
||||
required PubKey pubKey = 1;
|
||||
required ID id = 1;
|
||||
required string moniker = 2;
|
||||
required string network = 3;
|
||||
required string remoteAddr = 4;
|
||||
@@ -16,6 +16,10 @@ message NodeInfo {
|
||||
repeated string other = 7;
|
||||
}
|
||||
|
||||
message ID {
|
||||
required string id = 1;
|
||||
}
|
||||
|
||||
message PubKey {
|
||||
optional PubKeyEd25519 ed25519 = 1;
|
||||
}
|
||||
|
@@ -1,6 +1,7 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"sync"
|
||||
@@ -39,9 +40,12 @@ const (
|
||||
// Assuming a DSL connection (not a good choice) 128 Kbps (upload) ~ 15 KB/s,
|
||||
// sending data across atlantic ~ 7.5 KB/s.
|
||||
minRecvRate = 7680
|
||||
|
||||
// Maximum difference between current and new block's height.
|
||||
maxDiffBetweenCurrentAndReceivedBlockHeight = 100
|
||||
)
|
||||
|
||||
var peerTimeoutSeconds = time.Duration(15) // not const so we can override with tests
|
||||
var peerTimeout = 15 * time.Second // not const so we can override with tests
|
||||
|
||||
/*
|
||||
Peers self report their heights when we join the block pool.
|
||||
@@ -68,10 +72,10 @@ type BlockPool struct {
|
||||
maxPeerHeight int64
|
||||
|
||||
requestsCh chan<- BlockRequest
|
||||
timeoutsCh chan<- p2p.ID
|
||||
errorsCh chan<- peerError
|
||||
}
|
||||
|
||||
func NewBlockPool(start int64, requestsCh chan<- BlockRequest, timeoutsCh chan<- p2p.ID) *BlockPool {
|
||||
func NewBlockPool(start int64, requestsCh chan<- BlockRequest, errorsCh chan<- peerError) *BlockPool {
|
||||
bp := &BlockPool{
|
||||
peers: make(map[p2p.ID]*bpPeer),
|
||||
|
||||
@@ -80,7 +84,7 @@ func NewBlockPool(start int64, requestsCh chan<- BlockRequest, timeoutsCh chan<-
|
||||
numPending: 0,
|
||||
|
||||
requestsCh: requestsCh,
|
||||
timeoutsCh: timeoutsCh,
|
||||
errorsCh: errorsCh,
|
||||
}
|
||||
bp.BaseService = *cmn.NewBaseService(nil, "BlockPool", bp)
|
||||
return bp
|
||||
@@ -128,9 +132,10 @@ func (pool *BlockPool) removeTimedoutPeers() {
|
||||
curRate := peer.recvMonitor.Status().CurRate
|
||||
// curRate can be 0 on start
|
||||
if curRate != 0 && curRate < minRecvRate {
|
||||
pool.sendTimeout(peer.id)
|
||||
err := errors.New("peer is not sending us data fast enough")
|
||||
pool.sendError(err, peer.id)
|
||||
pool.Logger.Error("SendTimeout", "peer", peer.id,
|
||||
"reason", "peer is not sending us data fast enough",
|
||||
"reason", err,
|
||||
"curRate", fmt.Sprintf("%d KB/s", curRate/1024),
|
||||
"minRate", fmt.Sprintf("%d KB/s", minRecvRate/1024))
|
||||
peer.didTimeout = true
|
||||
@@ -199,7 +204,7 @@ func (pool *BlockPool) PopRequest() {
|
||||
delete(pool.requesters, pool.height)
|
||||
pool.height++
|
||||
} else {
|
||||
cmn.PanicSanity(cmn.Fmt("Expected requester to pop, got nothing at height %v", pool.height))
|
||||
panic(fmt.Sprintf("Expected requester to pop, got nothing at height %v", pool.height))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -213,8 +218,9 @@ func (pool *BlockPool) RedoRequest(height int64) p2p.ID {
|
||||
request := pool.requesters[height]
|
||||
|
||||
if request.block == nil {
|
||||
cmn.PanicSanity("Expected block to be non-nil")
|
||||
panic("Expected block to be non-nil")
|
||||
}
|
||||
|
||||
// RemovePeer will redo all requesters associated with this peer.
|
||||
pool.removePeer(request.peerID)
|
||||
return request.peerID
|
||||
@@ -227,8 +233,14 @@ func (pool *BlockPool) AddBlock(peerID p2p.ID, block *types.Block, blockSize int
|
||||
|
||||
requester := pool.requesters[block.Height]
|
||||
if requester == nil {
|
||||
// a block we didn't expect.
|
||||
// TODO:if height is too far ahead, punish peer
|
||||
pool.Logger.Info("peer sent us a block we didn't expect", "peer", peerID, "curHeight", pool.height, "blockHeight", block.Height)
|
||||
diff := pool.height - block.Height
|
||||
if diff < 0 {
|
||||
diff *= -1
|
||||
}
|
||||
if diff > maxDiffBetweenCurrentAndReceivedBlockHeight {
|
||||
pool.sendError(errors.New("peer sent us a block we didn't expect with a height too far ahead/behind"), peerID)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -339,11 +351,11 @@ func (pool *BlockPool) sendRequest(height int64, peerID p2p.ID) {
|
||||
pool.requestsCh <- BlockRequest{height, peerID}
|
||||
}
|
||||
|
||||
func (pool *BlockPool) sendTimeout(peerID p2p.ID) {
|
||||
func (pool *BlockPool) sendError(err error, peerID p2p.ID) {
|
||||
if !pool.IsRunning() {
|
||||
return
|
||||
}
|
||||
pool.timeoutsCh <- peerID
|
||||
pool.errorsCh <- peerError{err, peerID}
|
||||
}
|
||||
|
||||
// unused by tendermint; left for debugging purposes
|
||||
@@ -402,9 +414,9 @@ func (peer *bpPeer) resetMonitor() {
|
||||
|
||||
func (peer *bpPeer) resetTimeout() {
|
||||
if peer.timeout == nil {
|
||||
peer.timeout = time.AfterFunc(time.Second*peerTimeoutSeconds, peer.onTimeout)
|
||||
peer.timeout = time.AfterFunc(peerTimeout, peer.onTimeout)
|
||||
} else {
|
||||
peer.timeout.Reset(time.Second * peerTimeoutSeconds)
|
||||
peer.timeout.Reset(peerTimeout)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -430,8 +442,9 @@ func (peer *bpPeer) onTimeout() {
|
||||
peer.pool.mtx.Lock()
|
||||
defer peer.pool.mtx.Unlock()
|
||||
|
||||
peer.pool.sendTimeout(peer.id)
|
||||
peer.logger.Error("SendTimeout", "reason", "onTimeout")
|
||||
err := errors.New("peer did not send us anything")
|
||||
peer.pool.sendError(err, peer.id)
|
||||
peer.logger.Error("SendTimeout", "reason", err, "timeout", peerTimeout)
|
||||
peer.didTimeout = true
|
||||
}
|
||||
|
||||
|
@@ -13,7 +13,7 @@ import (
|
||||
)
|
||||
|
||||
func init() {
|
||||
peerTimeoutSeconds = time.Duration(2)
|
||||
peerTimeout = 2 * time.Second
|
||||
}
|
||||
|
||||
type testPeer struct {
|
||||
@@ -34,9 +34,9 @@ func makePeers(numPeers int, minHeight, maxHeight int64) map[p2p.ID]testPeer {
|
||||
func TestBasic(t *testing.T) {
|
||||
start := int64(42)
|
||||
peers := makePeers(10, start+1, 1000)
|
||||
timeoutsCh := make(chan p2p.ID, 100)
|
||||
requestsCh := make(chan BlockRequest, 100)
|
||||
pool := NewBlockPool(start, requestsCh, timeoutsCh)
|
||||
errorsCh := make(chan peerError, 1000)
|
||||
requestsCh := make(chan BlockRequest, 1000)
|
||||
pool := NewBlockPool(start, requestsCh, errorsCh)
|
||||
pool.SetLogger(log.TestingLogger())
|
||||
|
||||
err := pool.Start()
|
||||
@@ -71,8 +71,8 @@ func TestBasic(t *testing.T) {
|
||||
// Pull from channels
|
||||
for {
|
||||
select {
|
||||
case peerID := <-timeoutsCh:
|
||||
t.Errorf("timeout: %v", peerID)
|
||||
case err := <-errorsCh:
|
||||
t.Error(err)
|
||||
case request := <-requestsCh:
|
||||
t.Logf("Pulled new BlockRequest %v", request)
|
||||
if request.Height == 300 {
|
||||
@@ -91,9 +91,9 @@ func TestBasic(t *testing.T) {
|
||||
func TestTimeout(t *testing.T) {
|
||||
start := int64(42)
|
||||
peers := makePeers(10, start+1, 1000)
|
||||
timeoutsCh := make(chan p2p.ID, 100)
|
||||
requestsCh := make(chan BlockRequest, 100)
|
||||
pool := NewBlockPool(start, requestsCh, timeoutsCh)
|
||||
errorsCh := make(chan peerError, 1000)
|
||||
requestsCh := make(chan BlockRequest, 1000)
|
||||
pool := NewBlockPool(start, requestsCh, errorsCh)
|
||||
pool.SetLogger(log.TestingLogger())
|
||||
err := pool.Start()
|
||||
if err != nil {
|
||||
@@ -132,9 +132,10 @@ func TestTimeout(t *testing.T) {
|
||||
timedOut := map[p2p.ID]struct{}{}
|
||||
for {
|
||||
select {
|
||||
case peerID := <-timeoutsCh:
|
||||
t.Logf("Peer %v timeouted", peerID)
|
||||
if _, ok := timedOut[peerID]; !ok {
|
||||
case err := <-errorsCh:
|
||||
t.Log(err)
|
||||
// consider error to be always timeout here
|
||||
if _, ok := timedOut[err.peerID]; !ok {
|
||||
counter++
|
||||
if counter == len(peers) {
|
||||
return // Done!
|
||||
|
@@ -1,29 +1,23 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
wire "github.com/tendermint/go-wire"
|
||||
|
||||
cmn "github.com/tendermint/tmlibs/common"
|
||||
"github.com/tendermint/tmlibs/log"
|
||||
|
||||
"github.com/tendermint/go-amino"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
cmn "github.com/tendermint/tmlibs/common"
|
||||
"github.com/tendermint/tmlibs/log"
|
||||
)
|
||||
|
||||
const (
|
||||
// BlockchainChannel is a channel for blocks and status updates (`BlockStore` height)
|
||||
BlockchainChannel = byte(0x40)
|
||||
|
||||
defaultChannelCapacity = 1000
|
||||
trySyncIntervalMS = 50
|
||||
trySyncIntervalMS = 50
|
||||
// stop syncing when last block's time is
|
||||
// within this much of the system time.
|
||||
// stopSyncingDurationMinutes = 10
|
||||
@@ -32,6 +26,13 @@ const (
|
||||
statusUpdateIntervalSeconds = 10
|
||||
// check if we should switch to consensus reactor
|
||||
switchToConsensusIntervalSeconds = 1
|
||||
|
||||
// NOTE: keep up to date with bcBlockResponseMessage
|
||||
bcBlockResponseMessagePrefixSize = 4
|
||||
bcBlockResponseMessageFieldKeySize = 1
|
||||
maxMsgSize = types.MaxBlockSizeBytes +
|
||||
bcBlockResponseMessagePrefixSize +
|
||||
bcBlockResponseMessageFieldKeySize
|
||||
)
|
||||
|
||||
type consensusReactor interface {
|
||||
@@ -40,13 +41,19 @@ type consensusReactor interface {
|
||||
SwitchToConsensus(sm.State, int)
|
||||
}
|
||||
|
||||
type peerError struct {
|
||||
err error
|
||||
peerID p2p.ID
|
||||
}
|
||||
|
||||
func (e peerError) Error() string {
|
||||
return fmt.Sprintf("error with peer %v: %s", e.peerID, e.err.Error())
|
||||
}
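The peerError type above replaces the old bare timeout channel: the pool now reports why a peer misbehaved, and the reactor turns that into StopPeerForError. The following is a condensed sketch of that flow, not part of the diff; it assumes it lives inside the same blockchain package so it can reach the unexported types shown in this changeset.

package blockchain

import "fmt"

// demoErrorFlow sketches the new error path: BlockPool.sendError pushes a
// peerError onto errorsCh, and the reactor's poolRoutine drains the channel
// and stops the offending peer.
func demoErrorFlow() {
	requestsCh := make(chan BlockRequest, 1000)
	errorsCh := make(chan peerError, 1000) // buffered so sendError never blocks Receive
	pool := NewBlockPool(1, requestsCh, errorsCh)
	_ = pool // normally Start()ed and fed via Receive/AddBlock

	go func() {
		for e := range errorsCh {
			// The real poolRoutine looks the peer up via Switch.Peers().Get(e.peerID)
			// and calls Switch.StopPeerForError(peer, e).
			fmt.Println("stopping peer", e.peerID, "because:", e.Error())
		}
	}()
}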
|
||||
|
||||
// BlockchainReactor handles long-term catchup syncing.
|
||||
type BlockchainReactor struct {
|
||||
p2p.BaseReactor
|
||||
|
||||
mtx sync.Mutex
|
||||
params types.ConsensusParams
|
||||
|
||||
// immutable
|
||||
initialState sm.State
|
||||
|
||||
@@ -56,7 +63,7 @@ type BlockchainReactor struct {
|
||||
fastSync bool
|
||||
|
||||
requestsCh <-chan BlockRequest
|
||||
timeoutsCh <-chan p2p.ID
|
||||
errorsCh <-chan peerError
|
||||
}
|
||||
|
||||
// NewBlockchainReactor returns new reactor instance.
|
||||
@@ -64,26 +71,28 @@ func NewBlockchainReactor(state sm.State, blockExec *sm.BlockExecutor, store *Bl
|
||||
fastSync bool) *BlockchainReactor {
|
||||
|
||||
if state.LastBlockHeight != store.Height() {
|
||||
cmn.PanicSanity(cmn.Fmt("state (%v) and store (%v) height mismatch", state.LastBlockHeight,
|
||||
panic(fmt.Sprintf("state (%v) and store (%v) height mismatch", state.LastBlockHeight,
|
||||
store.Height()))
|
||||
}
|
||||
|
||||
requestsCh := make(chan BlockRequest, defaultChannelCapacity)
|
||||
timeoutsCh := make(chan p2p.ID, defaultChannelCapacity)
|
||||
const capacity = 1000 // must be bigger than peers count
|
||||
requestsCh := make(chan BlockRequest, capacity)
|
||||
errorsCh := make(chan peerError, capacity) // so we don't block in #Receive#pool.AddBlock
|
||||
|
||||
pool := NewBlockPool(
|
||||
store.Height()+1,
|
||||
requestsCh,
|
||||
timeoutsCh,
|
||||
errorsCh,
|
||||
)
|
||||
|
||||
bcR := &BlockchainReactor{
|
||||
params: state.ConsensusParams,
|
||||
initialState: state,
|
||||
blockExec: blockExec,
|
||||
store: store,
|
||||
pool: pool,
|
||||
fastSync: fastSync,
|
||||
requestsCh: requestsCh,
|
||||
timeoutsCh: timeoutsCh,
|
||||
errorsCh: errorsCh,
|
||||
}
|
||||
bcR.BaseReactor = *p2p.NewBaseReactor("BlockchainReactor", bcR)
|
||||
return bcR
|
||||
@@ -120,17 +129,19 @@ func (bcR *BlockchainReactor) OnStop() {
|
||||
func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor {
|
||||
return []*p2p.ChannelDescriptor{
|
||||
{
|
||||
ID: BlockchainChannel,
|
||||
Priority: 10,
|
||||
SendQueueCapacity: 1000,
|
||||
ID: BlockchainChannel,
|
||||
Priority: 10,
|
||||
SendQueueCapacity: 1000,
|
||||
RecvBufferCapacity: 50 * 4096,
|
||||
RecvMessageCapacity: maxMsgSize,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// AddPeer implements Reactor by sending our state to peer.
|
||||
func (bcR *BlockchainReactor) AddPeer(peer p2p.Peer) {
|
||||
if !peer.Send(BlockchainChannel,
|
||||
struct{ BlockchainMessage }{&bcStatusResponseMessage{bcR.store.Height()}}) {
|
||||
msgBytes := cdc.MustMarshalBinaryBare(&bcStatusResponseMessage{bcR.store.Height()})
|
||||
if !peer.Send(BlockchainChannel, msgBytes) {
|
||||
// doing nothing, will try later in `poolRoutine`
|
||||
}
|
||||
// peer is added to the pool once we receive the first
|
||||
@@ -151,22 +162,22 @@ func (bcR *BlockchainReactor) respondToPeer(msg *bcBlockRequestMessage,
|
||||
|
||||
block := bcR.store.LoadBlock(msg.Height)
|
||||
if block != nil {
|
||||
msg := &bcBlockResponseMessage{Block: block}
|
||||
return src.TrySend(BlockchainChannel, struct{ BlockchainMessage }{msg})
|
||||
msgBytes := cdc.MustMarshalBinaryBare(&bcBlockResponseMessage{Block: block})
|
||||
return src.TrySend(BlockchainChannel, msgBytes)
|
||||
}
|
||||
|
||||
bcR.Logger.Info("Peer asking for a block we don't have", "src", src, "height", msg.Height)
|
||||
|
||||
return src.TrySend(BlockchainChannel, struct{ BlockchainMessage }{
|
||||
&bcNoBlockResponseMessage{Height: msg.Height},
|
||||
})
|
||||
msgBytes := cdc.MustMarshalBinaryBare(&bcNoBlockResponseMessage{Height: msg.Height})
|
||||
return src.TrySend(BlockchainChannel, msgBytes)
|
||||
}
|
||||
|
||||
// Receive implements Reactor by handling 4 types of messages (look below).
|
||||
func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
|
||||
_, msg, err := DecodeMessage(msgBytes, bcR.maxMsgSize())
|
||||
msg, err := DecodeMessage(msgBytes)
|
||||
if err != nil {
|
||||
bcR.Logger.Error("Error decoding message", "err", err)
|
||||
bcR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes)
|
||||
bcR.Switch.StopPeerForError(src, err)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -182,8 +193,8 @@ func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
|
||||
bcR.pool.AddBlock(src.ID(), msg.Block, len(msgBytes))
|
||||
case *bcStatusRequestMessage:
|
||||
// Send peer our state.
|
||||
queued := src.TrySend(BlockchainChannel,
|
||||
struct{ BlockchainMessage }{&bcStatusResponseMessage{bcR.store.Height()}})
|
||||
msgBytes := cdc.MustMarshalBinaryBare(&bcStatusResponseMessage{bcR.store.Height()})
|
||||
queued := src.TrySend(BlockchainChannel, msgBytes)
|
||||
if !queued {
|
||||
// sorry
|
||||
}
|
||||
@@ -195,21 +206,6 @@ func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
|
||||
}
|
||||
}
|
||||
|
||||
// maxMsgSize returns the maximum allowable size of a
|
||||
// message on the blockchain reactor.
|
||||
func (bcR *BlockchainReactor) maxMsgSize() int {
|
||||
bcR.mtx.Lock()
|
||||
defer bcR.mtx.Unlock()
|
||||
return bcR.params.BlockSize.MaxBytes + 2
|
||||
}
|
||||
|
||||
// updateConsensusParams updates the internal consensus params
|
||||
func (bcR *BlockchainReactor) updateConsensusParams(params types.ConsensusParams) {
|
||||
bcR.mtx.Lock()
|
||||
defer bcR.mtx.Unlock()
|
||||
bcR.params = params
|
||||
}
|
||||
|
||||
// Handle messages from the poolReactor telling the reactor what to do.
|
||||
// NOTE: Don't sleep in the FOR_LOOP or otherwise slow it down!
|
||||
// (Except for the SYNC_LOOP, which is the primary purpose and must be synchronous.)
|
||||
@@ -230,23 +226,22 @@ func (bcR *BlockchainReactor) poolRoutine() {
|
||||
FOR_LOOP:
|
||||
for {
|
||||
select {
|
||||
case request := <-bcR.requestsCh: // chan BlockRequest
|
||||
case request := <-bcR.requestsCh:
|
||||
peer := bcR.Switch.Peers().Get(request.PeerID)
|
||||
if peer == nil {
|
||||
continue FOR_LOOP // Peer has since been disconnected.
|
||||
}
|
||||
msg := &bcBlockRequestMessage{request.Height}
|
||||
queued := peer.TrySend(BlockchainChannel, struct{ BlockchainMessage }{msg})
|
||||
msgBytes := cdc.MustMarshalBinaryBare(&bcBlockRequestMessage{request.Height})
|
||||
queued := peer.TrySend(BlockchainChannel, msgBytes)
|
||||
if !queued {
|
||||
// We couldn't make the request, send-queue full.
|
||||
// The pool handles timeouts, just let it go.
|
||||
continue FOR_LOOP
|
||||
}
|
||||
case peerID := <-bcR.timeoutsCh: // chan string
|
||||
// Peer timed out.
|
||||
peer := bcR.Switch.Peers().Get(peerID)
|
||||
case err := <-bcR.errorsCh:
|
||||
peer := bcR.Switch.Peers().Get(err.peerID)
|
||||
if peer != nil {
|
||||
bcR.Switch.StopPeerForError(peer, errors.New("BlockchainReactor Timeout"))
|
||||
bcR.Switch.StopPeerForError(peer, err)
|
||||
}
|
||||
case <-statusUpdateTicker.C:
|
||||
// ask for status updates
|
||||
@@ -310,9 +305,6 @@ FOR_LOOP:
|
||||
}
|
||||
blocksSynced++
|
||||
|
||||
// update the consensus params
|
||||
bcR.updateConsensusParams(state.ConsensusParams)
|
||||
|
||||
if blocksSynced%100 == 0 {
|
||||
lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds())
|
||||
bcR.Logger.Info("Fast Sync Rate", "height", bcR.pool.height,
|
||||
@@ -330,43 +322,36 @@ FOR_LOOP:
|
||||
|
||||
// BroadcastStatusRequest broadcasts `BlockStore` height.
|
||||
func (bcR *BlockchainReactor) BroadcastStatusRequest() error {
|
||||
bcR.Switch.Broadcast(BlockchainChannel,
|
||||
struct{ BlockchainMessage }{&bcStatusRequestMessage{bcR.store.Height()}})
|
||||
msgBytes := cdc.MustMarshalBinaryBare(&bcStatusRequestMessage{bcR.store.Height()})
|
||||
bcR.Switch.Broadcast(BlockchainChannel, msgBytes)
|
||||
return nil
|
||||
}
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
// Messages
|
||||
|
||||
const (
|
||||
msgTypeBlockRequest = byte(0x10)
|
||||
msgTypeBlockResponse = byte(0x11)
|
||||
msgTypeNoBlockResponse = byte(0x12)
|
||||
msgTypeStatusResponse = byte(0x20)
|
||||
msgTypeStatusRequest = byte(0x21)
|
||||
)
|
||||
|
||||
// BlockchainMessage is a generic message for this reactor.
|
||||
type BlockchainMessage interface{}
|
||||
|
||||
var _ = wire.RegisterInterface(
|
||||
struct{ BlockchainMessage }{},
|
||||
wire.ConcreteType{&bcBlockRequestMessage{}, msgTypeBlockRequest},
|
||||
wire.ConcreteType{&bcBlockResponseMessage{}, msgTypeBlockResponse},
|
||||
wire.ConcreteType{&bcNoBlockResponseMessage{}, msgTypeNoBlockResponse},
|
||||
wire.ConcreteType{&bcStatusResponseMessage{}, msgTypeStatusResponse},
|
||||
wire.ConcreteType{&bcStatusRequestMessage{}, msgTypeStatusRequest},
|
||||
)
|
||||
func RegisterBlockchainMessages(cdc *amino.Codec) {
|
||||
cdc.RegisterInterface((*BlockchainMessage)(nil), nil)
|
||||
cdc.RegisterConcrete(&bcBlockRequestMessage{}, "tendermint/mempool/BlockRequest", nil)
|
||||
cdc.RegisterConcrete(&bcBlockResponseMessage{}, "tendermint/mempool/BlockResponse", nil)
|
||||
cdc.RegisterConcrete(&bcNoBlockResponseMessage{}, "tendermint/mempool/NoBlockResponse", nil)
|
||||
cdc.RegisterConcrete(&bcStatusResponseMessage{}, "tendermint/mempool/StatusResponse", nil)
|
||||
cdc.RegisterConcrete(&bcStatusRequestMessage{}, "tendermint/mempool/StatusRequest", nil)
|
||||
}
|
||||
|
||||
// DecodeMessage decodes BlockchainMessage.
|
||||
// TODO: ensure that bz is completely read.
|
||||
func DecodeMessage(bz []byte, maxSize int) (msgType byte, msg BlockchainMessage, err error) {
|
||||
msgType = bz[0]
|
||||
n := int(0)
|
||||
r := bytes.NewReader(bz)
|
||||
msg = wire.ReadBinary(struct{ BlockchainMessage }{}, r, maxSize, &n, &err).(struct{ BlockchainMessage }).BlockchainMessage
|
||||
if err != nil && n != len(bz) {
|
||||
err = errors.New("DecodeMessage() had bytes left over")
|
||||
func DecodeMessage(bz []byte) (msg BlockchainMessage, err error) {
|
||||
if len(bz) > maxMsgSize {
|
||||
return msg, fmt.Errorf("Msg exceeds max size (%d > %d)",
|
||||
len(bz), maxMsgSize)
|
||||
}
|
||||
err = cdc.UnmarshalBinaryBare(bz, &msg)
|
||||
if err != nil {
|
||||
err = cmn.ErrorWrap(err, "DecodeMessage() had bytes left over")
|
||||
}
|
||||
return
|
||||
}
|
||||
@@ -391,7 +376,6 @@ func (brm *bcNoBlockResponseMessage) String() string {
|
||||
|
||||
//-------------------------------------
|
||||
|
||||
// NOTE: keep up-to-date with maxBlockchainResponseSize
|
||||
type bcBlockResponseMessage struct {
|
||||
Block *types.Block
|
||||
}
|
||||
|
@@ -3,8 +3,6 @@ package blockchain
|
||||
import (
|
||||
"testing"
|
||||
|
||||
wire "github.com/tendermint/go-wire"
|
||||
|
||||
cmn "github.com/tendermint/tmlibs/common"
|
||||
dbm "github.com/tendermint/tmlibs/db"
|
||||
"github.com/tendermint/tmlibs/log"
|
||||
@@ -18,8 +16,15 @@ import (
|
||||
|
||||
func makeStateAndBlockStore(logger log.Logger) (sm.State, *BlockStore) {
|
||||
config := cfg.ResetTestRoot("blockchain_reactor_test")
|
||||
blockStore := NewBlockStore(dbm.NewMemDB())
|
||||
state, _ := sm.LoadStateFromDBOrGenesisFile(dbm.NewMemDB(), config.GenesisFile())
|
||||
// blockDB := dbm.NewDebugDB("blockDB", dbm.NewMemDB())
|
||||
// stateDB := dbm.NewDebugDB("stateDB", dbm.NewMemDB())
|
||||
blockDB := dbm.NewMemDB()
|
||||
stateDB := dbm.NewMemDB()
|
||||
blockStore := NewBlockStore(blockDB)
|
||||
state, err := sm.LoadStateFromDBOrGenesisFile(stateDB, config.GenesisFile())
|
||||
if err != nil {
|
||||
panic(cmn.ErrorWrap(err, "error constructing state from genesis file"))
|
||||
}
|
||||
return state, blockStore
|
||||
}
|
||||
|
||||
@@ -76,10 +81,9 @@ func TestNoBlockResponse(t *testing.T) {
|
||||
// wait for our response to be received on the peer
|
||||
for _, tt := range tests {
|
||||
reqBlockMsg := &bcBlockRequestMessage{tt.height}
|
||||
reqBlockBytes := wire.BinaryBytes(struct{ BlockchainMessage }{reqBlockMsg})
|
||||
reqBlockBytes := cdc.MustMarshalBinaryBare(reqBlockMsg)
|
||||
bcr.Receive(chID, peer, reqBlockBytes)
|
||||
value := peer.lastValue()
|
||||
msg := value.(struct{ BlockchainMessage }).BlockchainMessage
|
||||
msg := peer.lastBlockchainMessage()
|
||||
|
||||
if tt.existent {
|
||||
if blockMsg, ok := msg.(*bcBlockResponseMessage); !ok {
|
||||
@@ -173,26 +177,30 @@ func newbcrTestPeer(id p2p.ID) *bcrTestPeer {
|
||||
return bcr
|
||||
}
|
||||
|
||||
func (tp *bcrTestPeer) lastValue() interface{} { return <-tp.ch }
|
||||
func (tp *bcrTestPeer) lastBlockchainMessage() interface{} { return <-tp.ch }
|
||||
|
||||
func (tp *bcrTestPeer) TrySend(chID byte, value interface{}) bool {
|
||||
if _, ok := value.(struct{ BlockchainMessage }).
|
||||
BlockchainMessage.(*bcStatusResponseMessage); ok {
|
||||
func (tp *bcrTestPeer) TrySend(chID byte, msgBytes []byte) bool {
|
||||
var msg BlockchainMessage
|
||||
err := cdc.UnmarshalBinaryBare(msgBytes, &msg)
|
||||
if err != nil {
|
||||
panic(cmn.ErrorWrap(err, "Error while trying to parse a BlockchainMessage"))
|
||||
}
|
||||
if _, ok := msg.(*bcStatusResponseMessage); ok {
|
||||
// Discard status response messages since they skew our results
|
||||
// We only want to deal with:
|
||||
// + bcBlockResponseMessage
|
||||
// + bcNoBlockResponseMessage
|
||||
} else {
|
||||
tp.ch <- value
|
||||
tp.ch <- msg
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (tp *bcrTestPeer) Send(chID byte, data interface{}) bool { return tp.TrySend(chID, data) }
|
||||
func (tp *bcrTestPeer) NodeInfo() p2p.NodeInfo { return p2p.NodeInfo{} }
|
||||
func (tp *bcrTestPeer) Status() p2p.ConnectionStatus { return p2p.ConnectionStatus{} }
|
||||
func (tp *bcrTestPeer) ID() p2p.ID { return tp.id }
|
||||
func (tp *bcrTestPeer) IsOutbound() bool { return false }
|
||||
func (tp *bcrTestPeer) IsPersistent() bool { return true }
|
||||
func (tp *bcrTestPeer) Get(s string) interface{} { return s }
|
||||
func (tp *bcrTestPeer) Set(string, interface{}) {}
|
||||
func (tp *bcrTestPeer) Send(chID byte, msgBytes []byte) bool { return tp.TrySend(chID, msgBytes) }
|
||||
func (tp *bcrTestPeer) NodeInfo() p2p.NodeInfo { return p2p.NodeInfo{} }
|
||||
func (tp *bcrTestPeer) Status() p2p.ConnectionStatus { return p2p.ConnectionStatus{} }
|
||||
func (tp *bcrTestPeer) ID() p2p.ID { return tp.id }
|
||||
func (tp *bcrTestPeer) IsOutbound() bool { return false }
|
||||
func (tp *bcrTestPeer) IsPersistent() bool { return true }
|
||||
func (tp *bcrTestPeer) Get(s string) interface{} { return s }
|
||||
func (tp *bcrTestPeer) Set(string, interface{}) {}
|
||||
|
@@ -1,14 +1,9 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"sync"
|
||||
|
||||
wire "github.com/tendermint/go-wire"
|
||||
|
||||
cmn "github.com/tendermint/tmlibs/common"
|
||||
dbm "github.com/tendermint/tmlibs/db"
|
||||
|
||||
@@ -54,38 +49,25 @@ func (bs *BlockStore) Height() int64 {
|
||||
return bs.height
|
||||
}
|
||||
|
||||
// GetReader returns the value associated with the given key wrapped in an io.Reader.
|
||||
// If no value is found, it returns nil.
|
||||
// It's mainly for use with wire.ReadBinary.
|
||||
func (bs *BlockStore) GetReader(key []byte) io.Reader {
|
||||
bytez := bs.db.Get(key)
|
||||
if bytez == nil {
|
||||
return nil
|
||||
}
|
||||
return bytes.NewReader(bytez)
|
||||
}
|
||||
|
||||
// LoadBlock returns the block with the given height.
|
||||
// If no block is found for that height, it returns nil.
|
||||
func (bs *BlockStore) LoadBlock(height int64) *types.Block {
|
||||
var n int
|
||||
var err error
|
||||
r := bs.GetReader(calcBlockMetaKey(height))
|
||||
if r == nil {
|
||||
var blockMeta = bs.LoadBlockMeta(height)
|
||||
if blockMeta == nil {
|
||||
return nil
|
||||
}
|
||||
blockMeta := wire.ReadBinary(&types.BlockMeta{}, r, 0, &n, &err).(*types.BlockMeta)
|
||||
if err != nil {
|
||||
cmn.PanicCrisis(cmn.Fmt("Error reading block meta: %v", err))
|
||||
}
|
||||
bytez := []byte{}
|
||||
|
||||
var block = new(types.Block)
|
||||
buf := []byte{}
|
||||
for i := 0; i < blockMeta.BlockID.PartsHeader.Total; i++ {
|
||||
part := bs.LoadBlockPart(height, i)
|
||||
bytez = append(bytez, part.Bytes...)
|
||||
buf = append(buf, part.Bytes...)
|
||||
}
|
||||
block := wire.ReadBinary(&types.Block{}, bytes.NewReader(bytez), 0, &n, &err).(*types.Block)
|
||||
err := cdc.UnmarshalBinary(buf, block)
|
||||
if err != nil {
|
||||
cmn.PanicCrisis(cmn.Fmt("Error reading block: %v", err))
|
||||
// NOTE: The existence of meta should imply the existence of the
|
||||
// block. So, make sure meta is only saved after blocks are saved.
|
||||
panic(cmn.ErrorWrap(err, "Error reading block"))
|
||||
}
|
||||
return block
|
||||
}
|
||||
@@ -94,15 +76,14 @@ func (bs *BlockStore) LoadBlock(height int64) *types.Block {
|
||||
// from the block at the given height.
|
||||
// If no part is found for the given height and index, it returns nil.
|
||||
func (bs *BlockStore) LoadBlockPart(height int64, index int) *types.Part {
|
||||
var n int
|
||||
var err error
|
||||
r := bs.GetReader(calcBlockPartKey(height, index))
|
||||
if r == nil {
|
||||
var part = new(types.Part)
|
||||
bz := bs.db.Get(calcBlockPartKey(height, index))
|
||||
if len(bz) == 0 {
|
||||
return nil
|
||||
}
|
||||
part := wire.ReadBinary(&types.Part{}, r, 0, &n, &err).(*types.Part)
|
||||
err := cdc.UnmarshalBinaryBare(bz, part)
|
||||
if err != nil {
|
||||
cmn.PanicCrisis(cmn.Fmt("Error reading block part: %v", err))
|
||||
panic(cmn.ErrorWrap(err, "Error reading block part"))
|
||||
}
|
||||
return part
|
||||
}
|
||||
@@ -110,15 +91,14 @@ func (bs *BlockStore) LoadBlockPart(height int64, index int) *types.Part {
|
||||
// LoadBlockMeta returns the BlockMeta for the given height.
|
||||
// If no block is found for the given height, it returns nil.
|
||||
func (bs *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta {
|
||||
var n int
|
||||
var err error
|
||||
r := bs.GetReader(calcBlockMetaKey(height))
|
||||
if r == nil {
|
||||
var blockMeta = new(types.BlockMeta)
|
||||
bz := bs.db.Get(calcBlockMetaKey(height))
|
||||
if len(bz) == 0 {
|
||||
return nil
|
||||
}
|
||||
blockMeta := wire.ReadBinary(&types.BlockMeta{}, r, 0, &n, &err).(*types.BlockMeta)
|
||||
err := cdc.UnmarshalBinaryBare(bz, blockMeta)
|
||||
if err != nil {
|
||||
cmn.PanicCrisis(cmn.Fmt("Error reading block meta: %v", err))
|
||||
panic(cmn.ErrorWrap(err, "Error reading block meta"))
|
||||
}
|
||||
return blockMeta
|
||||
}
|
||||
@@ -128,15 +108,14 @@ func (bs *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta {
|
||||
// and it comes from the block.LastCommit for `height+1`.
|
||||
// If no commit is found for the given height, it returns nil.
|
||||
func (bs *BlockStore) LoadBlockCommit(height int64) *types.Commit {
|
||||
var n int
|
||||
var err error
|
||||
r := bs.GetReader(calcBlockCommitKey(height))
|
||||
if r == nil {
|
||||
var commit = new(types.Commit)
|
||||
bz := bs.db.Get(calcBlockCommitKey(height))
|
||||
if len(bz) == 0 {
|
||||
return nil
|
||||
}
|
||||
commit := wire.ReadBinary(&types.Commit{}, r, 0, &n, &err).(*types.Commit)
|
||||
err := cdc.UnmarshalBinaryBare(bz, commit)
|
||||
if err != nil {
|
||||
cmn.PanicCrisis(cmn.Fmt("Error reading commit: %v", err))
|
||||
panic(cmn.ErrorWrap(err, "Error reading block commit"))
|
||||
}
|
||||
return commit
|
||||
}
|
||||
@@ -145,15 +124,14 @@ func (bs *BlockStore) LoadBlockCommit(height int64) *types.Commit {
|
||||
// This is useful when we've seen a commit, but there has not yet been
|
||||
// a new block at `height + 1` that includes this commit in its block.LastCommit.
|
||||
func (bs *BlockStore) LoadSeenCommit(height int64) *types.Commit {
|
||||
var n int
|
||||
var err error
|
||||
r := bs.GetReader(calcSeenCommitKey(height))
|
||||
if r == nil {
|
||||
var commit = new(types.Commit)
|
||||
bz := bs.db.Get(calcSeenCommitKey(height))
|
||||
if len(bz) == 0 {
|
||||
return nil
|
||||
}
|
||||
commit := wire.ReadBinary(&types.Commit{}, r, 0, &n, &err).(*types.Commit)
|
||||
err := cdc.UnmarshalBinaryBare(bz, commit)
|
||||
if err != nil {
|
||||
cmn.PanicCrisis(cmn.Fmt("Error reading commit: %v", err))
|
||||
panic(cmn.ErrorWrap(err, "Error reading block seen commit"))
|
||||
}
|
||||
return commit
|
||||
}
|
||||
@@ -178,21 +156,22 @@ func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, s
|
||||
|
||||
// Save block meta
|
||||
blockMeta := types.NewBlockMeta(block, blockParts)
|
||||
metaBytes := wire.BinaryBytes(blockMeta)
|
||||
metaBytes := cdc.MustMarshalBinaryBare(blockMeta)
|
||||
bs.db.Set(calcBlockMetaKey(height), metaBytes)
|
||||
|
||||
// Save block parts
|
||||
for i := 0; i < blockParts.Total(); i++ {
|
||||
bs.saveBlockPart(height, i, blockParts.GetPart(i))
|
||||
part := blockParts.GetPart(i)
|
||||
bs.saveBlockPart(height, i, part)
|
||||
}
|
||||
|
||||
// Save block commit (duplicate and separate from the Block)
|
||||
blockCommitBytes := wire.BinaryBytes(block.LastCommit)
|
||||
blockCommitBytes := cdc.MustMarshalBinaryBare(block.LastCommit)
|
||||
bs.db.Set(calcBlockCommitKey(height-1), blockCommitBytes)
|
||||
|
||||
// Save seen commit (seen +2/3 precommits for block)
|
||||
// NOTE: we can delete this at a later height
|
||||
seenCommitBytes := wire.BinaryBytes(seenCommit)
|
||||
seenCommitBytes := cdc.MustMarshalBinaryBare(seenCommit)
|
||||
bs.db.Set(calcSeenCommitKey(height), seenCommitBytes)
|
||||
|
||||
// Save new BlockStoreStateJSON descriptor
|
||||
@@ -211,7 +190,7 @@ func (bs *BlockStore) saveBlockPart(height int64, index int, part *types.Part) {
|
||||
if height != bs.Height()+1 {
|
||||
cmn.PanicSanity(cmn.Fmt("BlockStore can only save contiguous blocks. Wanted %v, got %v", bs.Height()+1, height))
|
||||
}
|
||||
partBytes := wire.BinaryBytes(part)
|
||||
partBytes := cdc.MustMarshalBinaryBare(part)
|
||||
bs.db.Set(calcBlockPartKey(height, index), partBytes)
|
||||
}
|
||||
|
||||
@@ -238,12 +217,12 @@ func calcSeenCommitKey(height int64) []byte {
|
||||
var blockStoreKey = []byte("blockStore")
|
||||
|
||||
type BlockStoreStateJSON struct {
|
||||
Height int64
|
||||
Height int64 `json:"height"`
|
||||
}
|
||||
|
||||
// Save persists the blockStore state to the database as JSON.
|
||||
func (bsj BlockStoreStateJSON) Save(db dbm.DB) {
|
||||
bytes, err := json.Marshal(bsj)
|
||||
bytes, err := cdc.MarshalJSON(bsj)
|
||||
if err != nil {
|
||||
cmn.PanicSanity(cmn.Fmt("Could not marshal state bytes: %v", err))
|
||||
}
|
||||
@@ -260,9 +239,9 @@ func LoadBlockStoreStateJSON(db dbm.DB) BlockStoreStateJSON {
|
||||
}
|
||||
}
|
||||
bsj := BlockStoreStateJSON{}
|
||||
err := json.Unmarshal(bytes, &bsj)
|
||||
err := cdc.UnmarshalJSON(bytes, &bsj)
|
||||
if err != nil {
|
||||
cmn.PanicCrisis(cmn.Fmt("Could not unmarshal bytes: %X", bytes))
|
||||
panic(fmt.Sprintf("Could not unmarshal bytes: %X", bytes))
|
||||
}
|
||||
return bsj
|
||||
}
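BlockStoreStateJSON above now round-trips through the amino JSON codec instead of encoding/json. A minimal sketch of that round trip follows; it is not part of the diff and assumes it sits in the blockchain package next to store.go.

package blockchain

import (
	"fmt"

	dbm "github.com/tendermint/tmlibs/db"
)

// demoBlockStoreState saves the descriptor under blockStoreKey and reads it back.
func demoBlockStoreState() {
	db := dbm.NewMemDB()

	BlockStoreStateJSON{Height: 123}.Save(db)

	bsj := LoadBlockStoreStateJSON(db)
	fmt.Println(bsj.Height) // 123
}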
|
||||
|
@@ -3,7 +3,6 @@ package blockchain
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"runtime/debug"
|
||||
"strings"
|
||||
"testing"
|
||||
@@ -11,9 +10,6 @@ import (
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
wire "github.com/tendermint/go-wire"
|
||||
|
||||
"github.com/tendermint/tmlibs/db"
|
||||
"github.com/tendermint/tmlibs/log"
|
||||
|
||||
@@ -35,7 +31,7 @@ func TestNewBlockStore(t *testing.T) {
|
||||
db := db.NewMemDB()
|
||||
db.Set(blockStoreKey, []byte(`{"height": 10000}`))
|
||||
bs := NewBlockStore(db)
|
||||
assert.Equal(t, bs.Height(), int64(10000), "failed to properly parse blockstore")
|
||||
require.Equal(t, int64(10000), bs.Height(), "failed to properly parse blockstore")
|
||||
|
||||
panicCausers := []struct {
|
||||
data []byte
|
||||
@@ -61,38 +57,6 @@ func TestNewBlockStore(t *testing.T) {
|
||||
assert.Equal(t, bs.Height(), int64(0), "expecting nil bytes to be unmarshaled alright")
|
||||
}
|
||||
|
||||
func TestBlockStoreGetReader(t *testing.T) {
|
||||
db := db.NewMemDB()
|
||||
// Initial setup
|
||||
db.Set([]byte("Foo"), []byte("Bar"))
|
||||
db.Set([]byte("Foo1"), nil)
|
||||
|
||||
bs := NewBlockStore(db)
|
||||
|
||||
tests := [...]struct {
|
||||
key []byte
|
||||
want []byte
|
||||
}{
|
||||
0: {key: []byte("Foo"), want: []byte("Bar")},
|
||||
1: {key: []byte("KnoxNonExistent"), want: nil},
|
||||
2: {key: []byte("Foo1"), want: []byte{}},
|
||||
}
|
||||
|
||||
for i, tt := range tests {
|
||||
r := bs.GetReader(tt.key)
|
||||
if r == nil {
|
||||
assert.Nil(t, tt.want, "#%d: expected a non-nil reader", i)
|
||||
continue
|
||||
}
|
||||
slurp, err := ioutil.ReadAll(r)
|
||||
if err != nil {
|
||||
t.Errorf("#%d: unexpected Read err: %v", i, err)
|
||||
} else {
|
||||
assert.Equal(t, slurp, tt.want, "#%d: mismatch", i)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func freshBlockStore() (*BlockStore, db.DB) {
|
||||
db := db.NewMemDB()
|
||||
return NewBlockStore(db), db
|
||||
@@ -133,7 +97,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
|
||||
|
||||
incompletePartSet := types.NewPartSetFromHeader(types.PartSetHeader{Total: 2})
|
||||
uncontiguousPartSet := types.NewPartSetFromHeader(types.PartSetHeader{Total: 0})
|
||||
uncontiguousPartSet.AddPart(part2, false)
|
||||
uncontiguousPartSet.AddPart(part2)
|
||||
|
||||
header1 := types.Header{
|
||||
Height: 1,
|
||||
@@ -189,14 +153,14 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
|
||||
parts: validPartSet,
|
||||
seenCommit: seenCommit1,
|
||||
corruptCommitInDB: true, // Corrupt the DB's commit entry
|
||||
wantPanic: "rror reading commit",
|
||||
wantPanic: "Error reading block commit",
|
||||
},
|
||||
|
||||
{
|
||||
block: newBlock(&header1, commitAtH10),
|
||||
parts: validPartSet,
|
||||
seenCommit: seenCommit1,
|
||||
wantPanic: "rror reading block",
|
||||
wantPanic: "Error reading block",
|
||||
corruptBlockInDB: true, // Corrupt the DB's block entry
|
||||
},
|
||||
|
||||
@@ -215,7 +179,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
|
||||
seenCommit: seenCommit1,
|
||||
|
||||
corruptSeenCommitInDB: true,
|
||||
wantPanic: "rror reading commit",
|
||||
wantPanic: "Error reading block seen commit",
|
||||
},
|
||||
|
||||
{
|
||||
@@ -305,14 +269,6 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func binarySerializeIt(v interface{}) []byte {
|
||||
var n int
|
||||
var err error
|
||||
buf := new(bytes.Buffer)
|
||||
wire.WriteBinary(v, buf, &n, &err)
|
||||
return buf.Bytes()
|
||||
}
|
||||
|
||||
func TestLoadBlockPart(t *testing.T) {
|
||||
bs, db := freshBlockStore()
|
||||
height, index := int64(10), 1
|
||||
@@ -334,7 +290,7 @@ func TestLoadBlockPart(t *testing.T) {
|
||||
require.Contains(t, panicErr.Error(), "Error reading block part")
|
||||
|
||||
// 3. A good block serialized and saved to the DB should be retrievable
|
||||
db.Set(calcBlockPartKey(height, index), binarySerializeIt(part1))
|
||||
db.Set(calcBlockPartKey(height, index), cdc.MustMarshalBinaryBare(part1))
|
||||
gotPart, _, panicErr := doFn(loadPart)
|
||||
require.Nil(t, panicErr, "an existent and proper block should not panic")
|
||||
require.Nil(t, res, "a properly saved block should return a proper block")
|
||||
@@ -364,11 +320,11 @@ func TestLoadBlockMeta(t *testing.T) {
|
||||
|
||||
// 3. A good blockMeta serialized and saved to the DB should be retrievable
|
||||
meta := &types.BlockMeta{}
|
||||
db.Set(calcBlockMetaKey(height), binarySerializeIt(meta))
|
||||
db.Set(calcBlockMetaKey(height), cdc.MustMarshalBinaryBare(meta))
|
||||
gotMeta, _, panicErr := doFn(loadMeta)
|
||||
require.Nil(t, panicErr, "an existent and proper block should not panic")
|
||||
require.Nil(t, res, "a properly saved blockMeta should return a proper blocMeta ")
|
||||
require.Equal(t, binarySerializeIt(meta), binarySerializeIt(gotMeta),
|
||||
require.Equal(t, cdc.MustMarshalBinaryBare(meta), cdc.MustMarshalBinaryBare(gotMeta),
|
||||
"expecting successful retrieval of previously saved blockMeta")
|
||||
}
|
||||
|
||||
@@ -385,6 +341,9 @@ func TestBlockFetchAtHeight(t *testing.T) {
|
||||
require.Equal(t, bs.Height(), block.Header.Height, "expecting the new height to be changed")
|
||||
|
||||
blockAtHeight := bs.LoadBlock(bs.Height())
|
||||
bz1 := cdc.MustMarshalBinaryBare(block)
|
||||
bz2 := cdc.MustMarshalBinaryBare(blockAtHeight)
|
||||
require.Equal(t, bz1, bz2)
|
||||
require.Equal(t, block.Hash(), blockAtHeight.Hash(),
|
||||
"expecting a successful load of the last saved block")
|
||||
|
||||
13	blockchain/wire.go	Normal file
@@ -0,0 +1,13 @@
package blockchain

import (
	"github.com/tendermint/go-amino"
	"github.com/tendermint/go-crypto"
)

var cdc = amino.NewCodec()

func init() {
	RegisterBlockchainMessages(cdc)
	crypto.RegisterAmino(cdc)
}
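The new wire.go above builds the package-level amino codec that every encode/decode call in this changeset goes through. As a rough illustration only (not part of the diff), a test-style round trip through that codec might look like the sketch below; it assumes the file sits in the same blockchain package, so it can use the unexported cdc, DecodeMessage, and message types.

package blockchain

import "testing"

// TestStatusRequestRoundTrip is a sketch: encode a status request the way
// BroadcastStatusRequest does, then decode it the way Receive does.
func TestStatusRequestRoundTrip(t *testing.T) {
	msgBytes := cdc.MustMarshalBinaryBare(&bcStatusRequestMessage{10})

	msg, err := DecodeMessage(msgBytes) // DecodeMessage also enforces maxMsgSize
	if err != nil {
		t.Fatal(err)
	}
	if _, ok := msg.(*bcStatusRequestMessage); !ok {
		t.Fatalf("expected *bcStatusRequestMessage, got %T", msg)
	}
}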
35	circle.yml
@@ -1,35 +0,0 @@
|
||||
---
|
||||
machine:
|
||||
environment:
|
||||
MACH_PREFIX: tendermint-test-mach
|
||||
DOCKER_VERSION: 1.10.0
|
||||
DOCKER_MACHINE_VERSION: 0.9.0
|
||||
GOPATH: "$HOME/.go_project"
|
||||
PROJECT_PARENT_PATH: "$GOPATH/src/github.com/$CIRCLE_PROJECT_USERNAME"
|
||||
PROJECT_PATH: "$PROJECT_PARENT_PATH/$CIRCLE_PROJECT_REPONAME"
|
||||
PATH: "$HOME/.go_project/bin:${PATH}"
|
||||
hosts:
|
||||
localhost: 127.0.0.1
|
||||
|
||||
dependencies:
|
||||
override:
|
||||
- curl -sSL https://s3.amazonaws.com/circle-downloads/install-circleci-docker.sh | sudo bash -s -- $DOCKER_VERSION
|
||||
- sudo start docker
|
||||
- sudo curl -sSL -o /usr/bin/docker-machine "https://github.com/docker/machine/releases/download/v$DOCKER_MACHINE_VERSION/docker-machine-`uname -s`-`uname -m`"; sudo chmod 0755 /usr/bin/docker-machine
|
||||
- mkdir -p "$PROJECT_PARENT_PATH"
|
||||
- ln -sf "$HOME/$CIRCLE_PROJECT_REPONAME/" "$PROJECT_PATH"
|
||||
post:
|
||||
- go version
|
||||
- docker version
|
||||
- docker-machine version
|
||||
|
||||
test:
|
||||
override:
|
||||
- cd "$PROJECT_PATH" && set -o pipefail && make test_integrations 2>&1 | tee test_integrations.log:
|
||||
timeout: 1800
|
||||
post:
|
||||
- cd "$PROJECT_PATH" && mv test_integrations.log "${CIRCLE_ARTIFACTS}"
|
||||
- cd "$PROJECT_PATH" && bash <(curl -s https://codecov.io/bash) -f coverage.txt
|
||||
- cd "$PROJECT_PATH" && mv coverage.txt "${CIRCLE_ARTIFACTS}"
|
||||
- cd "$PROJECT_PATH" && cp test/logs/messages "${CIRCLE_ARTIFACTS}/docker.log"
|
||||
- cd "${CIRCLE_ARTIFACTS}" && tar czf logs.tar.gz *.log
|
53
cmd/priv_val_server/main.go
Normal file
53
cmd/priv_val_server/main.go
Normal file
@@ -0,0 +1,53 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"os"
|
||||
|
||||
crypto "github.com/tendermint/go-crypto"
|
||||
cmn "github.com/tendermint/tmlibs/common"
|
||||
"github.com/tendermint/tmlibs/log"
|
||||
|
||||
priv_val "github.com/tendermint/tendermint/types/priv_validator"
|
||||
)
|
||||
|
||||
func main() {
|
||||
var (
|
||||
addr = flag.String("addr", ":46659", "Address of client to connect to")
|
||||
chainID = flag.String("chain-id", "mychain", "chain id")
|
||||
privValPath = flag.String("priv", "", "priv val file path")
|
||||
|
||||
logger = log.NewTMLogger(
|
||||
log.NewSyncWriter(os.Stdout),
|
||||
).With("module", "priv_val")
|
||||
)
|
||||
flag.Parse()
|
||||
|
||||
logger.Info(
|
||||
"Starting private validator",
|
||||
"addr", *addr,
|
||||
"chainID", *chainID,
|
||||
"privPath", *privValPath,
|
||||
)
|
||||
|
||||
privVal := priv_val.LoadFilePV(*privValPath)
|
||||
|
||||
rs := priv_val.NewRemoteSigner(
|
||||
logger,
|
||||
*chainID,
|
||||
*addr,
|
||||
privVal,
|
||||
crypto.GenPrivKeyEd25519(),
|
||||
)
|
||||
err := rs.Start()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
cmn.TrapSignal(func() {
|
||||
err := rs.Stop()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
})
|
||||
}
32	cmd/tendermint/commands/gen_node_key.go	Normal file
@@ -0,0 +1,32 @@
|
||||
package commands
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
cmn "github.com/tendermint/tmlibs/common"
|
||||
)
|
||||
|
||||
// GenNodeKeyCmd allows the generation of a node key. It prints node's ID to
|
||||
// the standard output.
|
||||
var GenNodeKeyCmd = &cobra.Command{
|
||||
Use: "gen_node_key",
|
||||
Short: "Generate a node key for this node and print its ID",
|
||||
RunE: genNodeKey,
|
||||
}
|
||||
|
||||
func genNodeKey(cmd *cobra.Command, args []string) error {
|
||||
nodeKeyFile := config.NodeKeyFile()
|
||||
if cmn.FileExists(nodeKeyFile) {
|
||||
return fmt.Errorf("node key at %s already exists", nodeKeyFile)
|
||||
}
|
||||
|
||||
nodeKey, err := p2p.LoadOrGenNodeKey(nodeKeyFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Println(nodeKey.ID())
|
||||
return nil
|
||||
}
|
@@ -1,12 +1,11 @@
|
||||
package commands
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"github.com/tendermint/tendermint/types"
|
||||
pvm "github.com/tendermint/tendermint/types/priv_validator"
|
||||
)
|
||||
|
||||
// GenValidatorCmd allows the generation of a keypair for a
|
||||
@@ -18,11 +17,11 @@ var GenValidatorCmd = &cobra.Command{
|
||||
}
|
||||
|
||||
func genValidator(cmd *cobra.Command, args []string) {
|
||||
privValidator := types.GenPrivValidatorFS("")
|
||||
privValidatorJSONBytes, err := json.MarshalIndent(privValidator, "", "\t")
|
||||
pv := pvm.GenFilePV("")
|
||||
jsbz, err := cdc.MarshalJSON(pv)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
fmt.Printf(`%v
|
||||
`, string(privValidatorJSONBytes))
|
||||
`, string(jsbz))
|
||||
}
|
||||
|
@@ -1,9 +1,14 @@
 package commands

 import (
+	"time"
+
 	"github.com/spf13/cobra"

+	cfg "github.com/tendermint/tendermint/config"
+	"github.com/tendermint/tendermint/p2p"
 	"github.com/tendermint/tendermint/types"
+	pvm "github.com/tendermint/tendermint/types/priv_validator"
 	cmn "github.com/tendermint/tmlibs/common"
 )

@@ -11,20 +16,34 @@ import (
 var InitFilesCmd = &cobra.Command{
 	Use:   "init",
 	Short: "Initialize Tendermint",
-	Run:   initFiles,
+	RunE:  initFiles,
 }

-func initFiles(cmd *cobra.Command, args []string) {
+func initFiles(cmd *cobra.Command, args []string) error {
+	return initFilesWithConfig(config)
+}
+
+func initFilesWithConfig(config *cfg.Config) error {
 	// private validator
 	privValFile := config.PrivValidatorFile()
-	var privValidator *types.PrivValidatorFS
+	var pv *pvm.FilePV
 	if cmn.FileExists(privValFile) {
-		privValidator = types.LoadPrivValidatorFS(privValFile)
+		pv = pvm.LoadFilePV(privValFile)
 		logger.Info("Found private validator", "path", privValFile)
 	} else {
-		privValidator = types.GenPrivValidatorFS(privValFile)
-		privValidator.Save()
-		logger.Info("Genetated private validator", "path", privValFile)
+		pv = pvm.GenFilePV(privValFile)
+		pv.Save()
+		logger.Info("Generated private validator", "path", privValFile)
 	}

+	nodeKeyFile := config.NodeKeyFile()
+	if cmn.FileExists(nodeKeyFile) {
+		logger.Info("Found node key", "path", nodeKeyFile)
+	} else {
+		if _, err := p2p.LoadOrGenNodeKey(nodeKeyFile); err != nil {
+			return err
+		}
+		logger.Info("Generated node key", "path", nodeKeyFile)
+	}
+
 	// genesis file
@@ -33,16 +52,19 @@ func initFiles(cmd *cobra.Command, args []string) {
 		logger.Info("Found genesis file", "path", genFile)
 	} else {
 		genDoc := types.GenesisDoc{
-			ChainID: cmn.Fmt("test-chain-%v", cmn.RandStr(6)),
+			ChainID:     cmn.Fmt("test-chain-%v", cmn.RandStr(6)),
+			GenesisTime: time.Now(),
 		}
 		genDoc.Validators = []types.GenesisValidator{{
-			PubKey: privValidator.GetPubKey(),
+			PubKey: pv.GetPubKey(),
 			Power:  10,
 		}}

 		if err := genDoc.SaveAs(genFile); err != nil {
-			panic(err)
+			return err
 		}
-		logger.Info("Genetated genesis file", "path", genFile)
+		logger.Info("Generated genesis file", "path", genFile)
 	}

+	return nil
 }
@@ -1,6 +1,9 @@
 package commands

 import (
+	"fmt"
+	"net/url"
+
 	"github.com/spf13/cobra"

 	cmn "github.com/tendermint/tmlibs/common"
@@ -31,13 +34,37 @@ var (
 )

 func init() {
-	LiteCmd.Flags().StringVar(&listenAddr, "laddr", ":8888", "Serve the proxy on the given port")
-	LiteCmd.Flags().StringVar(&nodeAddr, "node", "localhost:46657", "Connect to a Tendermint node at this address")
+	LiteCmd.Flags().StringVar(&listenAddr, "laddr", "tcp://localhost:8888", "Serve the proxy on the given address")
+	LiteCmd.Flags().StringVar(&nodeAddr, "node", "tcp://localhost:46657", "Connect to a Tendermint node at this address")
 	LiteCmd.Flags().StringVar(&chainID, "chain-id", "tendermint", "Specify the Tendermint chain ID")
 	LiteCmd.Flags().StringVar(&home, "home-dir", ".tendermint-lite", "Specify the home directory")
 }

+func ensureAddrHasSchemeOrDefaultToTCP(addr string) (string, error) {
+	u, err := url.Parse(addr)
+	if err != nil {
+		return "", err
+	}
+	switch u.Scheme {
+	case "tcp", "unix":
+	case "":
+		u.Scheme = "tcp"
+	default:
+		return "", fmt.Errorf("unknown scheme %q, use either tcp or unix", u.Scheme)
+	}
+	return u.String(), nil
+}
+
 func runProxy(cmd *cobra.Command, args []string) error {
+	nodeAddr, err := ensureAddrHasSchemeOrDefaultToTCP(nodeAddr)
+	if err != nil {
+		return err
+	}
+	listenAddr, err := ensureAddrHasSchemeOrDefaultToTCP(listenAddr)
+	if err != nil {
+		return err
+	}
+
 	// First, connect a client
 	node := rpcclient.NewHTTP(nodeAddr, "/websocket")

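A behaviour sketch for the new scheme handling above, reproduced outside the package because the helper is unexported; treat it as an illustration under the stated assumptions, not a test from the change set. Note that net/url parses a bare host:port as scheme:opaque, which is presumably why the default --laddr and --node values now carry an explicit tcp:// prefix.

package main

import (
	"fmt"
	"net/url"
)

// ensureScheme re-implements the unexported helper above, for illustration.
func ensureScheme(addr string) (string, error) {
	u, err := url.Parse(addr)
	if err != nil {
		return "", err
	}
	switch u.Scheme {
	case "tcp", "unix":
	case "":
		u.Scheme = "tcp"
	default:
		return "", fmt.Errorf("unknown scheme %q, use either tcp or unix", u.Scheme)
	}
	return u.String(), nil
}

func main() {
	fmt.Println(ensureScheme("tcp://localhost:46657")) // kept as-is
	fmt.Println(ensureScheme("unix:///tmp/node.sock")) // kept as-is
	fmt.Println(ensureScheme("http://localhost:8888")) // rejected: unknown scheme
}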
@@ -1,7 +1,6 @@
 package commands

 import (
-	"encoding/json"
 	"fmt"

 	"github.com/spf13/cobra"
@@ -22,7 +21,7 @@ func probeUpnp(cmd *cobra.Command, args []string) error {
 		fmt.Println("Probe failed: ", err)
 	} else {
 		fmt.Println("Probe success!")
-		jsonBytes, err := json.Marshal(capabilities)
+		jsonBytes, err := cdc.MarshalJSON(capabilities)
 		if err != nil {
 			return err
 		}
@@ -5,7 +5,7 @@ import (

 	"github.com/spf13/cobra"

-	"github.com/tendermint/tendermint/types"
+	pvm "github.com/tendermint/tendermint/types/priv_validator"
 	"github.com/tendermint/tmlibs/log"
 )

@@ -27,7 +27,7 @@ var ResetPrivValidatorCmd = &cobra.Command{
 // ResetAll removes the privValidator files.
 // Exported so other CLI tools can use it.
 func ResetAll(dbDir, privValFile string, logger log.Logger) {
-	resetPrivValidatorFS(privValFile, logger)
+	resetFilePV(privValFile, logger)
 	if err := os.RemoveAll(dbDir); err != nil {
 		logger.Error("Error removing directory", "err", err)
 		return
@@ -44,18 +44,18 @@ func resetAll(cmd *cobra.Command, args []string) {
 // XXX: this is totally unsafe.
 // it's only suitable for testnets.
 func resetPrivValidator(cmd *cobra.Command, args []string) {
-	resetPrivValidatorFS(config.PrivValidatorFile(), logger)
+	resetFilePV(config.PrivValidatorFile(), logger)
 }

-func resetPrivValidatorFS(privValFile string, logger log.Logger) {
+func resetFilePV(privValFile string, logger log.Logger) {
 	// Get PrivValidator
 	if _, err := os.Stat(privValFile); err == nil {
-		privValidator := types.LoadPrivValidatorFS(privValFile)
-		privValidator.Reset()
+		pv := pvm.LoadFilePV(privValFile)
+		pv.Reset()
 		logger.Info("Reset PrivValidator", "file", privValFile)
 	} else {
-		privValidator := types.GenPrivValidatorFS(privValFile)
-		privValidator.Save()
+		pv := pvm.GenFilePV(privValFile)
+		pv.Save()
 		logger.Info("Generated PrivValidator", "file", privValFile)
 	}
 }
@@ -14,11 +14,14 @@ func AddNodeFlags(cmd *cobra.Command) {
 	// bind flags
 	cmd.Flags().String("moniker", config.Moniker, "Node Name")

+	// priv val flags
+	cmd.Flags().String("priv_validator_laddr", config.PrivValidatorListenAddr, "Socket address to listen on for connections from external priv_validator process")
+
 	// node flags
 	cmd.Flags().Bool("fast_sync", config.FastSync, "Fast blockchain syncing")

 	// abci flags
-	cmd.Flags().String("proxy_app", config.ProxyApp, "Proxy app address, or 'nilapp' or 'dummy' for local testing.")
+	cmd.Flags().String("proxy_app", config.ProxyApp, "Proxy app address, or 'nilapp' or 'kvstore' for local testing.")
 	cmd.Flags().String("abci", config.ABCI, "Specify abci transport (socket | grpc)")

 	// rpc flags
@@ -28,18 +31,19 @@ func AddNodeFlags(cmd *cobra.Command) {

 	// p2p flags
 	cmd.Flags().String("p2p.laddr", config.P2P.ListenAddress, "Node listen address. (0.0.0.0:0 means any interface, any port)")
-	cmd.Flags().String("p2p.seeds", config.P2P.Seeds, "Comma delimited host:port seed nodes")
-	cmd.Flags().String("p2p.persistent_peers", config.P2P.PersistentPeers, "Comma delimited host:port persistent peers")
+	cmd.Flags().String("p2p.seeds", config.P2P.Seeds, "Comma-delimited ID@host:port seed nodes")
+	cmd.Flags().String("p2p.persistent_peers", config.P2P.PersistentPeers, "Comma-delimited ID@host:port persistent peers")
 	cmd.Flags().Bool("p2p.skip_upnp", config.P2P.SkipUPNP, "Skip UPNP configuration")
 	cmd.Flags().Bool("p2p.pex", config.P2P.PexReactor, "Enable/disable Peer-Exchange")
 	cmd.Flags().Bool("p2p.seed_mode", config.P2P.SeedMode, "Enable/disable seed mode")
+	cmd.Flags().String("p2p.private_peer_ids", config.P2P.PrivatePeerIDs, "Comma-delimited private peer IDs")

 	// consensus flags
 	cmd.Flags().Bool("consensus.create_empty_blocks", config.Consensus.CreateEmptyBlocks, "Set this to false to only produce blocks when there are txs or when the AppHash changes")
 }

-// NewRunNodeCmd returns the command that allows the CLI to start a
-// node. It can be used with a custom PrivValidator and in-process ABCI application.
+// NewRunNodeCmd returns the command that allows the CLI to start a node.
+// It can be used with a custom PrivValidator and in-process ABCI application.
 func NewRunNodeCmd(nodeProvider nm.NodeProvider) *cobra.Command {
 	cmd := &cobra.Command{
 		Use:   "node",
@@ -53,9 +57,8 @@ func NewRunNodeCmd(nodeProvider nm.NodeProvider) *cobra.Command {

 			if err := n.Start(); err != nil {
 				return fmt.Errorf("Failed to start node: %v", err)
-			} else {
-				logger.Info("Started node", "nodeInfo", n.Switch().NodeInfo())
 			}
+			logger.Info("Started node", "nodeInfo", n.Switch().NodeInfo())

 			// Trap signal, run forever.
 			n.RunForever()
27  cmd/tendermint/commands/show_node_id.go  (new file)
@@ -0,0 +1,27 @@
+package commands
+
+import (
+	"fmt"
+
+	"github.com/spf13/cobra"
+
+	"github.com/tendermint/tendermint/p2p"
+)
+
+// ShowNodeIDCmd dumps node's ID to the standard output.
+var ShowNodeIDCmd = &cobra.Command{
+	Use:   "show_node_id",
+	Short: "Show this node's ID",
+	RunE:  showNodeID,
+}
+
+func showNodeID(cmd *cobra.Command, args []string) error {
+
+	nodeKey, err := p2p.LoadNodeKey(config.NodeKeyFile())
+	if err != nil {
+		return err
+	}
+	fmt.Println(nodeKey.ID())
+
+	return nil
+}
@@ -5,8 +5,7 @@ import (

 	"github.com/spf13/cobra"

-	"github.com/tendermint/go-wire/data"
-	"github.com/tendermint/tendermint/types"
+	privval "github.com/tendermint/tendermint/types/priv_validator"
 )

 // ShowValidatorCmd adds capabilities for showing the validator info.
@@ -17,7 +16,7 @@ var ShowValidatorCmd = &cobra.Command{
 }

 func showValidator(cmd *cobra.Command, args []string) {
-	privValidator := types.LoadOrGenPrivValidatorFS(config.PrivValidatorFile())
-	pubKeyJSONBytes, _ := data.ToJSON(privValidator.PubKey)
+	privValidator := privval.LoadOrGenFilePV(config.PrivValidatorFile())
+	pubKeyJSONBytes, _ := cdc.MarshalJSON(privValidator.GetPubKey())
 	fmt.Println(string(pubKeyJSONBytes))
 }
@@ -2,59 +2,114 @@ package commands
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
pvm "github.com/tendermint/tendermint/types/priv_validator"
|
||||
cmn "github.com/tendermint/tmlibs/common"
|
||||
)
|
||||
|
||||
//flags
|
||||
var (
|
||||
nValidators int
|
||||
dataDir string
|
||||
nValidators int
|
||||
nNonValidators int
|
||||
outputDir string
|
||||
nodeDirPrefix string
|
||||
|
||||
populatePersistentPeers bool
|
||||
hostnamePrefix string
|
||||
startingIPAddress string
|
||||
p2pPort int
|
||||
)
|
||||
|
||||
const (
|
||||
nodeDirPerm = 0755
|
||||
)
|
||||
|
||||
func init() {
|
||||
TestnetFilesCmd.Flags().IntVar(&nValidators, "n", 4,
|
||||
TestnetFilesCmd.Flags().IntVar(&nValidators, "v", 4,
|
||||
"Number of validators to initialize the testnet with")
|
||||
TestnetFilesCmd.Flags().StringVar(&dataDir, "dir", "mytestnet",
|
||||
TestnetFilesCmd.Flags().IntVar(&nNonValidators, "n", 0,
|
||||
"Number of non-validators to initialize the testnet with")
|
||||
TestnetFilesCmd.Flags().StringVar(&outputDir, "o", "./mytestnet",
|
||||
"Directory to store initialization data for the testnet")
|
||||
TestnetFilesCmd.Flags().StringVar(&nodeDirPrefix, "node-dir-prefix", "node",
|
||||
"Prefix the directory name for each node with (node results in node0, node1, ...)")
|
||||
|
||||
TestnetFilesCmd.Flags().BoolVar(&populatePersistentPeers, "populate-persistent-peers", true,
|
||||
"Update config of each node with the list of persistent peers build using either hostname-prefix or starting-ip-address")
|
||||
TestnetFilesCmd.Flags().StringVar(&hostnamePrefix, "hostname-prefix", "node",
|
||||
"Hostname prefix (node results in persistent peers list ID0@node0:46656, ID1@node1:46656, ...)")
|
||||
TestnetFilesCmd.Flags().StringVar(&startingIPAddress, "starting-ip-address", "",
|
||||
"Starting IP address (192.168.0.1 results in persistent peers list ID0@192.168.0.1:46656, ID1@192.168.0.2:46656, ...)")
|
||||
TestnetFilesCmd.Flags().IntVar(&p2pPort, "p2p-port", 46656,
|
||||
"P2P Port")
|
||||
}
|
||||
|
||||
// TestnetFilesCmd allows initialisation of files for a
|
||||
// Tendermint testnet.
|
||||
// TestnetFilesCmd allows initialisation of files for a Tendermint testnet.
|
||||
var TestnetFilesCmd = &cobra.Command{
|
||||
Use: "testnet",
|
||||
Short: "Initialize files for a Tendermint testnet",
|
||||
Run: testnetFiles,
|
||||
Long: `testnet will create "v" + "n" number of directories and populate each with
|
||||
necessary files (private validator, genesis, config, etc.).
|
||||
|
||||
Note, strict routability for addresses is turned off in the config file.
|
||||
|
||||
Optionally, it will fill in persistent_peers list in config file using either hostnames or IPs.
|
||||
|
||||
Example:
|
||||
|
||||
tendermint testnet --v 4 --o ./output --populate-persistent-peers --starting-ip-address 192.168.10.2
|
||||
`,
|
||||
RunE: testnetFiles,
|
||||
}
|
||||
|
||||
func testnetFiles(cmd *cobra.Command, args []string) {
|
||||
|
||||
func testnetFiles(cmd *cobra.Command, args []string) error {
|
||||
config := cfg.DefaultConfig()
|
||||
genVals := make([]types.GenesisValidator, nValidators)
|
||||
defaultConfig := cfg.DefaultBaseConfig()
|
||||
|
||||
// Initialize core dir and priv_validator.json's
|
||||
for i := 0; i < nValidators; i++ {
|
||||
mach := cmn.Fmt("mach%d", i)
|
||||
err := initMachCoreDirectory(dataDir, mach)
|
||||
nodeDirName := cmn.Fmt("%s%d", nodeDirPrefix, i)
|
||||
nodeDir := filepath.Join(outputDir, nodeDirName)
|
||||
config.SetRoot(nodeDir)
|
||||
|
||||
err := os.MkdirAll(filepath.Join(nodeDir, "config"), nodeDirPerm)
|
||||
if err != nil {
|
||||
cmn.Exit(err.Error())
|
||||
_ = os.RemoveAll(outputDir)
|
||||
return err
|
||||
}
|
||||
// Read priv_validator.json to populate vals
|
||||
privValFile := filepath.Join(dataDir, mach, defaultConfig.PrivValidator)
|
||||
privVal := types.LoadPrivValidatorFS(privValFile)
|
||||
|
||||
initFilesWithConfig(config)
|
||||
|
||||
pvFile := filepath.Join(nodeDir, config.BaseConfig.PrivValidator)
|
||||
pv := pvm.LoadFilePV(pvFile)
|
||||
genVals[i] = types.GenesisValidator{
|
||||
PubKey: privVal.GetPubKey(),
|
||||
PubKey: pv.GetPubKey(),
|
||||
Power: 1,
|
||||
Name: mach,
|
||||
Name: nodeDirName,
|
||||
}
|
||||
}
|
||||
|
||||
for i := 0; i < nNonValidators; i++ {
|
||||
nodeDir := filepath.Join(outputDir, cmn.Fmt("%s%d", nodeDirPrefix, i+nValidators))
|
||||
config.SetRoot(nodeDir)
|
||||
|
||||
err := os.MkdirAll(filepath.Join(nodeDir, "config"), nodeDirPerm)
|
||||
if err != nil {
|
||||
_ = os.RemoveAll(outputDir)
|
||||
return err
|
||||
}
|
||||
|
||||
initFilesWithConfig(config)
|
||||
}
|
||||
|
||||
// Generate genesis doc from generated validators
|
||||
genDoc := &types.GenesisDoc{
|
||||
GenesisTime: time.Now(),
|
||||
@@ -63,35 +118,66 @@ func testnetFiles(cmd *cobra.Command, args []string) {
|
||||
}
|
||||
|
||||
// Write genesis file.
|
||||
for i := 0; i < nValidators; i++ {
|
||||
mach := cmn.Fmt("mach%d", i)
|
||||
if err := genDoc.SaveAs(filepath.Join(dataDir, mach, defaultConfig.Genesis)); err != nil {
|
||||
panic(err)
|
||||
for i := 0; i < nValidators+nNonValidators; i++ {
|
||||
nodeDir := filepath.Join(outputDir, cmn.Fmt("%s%d", nodeDirPrefix, i))
|
||||
if err := genDoc.SaveAs(filepath.Join(nodeDir, config.BaseConfig.Genesis)); err != nil {
|
||||
_ = os.RemoveAll(outputDir)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Println(cmn.Fmt("Successfully initialized %v node directories", nValidators))
|
||||
}
|
||||
|
||||
// Initialize per-machine core directory
|
||||
func initMachCoreDirectory(base, mach string) error {
|
||||
dir := filepath.Join(base, mach)
|
||||
err := cmn.EnsureDir(dir, 0777)
|
||||
if err != nil {
|
||||
return err
|
||||
if populatePersistentPeers {
|
||||
err := populatePersistentPeersInConfigAndWriteIt(config)
|
||||
if err != nil {
|
||||
_ = os.RemoveAll(outputDir)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Create priv_validator.json file if not present
|
||||
defaultConfig := cfg.DefaultBaseConfig()
|
||||
ensurePrivValidator(filepath.Join(dir, defaultConfig.PrivValidator))
|
||||
fmt.Printf("Successfully initialized %v node directories\n", nValidators+nNonValidators)
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
func ensurePrivValidator(file string) {
|
||||
if cmn.FileExists(file) {
|
||||
return
|
||||
func hostnameOrIP(i int) string {
|
||||
if startingIPAddress != "" {
|
||||
ip := net.ParseIP(startingIPAddress)
|
||||
ip = ip.To4()
|
||||
if ip == nil {
|
||||
fmt.Printf("%v: non ipv4 address\n", startingIPAddress)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
for j := 0; j < i; j++ {
|
||||
ip[3]++
|
||||
}
|
||||
return ip.String()
|
||||
}
|
||||
privValidator := types.GenPrivValidatorFS(file)
|
||||
privValidator.Save()
|
||||
|
||||
return fmt.Sprintf("%s%d", hostnamePrefix, i)
|
||||
}
|
||||
|
||||
func populatePersistentPeersInConfigAndWriteIt(config *cfg.Config) error {
|
||||
persistentPeers := make([]string, nValidators+nNonValidators)
|
||||
for i := 0; i < nValidators+nNonValidators; i++ {
|
||||
nodeDir := filepath.Join(outputDir, cmn.Fmt("%s%d", nodeDirPrefix, i))
|
||||
config.SetRoot(nodeDir)
|
||||
nodeKey, err := p2p.LoadNodeKey(config.NodeKeyFile())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
persistentPeers[i] = p2p.IDAddressString(nodeKey.ID(), fmt.Sprintf("%s:%d", hostnameOrIP(i), p2pPort))
|
||||
}
|
||||
persistentPeersList := strings.Join(persistentPeers, ",")
|
||||
|
||||
for i := 0; i < nValidators+nNonValidators; i++ {
|
||||
nodeDir := filepath.Join(outputDir, cmn.Fmt("%s%d", nodeDirPrefix, i))
|
||||
config.SetRoot(nodeDir)
|
||||
config.P2P.PersistentPeers = persistentPeersList
|
||||
config.P2P.AddrBookStrict = false
|
||||
|
||||
// overwrite default config
|
||||
cfg.WriteConfigFile(filepath.Join(nodeDir, "config", "config.toml"), config)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
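Aside (not part of the diff): when --starting-ip-address is given, the testnet command above derives node i's address by bumping the last octet i times (see hostnameOrIP). A standalone sketch of that arithmetic, assuming the addresses stay inside one /24 since the helper does not guard against overflow:

package main

import (
	"fmt"
	"net"
)

func main() {
	start := net.ParseIP("192.168.10.2").To4()
	for i := 0; i < 4; i++ {
		ip := make(net.IP, len(start))
		copy(ip, start)
		for j := 0; j < i; j++ {
			ip[3]++ // same increment as hostnameOrIP
		}
		fmt.Printf("node%d -> %s:46656\n", i, ip)
	}
}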
12  cmd/tendermint/commands/wire.go  (new file)
@@ -0,0 +1,12 @@
+package commands
+
+import (
+	"github.com/tendermint/go-amino"
+	"github.com/tendermint/go-crypto"
+)
+
+var cdc = amino.NewCodec()
+
+func init() {
+	crypto.RegisterAmino(cdc)
+}
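For context (a sketch, not part of the diff): the codec registered here is what the cdc.MarshalJSON calls in gen_validator.go and show_validator.go rely on. Assuming the same package, so cdc and the go-crypto import above are in scope, key material renders roughly like this:

// examplePubKeyJSON is illustrative only; it is not defined in the change set.
func examplePubKeyJSON() (string, error) {
	pk := crypto.GenPrivKeyEd25519().PubKey()
	bz, err := cdc.MarshalJSON(pk) // uses the codec registered in init() above
	if err != nil {
		return "", err
	}
	return string(bz), nil
}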
@@ -24,6 +24,8 @@ func main() {
 		cmd.ResetPrivValidatorCmd,
 		cmd.ShowValidatorCmd,
 		cmd.TestnetFilesCmd,
+		cmd.ShowNodeIDCmd,
+		cmd.GenNodeKeyCmd,
 		cmd.VersionCmd)

 	// NOTE:
23  codecov.yml  (new file)
@@ -0,0 +1,23 @@
+coverage:
+  precision: 2
+  round: down
+  range: "70...100"
+
+  status:
+    project:
+      default:
+        threshold: 1%
+    patch: on
+    changes: off
+
+comment:
+  layout: "diff, files"
+  behavior: default
+  require_changes: no
+  require_base: no
+  require_head: yes
+
+ignore:
+  - "docs"
+  - "DOCKER"
+  - "scripts"
192  config/config.go
@@ -20,9 +20,10 @@ var (
|
||||
|
||||
defaultConfigFileName = "config.toml"
|
||||
defaultGenesisJSONName = "genesis.json"
|
||||
defaultPrivValName = "priv_validator.json"
|
||||
defaultNodeKeyName = "node_key.json"
|
||||
defaultAddrBookName = "addrbook.json"
|
||||
|
||||
defaultPrivValName = "priv_validator.json"
|
||||
defaultNodeKeyName = "node_key.json"
|
||||
defaultAddrBookName = "addrbook.json"
|
||||
|
||||
defaultConfigFilePath = filepath.Join(defaultConfigDir, defaultConfigFileName)
|
||||
defaultGenesisJSONPath = filepath.Join(defaultConfigDir, defaultGenesisJSONName)
|
||||
@@ -103,6 +104,10 @@ type BaseConfig struct {
|
||||
// A custom human readable name for this node
|
||||
Moniker string `mapstructure:"moniker"`
|
||||
|
||||
// TCP or UNIX socket address for Tendermint to listen on for
|
||||
// connections from an external PrivValidator process
|
||||
PrivValidatorListenAddr string `mapstructure:"priv_validator_laddr"`
|
||||
|
||||
// TCP or UNIX socket address of the ABCI application,
|
||||
// or the name of an ABCI application compiled in with the Tendermint binary
|
||||
ProxyApp string `mapstructure:"proxy_app"`
|
||||
@@ -132,10 +137,6 @@ type BaseConfig struct {
|
||||
DBPath string `mapstructure:"db_dir"`
|
||||
}
|
||||
|
||||
func (c BaseConfig) ChainID() string {
|
||||
return c.chainID
|
||||
}
|
||||
|
||||
// DefaultBaseConfig returns a default base configuration for a Tendermint node
|
||||
func DefaultBaseConfig() BaseConfig {
|
||||
return BaseConfig{
|
||||
@@ -156,32 +157,36 @@ func DefaultBaseConfig() BaseConfig {
|
||||
|
||||
// TestBaseConfig returns a base configuration for testing a Tendermint node
|
||||
func TestBaseConfig() BaseConfig {
|
||||
conf := DefaultBaseConfig()
|
||||
conf.chainID = "tendermint_test"
|
||||
conf.ProxyApp = "dummy"
|
||||
conf.FastSync = false
|
||||
conf.DBBackend = "memdb"
|
||||
return conf
|
||||
cfg := DefaultBaseConfig()
|
||||
cfg.chainID = "tendermint_test"
|
||||
cfg.ProxyApp = "kvstore"
|
||||
cfg.FastSync = false
|
||||
cfg.DBBackend = "memdb"
|
||||
return cfg
|
||||
}
|
||||
|
||||
func (cfg BaseConfig) ChainID() string {
|
||||
return cfg.chainID
|
||||
}
|
||||
|
||||
// GenesisFile returns the full path to the genesis.json file
|
||||
func (b BaseConfig) GenesisFile() string {
|
||||
return rootify(b.Genesis, b.RootDir)
|
||||
func (cfg BaseConfig) GenesisFile() string {
|
||||
return rootify(cfg.Genesis, cfg.RootDir)
|
||||
}
|
||||
|
||||
// PrivValidatorFile returns the full path to the priv_validator.json file
|
||||
func (b BaseConfig) PrivValidatorFile() string {
|
||||
return rootify(b.PrivValidator, b.RootDir)
|
||||
func (cfg BaseConfig) PrivValidatorFile() string {
|
||||
return rootify(cfg.PrivValidator, cfg.RootDir)
|
||||
}
|
||||
|
||||
// NodeKeyFile returns the full path to the node_key.json file
|
||||
func (b BaseConfig) NodeKeyFile() string {
|
||||
return rootify(b.NodeKey, b.RootDir)
|
||||
func (cfg BaseConfig) NodeKeyFile() string {
|
||||
return rootify(cfg.NodeKey, cfg.RootDir)
|
||||
}
|
||||
|
||||
// DBDir returns the full path to the database directory
|
||||
func (b BaseConfig) DBDir() string {
|
||||
return rootify(b.DBPath, b.RootDir)
|
||||
func (cfg BaseConfig) DBDir() string {
|
||||
return rootify(cfg.DBPath, cfg.RootDir)
|
||||
}
|
||||
|
||||
// DefaultLogLevel returns a default log level of "error"
|
||||
@@ -224,11 +229,11 @@ func DefaultRPCConfig() *RPCConfig {
|
||||
|
||||
// TestRPCConfig returns a configuration for testing the RPC server
|
||||
func TestRPCConfig() *RPCConfig {
|
||||
conf := DefaultRPCConfig()
|
||||
conf.ListenAddress = "tcp://0.0.0.0:36657"
|
||||
conf.GRPCListenAddress = "tcp://0.0.0.0:36658"
|
||||
conf.Unsafe = true
|
||||
return conf
|
||||
cfg := DefaultRPCConfig()
|
||||
cfg.ListenAddress = "tcp://0.0.0.0:36657"
|
||||
cfg.GRPCListenAddress = "tcp://0.0.0.0:36658"
|
||||
cfg.Unsafe = true
|
||||
return cfg
|
||||
}
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
@@ -245,8 +250,8 @@ type P2PConfig struct {
|
||||
// We only use these if we can’t connect to peers in the addrbook
|
||||
Seeds string `mapstructure:"seeds"`
|
||||
|
||||
// Comma separated list of persistent peers to connect to
|
||||
// We always connect to these
|
||||
// Comma separated list of nodes to keep persistent connections to
|
||||
// Do not add private peers to this list if you don't want them advertised
|
||||
PersistentPeers string `mapstructure:"persistent_peers"`
|
||||
|
||||
// Skip UPNP port forwarding
|
||||
@@ -265,7 +270,7 @@ type P2PConfig struct {
|
||||
FlushThrottleTimeout int `mapstructure:"flush_throttle_timeout"`
|
||||
|
||||
// Maximum size of a message packet payload, in bytes
|
||||
MaxMsgPacketPayloadSize int `mapstructure:"max_msg_packet_payload_size"`
|
||||
MaxPacketMsgPayloadSize int `mapstructure:"max_packet_msg_payload_size"`
|
||||
|
||||
// Rate at which packets can be sent, in bytes/second
|
||||
SendRate int64 `mapstructure:"send_rate"`
|
||||
@@ -281,6 +286,12 @@ type P2PConfig struct {
|
||||
//
|
||||
// Does not work if the peer-exchange reactor is disabled.
|
||||
SeedMode bool `mapstructure:"seed_mode"`
|
||||
|
||||
// Authenticated encryption
|
||||
AuthEnc bool `mapstructure:"auth_enc"`
|
||||
|
||||
// Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
|
||||
PrivatePeerIDs string `mapstructure:"private_peer_ids"`
|
||||
}
|
||||
|
||||
// DefaultP2PConfig returns a default configuration for the peer-to-peer layer
|
||||
@@ -291,26 +302,27 @@ func DefaultP2PConfig() *P2PConfig {
|
||||
AddrBookStrict: true,
|
||||
MaxNumPeers: 50,
|
||||
FlushThrottleTimeout: 100,
|
||||
MaxMsgPacketPayloadSize: 1024, // 1 kB
|
||||
MaxPacketMsgPayloadSize: 1024, // 1 kB
|
||||
SendRate: 512000, // 500 kB/s
|
||||
RecvRate: 512000, // 500 kB/s
|
||||
PexReactor: true,
|
||||
SeedMode: false,
|
||||
AuthEnc: true,
|
||||
}
|
||||
}
|
||||
|
||||
// TestP2PConfig returns a configuration for testing the peer-to-peer layer
|
||||
func TestP2PConfig() *P2PConfig {
|
||||
conf := DefaultP2PConfig()
|
||||
conf.ListenAddress = "tcp://0.0.0.0:36656"
|
||||
conf.SkipUPNP = true
|
||||
conf.FlushThrottleTimeout = 10
|
||||
return conf
|
||||
cfg := DefaultP2PConfig()
|
||||
cfg.ListenAddress = "tcp://0.0.0.0:36656"
|
||||
cfg.SkipUPNP = true
|
||||
cfg.FlushThrottleTimeout = 10
|
||||
return cfg
|
||||
}
|
||||
|
||||
// AddrBookFile returns the full path to the address book
|
||||
func (p *P2PConfig) AddrBookFile() string {
|
||||
return rootify(p.AddrBook, p.RootDir)
|
||||
func (cfg *P2PConfig) AddrBookFile() string {
|
||||
return rootify(cfg.AddrBook, cfg.RootDir)
|
||||
}
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
@@ -323,6 +335,7 @@ type MempoolConfig struct {
|
||||
RecheckEmpty bool `mapstructure:"recheck_empty"`
|
||||
Broadcast bool `mapstructure:"broadcast"`
|
||||
WalPath string `mapstructure:"wal_dir"`
|
||||
Size int `mapstructure:"size"`
|
||||
CacheSize int `mapstructure:"cache_size"`
|
||||
}
|
||||
|
||||
@@ -333,20 +346,21 @@ func DefaultMempoolConfig() *MempoolConfig {
|
||||
RecheckEmpty: true,
|
||||
Broadcast: true,
|
||||
WalPath: filepath.Join(defaultDataDir, "mempool.wal"),
|
||||
Size: 100000,
|
||||
CacheSize: 100000,
|
||||
}
|
||||
}
|
||||
|
||||
// TestMempoolConfig returns a configuration for testing the Tendermint mempool
|
||||
func TestMempoolConfig() *MempoolConfig {
|
||||
config := DefaultMempoolConfig()
|
||||
config.CacheSize = 1000
|
||||
return config
|
||||
cfg := DefaultMempoolConfig()
|
||||
cfg.CacheSize = 1000
|
||||
return cfg
|
||||
}
|
||||
|
||||
// WalDir returns the full path to the mempool's write-ahead log
|
||||
func (m *MempoolConfig) WalDir() string {
|
||||
return rootify(m.WalPath, m.RootDir)
|
||||
func (cfg *MempoolConfig) WalDir() string {
|
||||
return rootify(cfg.WalPath, cfg.RootDir)
|
||||
}
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
@@ -355,10 +369,9 @@ func (m *MempoolConfig) WalDir() string {
|
||||
// ConsensusConfig defines the confuguration for the Tendermint consensus service,
|
||||
// including timeouts and details about the WAL and the block structure.
|
||||
type ConsensusConfig struct {
|
||||
RootDir string `mapstructure:"home"`
|
||||
WalPath string `mapstructure:"wal_file"`
|
||||
WalLight bool `mapstructure:"wal_light"`
|
||||
walFile string // overrides WalPath if set
|
||||
RootDir string `mapstructure:"home"`
|
||||
WalPath string `mapstructure:"wal_file"`
|
||||
walFile string // overrides WalPath if set
|
||||
|
||||
// All timeouts are in milliseconds
|
||||
TimeoutPropose int `mapstructure:"timeout_propose"`
|
||||
@@ -385,6 +398,43 @@ type ConsensusConfig struct {
|
||||
PeerQueryMaj23SleepDuration int `mapstructure:"peer_query_maj23_sleep_duration"`
|
||||
}
|
||||
|
||||
// DefaultConsensusConfig returns a default configuration for the consensus service
|
||||
func DefaultConsensusConfig() *ConsensusConfig {
|
||||
return &ConsensusConfig{
|
||||
WalPath: filepath.Join(defaultDataDir, "cs.wal", "wal"),
|
||||
TimeoutPropose: 3000,
|
||||
TimeoutProposeDelta: 500,
|
||||
TimeoutPrevote: 1000,
|
||||
TimeoutPrevoteDelta: 500,
|
||||
TimeoutPrecommit: 1000,
|
||||
TimeoutPrecommitDelta: 500,
|
||||
TimeoutCommit: 1000,
|
||||
SkipTimeoutCommit: false,
|
||||
MaxBlockSizeTxs: 10000,
|
||||
MaxBlockSizeBytes: 1, // TODO
|
||||
CreateEmptyBlocks: true,
|
||||
CreateEmptyBlocksInterval: 0,
|
||||
PeerGossipSleepDuration: 100,
|
||||
PeerQueryMaj23SleepDuration: 2000,
|
||||
}
|
||||
}
|
||||
|
||||
// TestConsensusConfig returns a configuration for testing the consensus service
|
||||
func TestConsensusConfig() *ConsensusConfig {
|
||||
cfg := DefaultConsensusConfig()
|
||||
cfg.TimeoutPropose = 100
|
||||
cfg.TimeoutProposeDelta = 1
|
||||
cfg.TimeoutPrevote = 10
|
||||
cfg.TimeoutPrevoteDelta = 1
|
||||
cfg.TimeoutPrecommit = 10
|
||||
cfg.TimeoutPrecommitDelta = 1
|
||||
cfg.TimeoutCommit = 10
|
||||
cfg.SkipTimeoutCommit = true
|
||||
cfg.PeerGossipSleepDuration = 5
|
||||
cfg.PeerQueryMaj23SleepDuration = 250
|
||||
return cfg
|
||||
}
|
||||
|
||||
// WaitForTxs returns true if the consensus should wait for transactions before entering the propose step
|
||||
func (cfg *ConsensusConfig) WaitForTxs() bool {
|
||||
return !cfg.CreateEmptyBlocks || cfg.CreateEmptyBlocksInterval > 0
|
||||
@@ -425,55 +475,17 @@ func (cfg *ConsensusConfig) PeerQueryMaj23Sleep() time.Duration {
|
||||
return time.Duration(cfg.PeerQueryMaj23SleepDuration) * time.Millisecond
|
||||
}
|
||||
|
||||
// DefaultConsensusConfig returns a default configuration for the consensus service
|
||||
func DefaultConsensusConfig() *ConsensusConfig {
|
||||
return &ConsensusConfig{
|
||||
WalPath: filepath.Join(defaultDataDir, "cs.wal", "wal"),
|
||||
WalLight: false,
|
||||
TimeoutPropose: 3000,
|
||||
TimeoutProposeDelta: 500,
|
||||
TimeoutPrevote: 1000,
|
||||
TimeoutPrevoteDelta: 500,
|
||||
TimeoutPrecommit: 1000,
|
||||
TimeoutPrecommitDelta: 500,
|
||||
TimeoutCommit: 1000,
|
||||
SkipTimeoutCommit: false,
|
||||
MaxBlockSizeTxs: 10000,
|
||||
MaxBlockSizeBytes: 1, // TODO
|
||||
CreateEmptyBlocks: true,
|
||||
CreateEmptyBlocksInterval: 0,
|
||||
PeerGossipSleepDuration: 100,
|
||||
PeerQueryMaj23SleepDuration: 2000,
|
||||
}
|
||||
}
|
||||
|
||||
// TestConsensusConfig returns a configuration for testing the consensus service
|
||||
func TestConsensusConfig() *ConsensusConfig {
|
||||
config := DefaultConsensusConfig()
|
||||
config.TimeoutPropose = 100
|
||||
config.TimeoutProposeDelta = 1
|
||||
config.TimeoutPrevote = 10
|
||||
config.TimeoutPrevoteDelta = 1
|
||||
config.TimeoutPrecommit = 10
|
||||
config.TimeoutPrecommitDelta = 1
|
||||
config.TimeoutCommit = 10
|
||||
config.SkipTimeoutCommit = true
|
||||
config.PeerGossipSleepDuration = 5
|
||||
config.PeerQueryMaj23SleepDuration = 250
|
||||
return config
|
||||
}
|
||||
|
||||
// WalFile returns the full path to the write-ahead log file
|
||||
func (c *ConsensusConfig) WalFile() string {
|
||||
if c.walFile != "" {
|
||||
return c.walFile
|
||||
func (cfg *ConsensusConfig) WalFile() string {
|
||||
if cfg.walFile != "" {
|
||||
return cfg.walFile
|
||||
}
|
||||
return rootify(c.WalPath, c.RootDir)
|
||||
return rootify(cfg.WalPath, cfg.RootDir)
|
||||
}
|
||||
|
||||
// SetWalFile sets the path to the write-ahead log file
|
||||
func (c *ConsensusConfig) SetWalFile(walFile string) {
|
||||
c.walFile = walFile
|
||||
func (cfg *ConsensusConfig) SetWalFile(walFile string) {
|
||||
cfg.walFile = walFile
|
||||
}
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
|
@@ -37,16 +37,21 @@ func EnsureRoot(rootDir string) {
|
||||
|
||||
// Write default config file if missing.
|
||||
if !cmn.FileExists(configFilePath) {
|
||||
writeConfigFile(configFilePath)
|
||||
writeDefaultConfigFile(configFilePath)
|
||||
}
|
||||
}
|
||||
|
||||
// XXX: this func should probably be called by cmd/tendermint/commands/init.go
|
||||
// alongside the writing of the genesis.json and priv_validator.json
|
||||
func writeConfigFile(configFilePath string) {
|
||||
func writeDefaultConfigFile(configFilePath string) {
|
||||
WriteConfigFile(configFilePath, DefaultConfig())
|
||||
}
|
||||
|
||||
// WriteConfigFile renders config using the template and writes it to configFilePath.
|
||||
func WriteConfigFile(configFilePath string, config *Config) {
|
||||
var buffer bytes.Buffer
|
||||
|
||||
if err := configTemplate.Execute(&buffer, DefaultConfig()); err != nil {
|
||||
if err := configTemplate.Execute(&buffer, config); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
@@ -124,10 +129,11 @@ unsafe = {{ .RPC.Unsafe }}
|
||||
laddr = "{{ .P2P.ListenAddress }}"
|
||||
|
||||
# Comma separated list of seed nodes to connect to
|
||||
seeds = ""
|
||||
seeds = "{{ .P2P.Seeds }}"
|
||||
|
||||
# Comma separated list of nodes to keep persistent connections to
|
||||
persistent_peers = ""
|
||||
# Do not add private peers to this list if you don't want them advertised
|
||||
persistent_peers = "{{ .P2P.PersistentPeers }}"
|
||||
|
||||
# Path to address book
|
||||
addr_book_file = "{{ .P2P.AddrBook }}"
|
||||
@@ -142,7 +148,7 @@ flush_throttle_timeout = {{ .P2P.FlushThrottleTimeout }}
|
||||
max_num_peers = {{ .P2P.MaxNumPeers }}
|
||||
|
||||
# Maximum size of a message packet payload, in bytes
|
||||
max_msg_packet_payload_size = {{ .P2P.MaxMsgPacketPayloadSize }}
|
||||
max_packet_msg_payload_size = {{ .P2P.MaxPacketMsgPayloadSize }}
|
||||
|
||||
# Rate at which packets can be sent, in bytes/second
|
||||
send_rate = {{ .P2P.SendRate }}
|
||||
@@ -159,6 +165,12 @@ pex = {{ .P2P.PexReactor }}
|
||||
# Does not work if the peer-exchange reactor is disabled.
|
||||
seed_mode = {{ .P2P.SeedMode }}
|
||||
|
||||
# Authenticated encryption
|
||||
auth_enc = {{ .P2P.AuthEnc }}
|
||||
|
||||
# Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
|
||||
private_peer_ids = "{{ .P2P.PrivatePeerIDs }}"
|
||||
|
||||
##### mempool configuration options #####
|
||||
[mempool]
|
||||
|
||||
@@ -167,11 +179,16 @@ recheck_empty = {{ .Mempool.RecheckEmpty }}
|
||||
broadcast = {{ .Mempool.Broadcast }}
|
||||
wal_dir = "{{ .Mempool.WalPath }}"
|
||||
|
||||
# size of the mempool
|
||||
size = {{ .Mempool.Size }}
|
||||
|
||||
# size of the cache (used to filter transactions we saw earlier)
|
||||
cache_size = {{ .Mempool.CacheSize }}
|
||||
|
||||
##### consensus configuration options #####
|
||||
[consensus]
|
||||
|
||||
wal_file = "{{ .Consensus.WalPath }}"
|
||||
wal_light = {{ .Consensus.WalLight }}
|
||||
|
||||
# All timeouts are in milliseconds
|
||||
timeout_propose = {{ .Consensus.TimeoutPropose }}
|
||||
@@ -255,7 +272,7 @@ func ResetTestRoot(testName string) *Config {
|
||||
|
||||
// Write default config file if missing.
|
||||
if !cmn.FileExists(configFilePath) {
|
||||
writeConfigFile(configFilePath)
|
||||
writeDefaultConfigFile(configFilePath)
|
||||
}
|
||||
if !cmn.FileExists(genesisFilePath) {
|
||||
cmn.MustWriteFile(genesisFilePath, []byte(testGenesis), 0644)
|
||||
@@ -273,8 +290,8 @@ var testGenesis = `{
|
||||
"validators": [
|
||||
{
|
||||
"pub_key": {
|
||||
"type": "ed25519",
|
||||
"data":"3B3069C422E19688B45CBFAE7BB009FC0FA1B1EA86593519318B7214853803C8"
|
||||
"type": "AC26791624DE60",
|
||||
"value":"AT/+aaL1eB0477Mud9JMm8Sh8BIvOYlPGC9KkIUmFaE="
|
||||
},
|
||||
"power": 10,
|
||||
"name": ""
|
||||
@@ -284,14 +301,14 @@ var testGenesis = `{
|
||||
}`
|
||||
|
||||
var testPrivValidator = `{
|
||||
"address": "D028C9981F7A87F3093672BF0D5B0E2A1B3ED456",
|
||||
"address": "849CB2C877F87A20925F35D00AE6688342D25B47",
|
||||
"pub_key": {
|
||||
"type": "ed25519",
|
||||
"data": "3B3069C422E19688B45CBFAE7BB009FC0FA1B1EA86593519318B7214853803C8"
|
||||
"type": "AC26791624DE60",
|
||||
"value": "AT/+aaL1eB0477Mud9JMm8Sh8BIvOYlPGC9KkIUmFaE="
|
||||
},
|
||||
"priv_key": {
|
||||
"type": "ed25519",
|
||||
"data": "27F82582AEFAE7AB151CFB01C48BB6C1A0DA78F9BDDA979A9F70A84D074EB07D3B3069C422E19688B45CBFAE7BB009FC0FA1B1EA86593519318B7214853803C8"
|
||||
"type": "954568A3288910",
|
||||
"value": "EVkqJO/jIXp3rkASXfh9YnyToYXRXhBr6g9cQVxPFnQBP/5povV4HTjvsy530kybxKHwEi85iU8YL0qQhSYVoQ=="
|
||||
},
|
||||
"last_height": 0,
|
||||
"last_round": 0,
|
||||
|
@@ -7,7 +7,6 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
crypto "github.com/tendermint/go-crypto"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
cmn "github.com/tendermint/tmlibs/common"
|
||||
@@ -46,9 +45,11 @@ func TestByzantine(t *testing.T) {
|
||||
eventChans := make([]chan interface{}, N)
|
||||
reactors := make([]p2p.Reactor, N)
|
||||
for i := 0; i < N; i++ {
|
||||
// make first val byzantine
|
||||
if i == 0 {
|
||||
css[i].privValidator = NewByzantinePrivValidator(css[i].privValidator)
|
||||
// make byzantine
|
||||
// NOTE: Now, test validators are MockPV, which by default doesn't
|
||||
// do any safety checks.
|
||||
css[i].privValidator.(*types.MockPV).DisableChecks()
|
||||
css[i].decideProposal = func(j int) func(int64, int) {
|
||||
return func(height int64, round int) {
|
||||
byzantineDecideProposalFunc(t, height, round, css[j], switches[j])
|
||||
@@ -74,9 +75,11 @@ func TestByzantine(t *testing.T) {
|
||||
var conRI p2p.Reactor // nolint: gotype, gosimple
|
||||
conRI = conR
|
||||
|
||||
// make first val byzantine
|
||||
if i == 0 {
|
||||
conRI = NewByzantineReactor(conR)
|
||||
}
|
||||
|
||||
reactors[i] = conRI
|
||||
}
|
||||
|
||||
@@ -115,19 +118,19 @@ func TestByzantine(t *testing.T) {
|
||||
// and the other block to peers[1] and peers[2].
|
||||
// note peers and switches order don't match.
|
||||
peers := switches[0].Peers().List()
|
||||
|
||||
// partition A
|
||||
ind0 := getSwitchIndex(switches, peers[0])
|
||||
|
||||
// partition B
|
||||
ind1 := getSwitchIndex(switches, peers[1])
|
||||
ind2 := getSwitchIndex(switches, peers[2])
|
||||
|
||||
// connect the 2 peers in the larger partition
|
||||
p2p.Connect2Switches(switches, ind1, ind2)
|
||||
|
||||
// wait for someone in the big partition to make a block
|
||||
// wait for someone in the big partition (B) to make a block
|
||||
<-eventChans[ind2]
|
||||
|
||||
t.Log("A block has been committed. Healing partition")
|
||||
|
||||
// connect the partitions
|
||||
p2p.Connect2Switches(switches, ind0, ind1)
|
||||
p2p.Connect2Switches(switches, ind0, ind2)
|
||||
|
||||
@@ -201,7 +204,7 @@ func byzantineDecideProposalFunc(t *testing.T, height int64, round int, cs *Cons
|
||||
func sendProposalAndParts(height int64, round int, cs *ConsensusState, peer p2p.Peer, proposal *types.Proposal, blockHash []byte, parts *types.PartSet) {
|
||||
// proposal
|
||||
msg := &ProposalMessage{Proposal: proposal}
|
||||
peer.Send(DataChannel, struct{ ConsensusMessage }{msg})
|
||||
peer.Send(DataChannel, cdc.MustMarshalBinaryBare(msg))
|
||||
|
||||
// parts
|
||||
for i := 0; i < parts.Total(); i++ {
|
||||
@@ -211,7 +214,7 @@ func sendProposalAndParts(height int64, round int, cs *ConsensusState, peer p2p.
|
||||
Round: round, // This tells peer that this part applies to us.
|
||||
Part: part,
|
||||
}
|
||||
peer.Send(DataChannel, struct{ ConsensusMessage }{msg})
|
||||
peer.Send(DataChannel, cdc.MustMarshalBinaryBare(msg))
|
||||
}
|
||||
|
||||
// votes
|
||||
@@ -220,8 +223,8 @@ func sendProposalAndParts(height int64, round int, cs *ConsensusState, peer p2p.
|
||||
precommit, _ := cs.signVote(types.VoteTypePrecommit, blockHash, parts.Header())
|
||||
cs.mtx.Unlock()
|
||||
|
||||
peer.Send(VoteChannel, struct{ ConsensusMessage }{&VoteMessage{prevote}})
|
||||
peer.Send(VoteChannel, struct{ ConsensusMessage }{&VoteMessage{precommit}})
|
||||
peer.Send(VoteChannel, cdc.MustMarshalBinaryBare(&VoteMessage{prevote}))
|
||||
peer.Send(VoteChannel, cdc.MustMarshalBinaryBare(&VoteMessage{precommit}))
|
||||
}
|
||||
|
||||
//----------------------------------------
|
||||
@@ -262,47 +265,3 @@ func (br *ByzantineReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
|
||||
func (br *ByzantineReactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) {
|
||||
br.reactor.Receive(chID, peer, msgBytes)
|
||||
}
|
||||
|
||||
//----------------------------------------
|
||||
// byzantine privValidator
|
||||
|
||||
type ByzantinePrivValidator struct {
|
||||
types.Signer
|
||||
|
||||
pv types.PrivValidator
|
||||
}
|
||||
|
||||
// Return a priv validator that will sign anything
|
||||
func NewByzantinePrivValidator(pv types.PrivValidator) *ByzantinePrivValidator {
|
||||
return &ByzantinePrivValidator{
|
||||
Signer: pv.(*types.PrivValidatorFS).Signer,
|
||||
pv: pv,
|
||||
}
|
||||
}
|
||||
|
||||
func (privVal *ByzantinePrivValidator) GetAddress() types.Address {
|
||||
return privVal.pv.GetAddress()
|
||||
}
|
||||
|
||||
func (privVal *ByzantinePrivValidator) GetPubKey() crypto.PubKey {
|
||||
return privVal.pv.GetPubKey()
|
||||
}
|
||||
|
||||
func (privVal *ByzantinePrivValidator) SignVote(chainID string, vote *types.Vote) (err error) {
|
||||
vote.Signature, err = privVal.Sign(types.SignBytes(chainID, vote))
|
||||
return err
|
||||
}
|
||||
|
||||
func (privVal *ByzantinePrivValidator) SignProposal(chainID string, proposal *types.Proposal) (err error) {
|
||||
proposal.Signature, _ = privVal.Sign(types.SignBytes(chainID, proposal))
|
||||
return nil
|
||||
}
|
||||
|
||||
func (privVal *ByzantinePrivValidator) SignHeartbeat(chainID string, heartbeat *types.Heartbeat) (err error) {
|
||||
heartbeat.Signature, _ = privVal.Sign(types.SignBytes(chainID, heartbeat))
|
||||
return nil
|
||||
}
|
||||
|
||||
func (privVal *ByzantinePrivValidator) String() string {
|
||||
return cmn.Fmt("PrivValidator{%X}", privVal.GetAddress())
|
||||
}
|
||||
|
@@ -21,12 +21,13 @@ import (
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
pvm "github.com/tendermint/tendermint/types/priv_validator"
|
||||
cmn "github.com/tendermint/tmlibs/common"
|
||||
dbm "github.com/tendermint/tmlibs/db"
|
||||
"github.com/tendermint/tmlibs/log"
|
||||
|
||||
"github.com/tendermint/abci/example/counter"
|
||||
"github.com/tendermint/abci/example/dummy"
|
||||
"github.com/tendermint/abci/example/kvstore"
|
||||
|
||||
"github.com/go-kit/kit/log/term"
|
||||
)
|
||||
@@ -50,7 +51,7 @@ func ResetConfig(name string) *cfg.Config {
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------------------
|
||||
// validator stub (a dummy consensus peer we control)
|
||||
// validator stub (a kvstore consensus peer we control)
|
||||
|
||||
type validatorStub struct {
|
||||
Index int // Validator index. NOTE: we don't assume validator set changes.
|
||||
@@ -101,13 +102,13 @@ func signVotes(voteType byte, hash []byte, header types.PartSetHeader, vss ...*v
|
||||
|
||||
func incrementHeight(vss ...*validatorStub) {
|
||||
for _, vs := range vss {
|
||||
vs.Height += 1
|
||||
vs.Height++
|
||||
}
|
||||
}
|
||||
|
||||
func incrementRound(vss ...*validatorStub) {
|
||||
for _, vs := range vss {
|
||||
vs.Round += 1
|
||||
vs.Round++
|
||||
}
|
||||
}
|
||||
|
||||
@@ -222,7 +223,7 @@ func subscribeToVoter(cs *ConsensusState, addr []byte) chan interface{} {
|
||||
voteCh := make(chan interface{})
|
||||
go func() {
|
||||
for v := range voteCh0 {
|
||||
vote := v.(types.TMEventData).Unwrap().(types.EventDataVote)
|
||||
vote := v.(types.EventDataVote)
|
||||
// we only fire for our own votes
|
||||
if bytes.Equal(addr, vote.Vote.ValidatorAddress) {
|
||||
voteCh <- v
|
||||
@@ -277,10 +278,10 @@ func newConsensusStateWithConfigAndBlockStore(thisConfig *cfg.Config, state sm.S
|
||||
return cs
|
||||
}
|
||||
|
||||
func loadPrivValidator(config *cfg.Config) *types.PrivValidatorFS {
|
||||
func loadPrivValidator(config *cfg.Config) *pvm.FilePV {
|
||||
privValidatorFile := config.PrivValidatorFile()
|
||||
ensureDir(path.Dir(privValidatorFile), 0700)
|
||||
privValidator := types.LoadOrGenPrivValidatorFS(privValidatorFile)
|
||||
privValidator := pvm.LoadOrGenFilePV(privValidatorFile)
|
||||
privValidator.Reset()
|
||||
return privValidator
|
||||
}
|
||||
@@ -378,7 +379,7 @@ func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerF
|
||||
privVal = privVals[i]
|
||||
} else {
|
||||
_, tempFilePath := cmn.Tempfile("priv_validator_")
|
||||
privVal = types.GenPrivValidatorFS(tempFilePath)
|
||||
privVal = pvm.GenFilePV(tempFilePath)
|
||||
}
|
||||
|
||||
app := appFunc()
|
||||
@@ -394,7 +395,7 @@ func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerF
|
||||
|
||||
func getSwitchIndex(switches []*p2p.Switch, peer p2p.Peer) int {
|
||||
for i, s := range switches {
|
||||
if bytes.Equal(peer.NodeInfo().PubKey.Address(), s.NodeInfo().PubKey.Address()) {
|
||||
if peer.NodeInfo().ID == s.NodeInfo().ID {
|
||||
return i
|
||||
}
|
||||
}
|
||||
@@ -405,9 +406,9 @@ func getSwitchIndex(switches []*p2p.Switch, peer p2p.Peer) int {
|
||||
//-------------------------------------------------------------------------------
|
||||
// genesis
|
||||
|
||||
func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.GenesisDoc, []*types.PrivValidatorFS) {
|
||||
func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.GenesisDoc, []types.PrivValidator) {
|
||||
validators := make([]types.GenesisValidator, numValidators)
|
||||
privValidators := make([]*types.PrivValidatorFS, numValidators)
|
||||
privValidators := make([]types.PrivValidator, numValidators)
|
||||
for i := 0; i < numValidators; i++ {
|
||||
val, privVal := types.RandValidator(randPower, minPower)
|
||||
validators[i] = types.GenesisValidator{
|
||||
@@ -425,7 +426,7 @@ func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.G
|
||||
}, privValidators
|
||||
}
|
||||
|
||||
func randGenesisState(numValidators int, randPower bool, minPower int64) (sm.State, []*types.PrivValidatorFS) {
|
||||
func randGenesisState(numValidators int, randPower bool, minPower int64) (sm.State, []types.PrivValidator) {
|
||||
genDoc, privValidators := randGenesisDoc(numValidators, randPower, minPower)
|
||||
s0, _ := sm.MakeGenesisState(genDoc)
|
||||
db := dbm.NewMemDB()
|
||||
@@ -488,7 +489,7 @@ func newCounter() abci.Application {
|
||||
return counter.NewCounterApplication(true)
|
||||
}
|
||||
|
||||
func newPersistentDummy() abci.Application {
|
||||
dir, _ := ioutil.TempDir("/tmp", "persistent-dummy")
|
||||
return dummy.NewPersistentDummyApplication(dir)
|
||||
func newPersistentKVStore() abci.Application {
|
||||
dir, _ := ioutil.TempDir("/tmp", "persistent-kvstore")
|
||||
return kvstore.NewPersistentKVStoreApplication(dir)
|
||||
}
|
||||
|
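Aside (sketch, not part of the diff): the dummy -> kvstore rename above means the tests construct the in-process ABCI application roughly like this; the directory is a placeholder.

package main

import (
	"github.com/tendermint/abci/example/kvstore"
)

func main() {
	app := kvstore.NewPersistentKVStoreApplication("/tmp/example-kvstore")
	_ = app // handed to the consensus test harness in place of the old dummy app
}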
@@ -108,7 +108,7 @@ func TestMempoolTxConcurrentWithCommit(t *testing.T) {
|
||||
ticker := time.NewTicker(time.Second * 30)
|
||||
select {
|
||||
case b := <-newBlockCh:
|
||||
evt := b.(types.TMEventData).Unwrap().(types.EventDataNewBlock)
|
||||
evt := b.(types.EventDataNewBlock)
|
||||
nTxs += int(evt.Block.Header.NumTxs)
|
||||
case <-ticker.C:
|
||||
panic("Timed out waiting to commit blocks with transactions")
|
||||
@@ -152,6 +152,7 @@ func TestMempoolRmBadTx(t *testing.T) {
|
||||
txs := cs.mempool.Reap(1)
|
||||
if len(txs) == 0 {
|
||||
emptyMempoolCh <- struct{}{}
|
||||
return
|
||||
}
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
}
|
||||
@@ -199,7 +200,7 @@ func (app *CounterApplication) DeliverTx(tx []byte) abci.ResponseDeliverTx {
|
||||
Code: code.CodeTypeBadNonce,
|
||||
Log: fmt.Sprintf("Invalid nonce. Expected %v, got %v", app.txCount, txValue)}
|
||||
}
|
||||
app.txCount += 1
|
||||
app.txCount++
|
||||
return abci.ResponseDeliverTx{Code: code.CodeTypeOK}
|
||||
}
|
||||
|
||||
@@ -210,7 +211,7 @@ func (app *CounterApplication) CheckTx(tx []byte) abci.ResponseCheckTx {
|
||||
Code: code.CodeTypeBadNonce,
|
||||
Log: fmt.Sprintf("Invalid nonce. Expected %v, got %v", app.mempoolTxCount, txValue)}
|
||||
}
|
||||
app.mempoolTxCount += 1
|
||||
app.mempoolTxCount++
|
||||
return abci.ResponseCheckTx{Code: code.CodeTypeOK}
|
||||
}
|
||||
|
||||
@@ -224,9 +225,8 @@ func (app *CounterApplication) Commit() abci.ResponseCommit {
|
||||
app.mempoolTxCount = app.txCount
|
||||
if app.txCount == 0 {
|
||||
return abci.ResponseCommit{}
|
||||
} else {
|
||||
hash := make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(hash, uint64(app.txCount))
|
||||
return abci.ResponseCommit{Data: hash}
|
||||
}
|
||||
hash := make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(hash, uint64(app.txCount))
|
||||
return abci.ResponseCommit{Data: hash}
|
||||
}
|
||||
|
@@ -1,7 +1,6 @@
|
||||
package consensus
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"reflect"
|
||||
@@ -10,7 +9,7 @@ import (
|
||||
|
||||
"github.com/pkg/errors"
|
||||
|
||||
wire "github.com/tendermint/go-wire"
|
||||
amino "github.com/tendermint/go-amino"
|
||||
cmn "github.com/tendermint/tmlibs/common"
|
||||
"github.com/tendermint/tmlibs/log"
|
||||
|
||||
@@ -26,7 +25,9 @@ const (
|
||||
VoteChannel = byte(0x22)
|
||||
VoteSetBitsChannel = byte(0x23)
|
||||
|
||||
maxConsensusMessageSize = 1048576 // 1MB; NOTE/TODO: keep in sync with types.PartSet sizes.
|
||||
maxMsgSize = 1048576 // 1MB; NOTE/TODO: keep in sync with types.PartSet sizes.
|
||||
|
||||
blocksToContributeToBecomeGoodPeer = 10000
|
||||
)
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
@@ -108,27 +109,31 @@ func (conR *ConsensusReactor) GetChannels() []*p2p.ChannelDescriptor {
|
||||
// TODO optimize
|
||||
return []*p2p.ChannelDescriptor{
|
||||
{
|
||||
ID: StateChannel,
|
||||
Priority: 5,
|
||||
SendQueueCapacity: 100,
|
||||
ID: StateChannel,
|
||||
Priority: 5,
|
||||
SendQueueCapacity: 100,
|
||||
RecvMessageCapacity: maxMsgSize,
|
||||
},
|
||||
{
|
||||
ID: DataChannel, // maybe split between gossiping current block and catchup stuff
|
||||
Priority: 10, // once we gossip the whole block there's nothing left to send until next height or round
|
||||
SendQueueCapacity: 100,
|
||||
RecvBufferCapacity: 50 * 4096,
|
||||
ID: DataChannel, // maybe split between gossiping current block and catchup stuff
|
||||
Priority: 10, // once we gossip the whole block there's nothing left to send until next height or round
|
||||
SendQueueCapacity: 100,
|
||||
RecvBufferCapacity: 50 * 4096,
|
||||
RecvMessageCapacity: maxMsgSize,
|
||||
},
|
||||
{
|
||||
ID: VoteChannel,
|
||||
Priority: 5,
|
||||
SendQueueCapacity: 100,
|
||||
RecvBufferCapacity: 100 * 100,
|
||||
ID: VoteChannel,
|
||||
Priority: 5,
|
||||
SendQueueCapacity: 100,
|
||||
RecvBufferCapacity: 100 * 100,
|
||||
RecvMessageCapacity: maxMsgSize,
|
||||
},
|
||||
{
|
||||
ID: VoteSetBitsChannel,
|
||||
Priority: 1,
|
||||
SendQueueCapacity: 2,
|
||||
RecvBufferCapacity: 1024,
|
||||
ID: VoteSetBitsChannel,
|
||||
Priority: 1,
|
||||
SendQueueCapacity: 2,
|
||||
RecvBufferCapacity: 1024,
|
||||
RecvMessageCapacity: maxMsgSize,
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -176,10 +181,10 @@ func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
|
||||
return
|
||||
}
|
||||
|
||||
_, msg, err := DecodeMessage(msgBytes)
|
||||
msg, err := DecodeMessage(msgBytes)
|
||||
if err != nil {
|
||||
conR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes)
|
||||
// TODO punish peer?
|
||||
conR.Switch.StopPeerForError(src, err)
|
||||
return
|
||||
}
|
||||
conR.Logger.Debug("Receive", "src", src, "chId", chID, "msg", msg)
|
||||
@@ -205,7 +210,7 @@ func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
|
||||
return
|
||||
}
|
||||
// Peer claims to have a maj23 for some BlockID at H,R,S,
|
||||
err := votes.SetPeerMaj23(msg.Round, msg.Type, ps.Peer.ID(), msg.BlockID)
|
||||
err := votes.SetPeerMaj23(msg.Round, msg.Type, ps.peer.ID(), msg.BlockID)
|
||||
if err != nil {
|
||||
conR.Switch.StopPeerForError(src, err)
|
||||
return
|
||||
@@ -222,13 +227,13 @@ func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
|
||||
conR.Logger.Error("Bad VoteSetBitsMessage field Type")
|
||||
return
|
||||
}
|
||||
src.TrySend(VoteSetBitsChannel, struct{ ConsensusMessage }{&VoteSetBitsMessage{
|
||||
src.TrySend(VoteSetBitsChannel, cdc.MustMarshalBinaryBare(&VoteSetBitsMessage{
|
||||
Height: msg.Height,
|
||||
Round: msg.Round,
|
||||
Type: msg.Type,
|
||||
BlockID: msg.BlockID,
|
||||
Votes: ourVotes,
|
||||
}})
|
||||
}))
|
||||
case *ProposalHeartbeatMessage:
|
||||
hb := msg.Heartbeat
|
||||
conR.Logger.Debug("Received proposal heartbeat message",
|
||||
@@ -251,6 +256,9 @@ func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
|
||||
ps.ApplyProposalPOLMessage(msg)
|
||||
case *BlockPartMessage:
|
||||
ps.SetHasProposalBlockPart(msg.Height, msg.Round, msg.Part.Index)
|
||||
if numBlocks := ps.RecordBlockPart(msg); numBlocks%blocksToContributeToBecomeGoodPeer == 0 {
|
||||
conR.Switch.MarkPeerAsGood(src)
|
||||
}
|
||||
conR.conS.peerMsgQueue <- msgInfo{msg, src.ID()}
|
||||
default:
|
||||
conR.Logger.Error(cmn.Fmt("Unknown message type %v", reflect.TypeOf(msg)))
|
||||
@@ -270,6 +278,9 @@ func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
|
||||
ps.EnsureVoteBitArrays(height, valSize)
|
||||
ps.EnsureVoteBitArrays(height-1, lastCommitSize)
|
||||
ps.SetHasVote(msg.Vote)
|
||||
if blocks := ps.RecordVote(msg.Vote); blocks%blocksToContributeToBecomeGoodPeer == 0 {
|
||||
conR.Switch.MarkPeerAsGood(src)
|
||||
}
|
||||
|
||||
cs.peerMsgQueue <- msgInfo{msg, src.ID()}
|
||||
|
||||
@@ -363,27 +374,33 @@ func (conR *ConsensusReactor) startBroadcastRoutine() error {
|
||||
}
|
||||
|
||||
go func() {
|
||||
var data interface{}
|
||||
var ok bool
|
||||
for {
|
||||
select {
|
||||
case data, ok := <-stepsCh:
|
||||
case data, ok = <-stepsCh:
|
||||
if ok { // a receive from a closed channel returns the zero value immediately
|
||||
edrs := data.(types.TMEventData).Unwrap().(types.EventDataRoundState)
|
||||
edrs := data.(types.EventDataRoundState)
|
||||
conR.broadcastNewRoundStep(edrs.RoundState.(*cstypes.RoundState))
|
||||
}
|
||||
case data, ok := <-votesCh:
|
||||
case data, ok = <-votesCh:
|
||||
if ok {
|
||||
edv := data.(types.TMEventData).Unwrap().(types.EventDataVote)
|
||||
edv := data.(types.EventDataVote)
|
||||
conR.broadcastHasVoteMessage(edv.Vote)
|
||||
}
|
||||
case data, ok := <-heartbeatsCh:
|
||||
case data, ok = <-heartbeatsCh:
|
||||
if ok {
|
||||
edph := data.(types.TMEventData).Unwrap().(types.EventDataProposalHeartbeat)
|
||||
edph := data.(types.EventDataProposalHeartbeat)
|
||||
conR.broadcastProposalHeartbeatMessage(edph)
|
||||
}
|
||||
case <-conR.Quit():
|
||||
conR.eventBus.UnsubscribeAll(ctx, subscriber)
|
||||
return
|
||||
}
|
||||
if !ok {
|
||||
conR.eventBus.UnsubscribeAll(ctx, subscriber)
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
@@ -395,16 +412,16 @@ func (conR *ConsensusReactor) broadcastProposalHeartbeatMessage(heartbeat types.
|
||||
conR.Logger.Debug("Broadcasting proposal heartbeat message",
|
||||
"height", hb.Height, "round", hb.Round, "sequence", hb.Sequence)
|
||||
msg := &ProposalHeartbeatMessage{hb}
|
||||
conR.Switch.Broadcast(StateChannel, struct{ ConsensusMessage }{msg})
|
||||
conR.Switch.Broadcast(StateChannel, cdc.MustMarshalBinaryBare(msg))
|
||||
}
|
||||
|
||||
func (conR *ConsensusReactor) broadcastNewRoundStep(rs *cstypes.RoundState) {
|
||||
nrsMsg, csMsg := makeRoundStepMessages(rs)
|
||||
if nrsMsg != nil {
|
||||
conR.Switch.Broadcast(StateChannel, struct{ ConsensusMessage }{nrsMsg})
|
||||
conR.Switch.Broadcast(StateChannel, cdc.MustMarshalBinaryBare(nrsMsg))
|
||||
}
|
||||
if csMsg != nil {
|
||||
conR.Switch.Broadcast(StateChannel, struct{ ConsensusMessage }{csMsg})
|
||||
conR.Switch.Broadcast(StateChannel, cdc.MustMarshalBinaryBare(csMsg))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -416,7 +433,7 @@ func (conR *ConsensusReactor) broadcastHasVoteMessage(vote *types.Vote) {
|
||||
Type: vote.Type,
|
||||
Index: vote.ValidatorIndex,
|
||||
}
|
||||
conR.Switch.Broadcast(StateChannel, struct{ ConsensusMessage }{msg})
|
||||
conR.Switch.Broadcast(StateChannel, cdc.MustMarshalBinaryBare(msg))
|
||||
/*
|
||||
// TODO: Make this broadcast more selective.
|
||||
for _, peer := range conR.Switch.Peers().List() {
|
||||
@@ -456,10 +473,10 @@ func (conR *ConsensusReactor) sendNewRoundStepMessages(peer p2p.Peer) {
|
||||
rs := conR.conS.GetRoundState()
|
||||
nrsMsg, csMsg := makeRoundStepMessages(rs)
|
||||
if nrsMsg != nil {
|
||||
peer.Send(StateChannel, struct{ ConsensusMessage }{nrsMsg})
|
||||
peer.Send(StateChannel, cdc.MustMarshalBinaryBare(nrsMsg))
|
||||
}
|
||||
if csMsg != nil {
|
||||
peer.Send(StateChannel, struct{ ConsensusMessage }{csMsg})
|
||||
peer.Send(StateChannel, cdc.MustMarshalBinaryBare(csMsg))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -486,7 +503,7 @@ OUTER_LOOP:
|
||||
Part: part,
|
||||
}
|
||||
logger.Debug("Sending block part", "height", prs.Height, "round", prs.Round)
|
||||
if peer.Send(DataChannel, struct{ ConsensusMessage }{msg}) {
|
||||
if peer.Send(DataChannel, cdc.MustMarshalBinaryBare(msg)) {
|
||||
ps.SetHasProposalBlockPart(prs.Height, prs.Round, index)
|
||||
}
|
||||
continue OUTER_LOOP
|
||||
@@ -530,7 +547,7 @@ OUTER_LOOP:
|
||||
{
|
||||
msg := &ProposalMessage{Proposal: rs.Proposal}
|
||||
logger.Debug("Sending proposal", "height", prs.Height, "round", prs.Round)
|
||||
if peer.Send(DataChannel, struct{ ConsensusMessage }{msg}) {
|
||||
if peer.Send(DataChannel, cdc.MustMarshalBinaryBare(msg)) {
|
||||
ps.SetHasProposal(rs.Proposal)
|
||||
}
|
||||
}
|
||||
@@ -545,7 +562,7 @@ OUTER_LOOP:
|
||||
ProposalPOL: rs.Votes.Prevotes(rs.Proposal.POLRound).BitArray(),
|
||||
}
|
||||
logger.Debug("Sending POL", "height", prs.Height, "round", prs.Round)
|
||||
peer.Send(DataChannel, struct{ ConsensusMessage }{msg})
|
||||
peer.Send(DataChannel, cdc.MustMarshalBinaryBare(msg))
|
||||
}
|
||||
continue OUTER_LOOP
|
||||
}
|
||||
@@ -588,17 +605,15 @@ func (conR *ConsensusReactor) gossipDataForCatchup(logger log.Logger, rs *cstype
|
||||
Part: part,
|
||||
}
|
||||
logger.Debug("Sending block part for catchup", "round", prs.Round, "index", index)
|
||||
if peer.Send(DataChannel, struct{ ConsensusMessage }{msg}) {
|
||||
if peer.Send(DataChannel, cdc.MustMarshalBinaryBare(msg)) {
|
||||
ps.SetHasProposalBlockPart(prs.Height, prs.Round, index)
|
||||
} else {
|
||||
logger.Debug("Sending block part for catchup failed")
|
||||
}
|
||||
return
|
||||
} else {
|
||||
//logger.Info("No parts to send in catch-up, sleeping")
|
||||
time.Sleep(conR.conS.config.PeerGossipSleep())
|
||||
return
|
||||
}
|
||||
//logger.Info("No parts to send in catch-up, sleeping")
|
||||
time.Sleep(conR.conS.config.PeerGossipSleep())
|
||||
}
|
||||
|
||||
func (conR *ConsensusReactor) gossipVotesRoutine(peer p2p.Peer, ps *PeerState) {
|
||||
@@ -681,20 +696,37 @@ func (conR *ConsensusReactor) gossipVotesForHeight(logger log.Logger, rs *cstype
|
||||
return true
|
||||
}
|
||||
}
|
||||
// If there are POL prevotes to send...
|
||||
if prs.Step <= cstypes.RoundStepPropose && prs.Round != -1 && prs.Round <= rs.Round && prs.ProposalPOLRound != -1 {
|
||||
if polPrevotes := rs.Votes.Prevotes(prs.ProposalPOLRound); polPrevotes != nil {
|
||||
if ps.PickSendVote(polPrevotes) {
|
||||
logger.Debug("Picked rs.Prevotes(prs.ProposalPOLRound) to send",
|
||||
"round", prs.ProposalPOLRound)
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
// If there are prevotes to send...
|
||||
if prs.Step <= cstypes.RoundStepPrevote && prs.Round != -1 && prs.Round <= rs.Round {
|
||||
if prs.Step <= cstypes.RoundStepPrevoteWait && prs.Round != -1 && prs.Round <= rs.Round {
|
||||
if ps.PickSendVote(rs.Votes.Prevotes(prs.Round)) {
|
||||
logger.Debug("Picked rs.Prevotes(prs.Round) to send", "round", prs.Round)
|
||||
return true
|
||||
}
|
||||
}
|
||||
// If there are precommits to send...
|
||||
if prs.Step <= cstypes.RoundStepPrecommit && prs.Round != -1 && prs.Round <= rs.Round {
|
||||
if prs.Step <= cstypes.RoundStepPrecommitWait && prs.Round != -1 && prs.Round <= rs.Round {
|
||||
if ps.PickSendVote(rs.Votes.Precommits(prs.Round)) {
|
||||
logger.Debug("Picked rs.Precommits(prs.Round) to send", "round", prs.Round)
|
||||
return true
|
||||
}
|
||||
}
|
||||
// If there are prevotes to send...Needed because of validBlock mechanism
|
||||
if prs.Round != -1 && prs.Round <= rs.Round {
|
||||
if ps.PickSendVote(rs.Votes.Prevotes(prs.Round)) {
|
||||
logger.Debug("Picked rs.Prevotes(prs.Round) to send", "round", prs.Round)
|
||||
return true
|
||||
}
|
||||
}
|
||||
// If there are POLPrevotes to send...
|
||||
if prs.ProposalPOLRound != -1 {
|
||||
if polPrevotes := rs.Votes.Prevotes(prs.ProposalPOLRound); polPrevotes != nil {
|
||||
@@ -705,6 +737,7 @@ func (conR *ConsensusReactor) gossipVotesForHeight(logger log.Logger, rs *cstype
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -727,12 +760,12 @@ OUTER_LOOP:
|
||||
prs := ps.GetRoundState()
|
||||
if rs.Height == prs.Height {
|
||||
if maj23, ok := rs.Votes.Prevotes(prs.Round).TwoThirdsMajority(); ok {
|
||||
peer.TrySend(StateChannel, struct{ ConsensusMessage }{&VoteSetMaj23Message{
|
||||
peer.TrySend(StateChannel, cdc.MustMarshalBinaryBare(&VoteSetMaj23Message{
|
||||
Height: prs.Height,
|
||||
Round: prs.Round,
|
||||
Type: types.VoteTypePrevote,
|
||||
BlockID: maj23,
|
||||
}})
|
||||
}))
|
||||
time.Sleep(conR.conS.config.PeerQueryMaj23Sleep())
|
||||
}
|
||||
}
|
||||
@@ -744,12 +777,12 @@ OUTER_LOOP:
|
||||
prs := ps.GetRoundState()
|
||||
if rs.Height == prs.Height {
|
||||
if maj23, ok := rs.Votes.Precommits(prs.Round).TwoThirdsMajority(); ok {
|
||||
peer.TrySend(StateChannel, struct{ ConsensusMessage }{&VoteSetMaj23Message{
|
||||
peer.TrySend(StateChannel, cdc.MustMarshalBinaryBare(&VoteSetMaj23Message{
|
||||
Height: prs.Height,
|
||||
Round: prs.Round,
|
||||
Type: types.VoteTypePrecommit,
|
||||
BlockID: maj23,
|
||||
}})
|
||||
}))
|
||||
time.Sleep(conR.conS.config.PeerQueryMaj23Sleep())
|
||||
}
|
||||
}
|
||||
@@ -761,12 +794,12 @@ OUTER_LOOP:
|
||||
prs := ps.GetRoundState()
|
||||
if rs.Height == prs.Height && prs.ProposalPOLRound >= 0 {
|
||||
if maj23, ok := rs.Votes.Prevotes(prs.ProposalPOLRound).TwoThirdsMajority(); ok {
|
||||
peer.TrySend(StateChannel, struct{ ConsensusMessage }{&VoteSetMaj23Message{
|
||||
peer.TrySend(StateChannel, cdc.MustMarshalBinaryBare(&VoteSetMaj23Message{
|
||||
Height: prs.Height,
|
||||
Round: prs.ProposalPOLRound,
|
||||
Type: types.VoteTypePrevote,
|
||||
BlockID: maj23,
|
||||
}})
|
||||
}))
|
||||
time.Sleep(conR.conS.config.PeerQueryMaj23Sleep())
|
||||
}
|
||||
}
|
||||
@@ -780,12 +813,12 @@ OUTER_LOOP:
|
||||
prs := ps.GetRoundState()
|
||||
if prs.CatchupCommitRound != -1 && 0 < prs.Height && prs.Height <= conR.conS.blockStore.Height() {
|
||||
commit := conR.conS.LoadCommit(prs.Height)
|
||||
peer.TrySend(StateChannel, struct{ ConsensusMessage }{&VoteSetMaj23Message{
|
||||
peer.TrySend(StateChannel, cdc.MustMarshalBinaryBare(&VoteSetMaj23Message{
|
||||
Height: prs.Height,
|
||||
Round: commit.Round(),
|
||||
Type: types.VoteTypePrecommit,
|
||||
BlockID: commit.BlockID,
|
||||
}})
|
||||
}))
|
||||
time.Sleep(conR.conS.config.PeerQueryMaj23Sleep())
|
||||
}
|
||||
}
|
||||
@@ -823,51 +856,78 @@ var (
|
||||
ErrPeerStateInvalidStartTime = errors.New("Error peer state invalid startTime")
|
||||
)
|
||||
|
||||
// PeerState contains the known state of a peer, including its connection
|
||||
// and threadsafe access to its PeerRoundState.
|
||||
// PeerState contains the known state of a peer, including its connection and
|
||||
// threadsafe access to its PeerRoundState.
|
||||
// NOTE: THIS GETS DUMPED WITH rpc/core/consensus.go.
|
||||
// Be mindful of what you Expose.
|
||||
type PeerState struct {
|
||||
Peer p2p.Peer
|
||||
peer p2p.Peer
|
||||
logger log.Logger
|
||||
|
||||
mtx sync.Mutex
|
||||
cstypes.PeerRoundState
|
||||
mtx sync.Mutex `json:"-"` // NOTE: Modify below using setters, never directly.
|
||||
PRS cstypes.PeerRoundState `json:"round_state"` // Exposed.
|
||||
Stats *peerStateStats `json:"stats"` // Exposed.
|
||||
}
|
||||
|
||||
// peerStateStats holds internal statistics for a peer.
|
||||
type peerStateStats struct {
|
||||
LastVoteHeight int64 `json:"last_vote_height"`
|
||||
Votes int `json:"votes"`
|
||||
LastBlockPartHeight int64 `json:"last_block_part_height"`
|
||||
BlockParts int `json:"block_parts"`
|
||||
}
|
||||
|
||||
func (pss peerStateStats) String() string {
|
||||
return fmt.Sprintf("peerStateStats{lvh: %d, votes: %d, lbph: %d, blockParts: %d}",
|
||||
pss.LastVoteHeight, pss.Votes, pss.LastBlockPartHeight, pss.BlockParts)
|
||||
}
|
||||
|
||||
// NewPeerState returns a new PeerState for the given Peer
|
||||
func NewPeerState(peer p2p.Peer) *PeerState {
|
||||
return &PeerState{
|
||||
Peer: peer,
|
||||
peer: peer,
|
||||
logger: log.NewNopLogger(),
|
||||
PeerRoundState: cstypes.PeerRoundState{
|
||||
PRS: cstypes.PeerRoundState{
|
||||
Round: -1,
|
||||
ProposalPOLRound: -1,
|
||||
LastCommitRound: -1,
|
||||
CatchupCommitRound: -1,
|
||||
},
|
||||
Stats: &peerStateStats{},
|
||||
}
|
||||
}
|
||||
|
||||
// SetLogger sets a logger on the peer state and returns the peer
// state itself.
func (ps *PeerState) SetLogger(logger log.Logger) *PeerState {
ps.logger = logger
return ps
}
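As a usage sketch, mirroring the reactor tests further down in this diff (which rely on the p2p/dummy package and the tmlibs test logger): a PeerState is built for a peer, given a logger, and stored on the peer under types.PeerStateKey so the reactor can look it up in Receive. Assumes the consensus package scope and those imports; the helper name is illustrative.

// Sketch only: how a PeerState is typically created and attached to a peer.
func examplePeerStateSetup() *PeerState {
	peer := p2pdummy.NewPeer() // test-only dummy peer from p2p/dummy
	ps := NewPeerState(peer).SetLogger(log.TestingLogger())
	peer.Set(types.PeerStateKey, ps) // Receive retrieves the state via this key
	return ps
}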
|
||||
|
||||
// GetRoundState returns an atomic snapshot of the PeerRoundState.
// GetRoundState returns a shallow copy of the PeerRoundState.
// There's no point in mutating it since it won't change PeerState.
func (ps *PeerState) GetRoundState() *cstypes.PeerRoundState {
ps.mtx.Lock()
defer ps.mtx.Unlock()

prs := ps.PeerRoundState // copy
prs := ps.PRS // copy
return &prs
}

// ToJSON returns the JSON representation of PeerState, marshalled using go-amino.
func (ps *PeerState) ToJSON() ([]byte, error) {
ps.mtx.Lock()
defer ps.mtx.Unlock()

return cdc.MarshalJSON(ps)
}

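A small usage sketch for ToJSON (per the NOTE on PeerState above, this is what gets dumped by rpc/core/consensus.go): only the exposed fields, PRS as "round_state" and Stats as "stats", end up in the output. The helper name below is illustrative.

// Sketch only: dumping a peer's consensus state, e.g. for an RPC response.
func dumpPeerState(ps *PeerState) (string, error) {
	bz, err := ps.ToJSON() // amino-JSON encoding via the package codec
	if err != nil {
		return "", err
	}
	return string(bz), nil
}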
// GetHeight returns an atomic snapshot of the PeerRoundState's height
|
||||
// used by the mempool to ensure peers are caught up before broadcasting new txs
|
||||
func (ps *PeerState) GetHeight() int64 {
|
||||
ps.mtx.Lock()
|
||||
defer ps.mtx.Unlock()
|
||||
return ps.PeerRoundState.Height
|
||||
return ps.PRS.Height
|
||||
}
|
||||
|
||||
// SetHasProposal sets the given proposal as known for the peer.
|
||||
@@ -875,18 +935,18 @@ func (ps *PeerState) SetHasProposal(proposal *types.Proposal) {
|
||||
ps.mtx.Lock()
|
||||
defer ps.mtx.Unlock()
|
||||
|
||||
if ps.Height != proposal.Height || ps.Round != proposal.Round {
|
||||
if ps.PRS.Height != proposal.Height || ps.PRS.Round != proposal.Round {
|
||||
return
|
||||
}
|
||||
if ps.Proposal {
|
||||
if ps.PRS.Proposal {
|
||||
return
|
||||
}
|
||||
|
||||
ps.Proposal = true
|
||||
ps.ProposalBlockPartsHeader = proposal.BlockPartsHeader
|
||||
ps.ProposalBlockParts = cmn.NewBitArray(proposal.BlockPartsHeader.Total)
|
||||
ps.ProposalPOLRound = proposal.POLRound
|
||||
ps.ProposalPOL = nil // Nil until ProposalPOLMessage received.
|
||||
ps.PRS.Proposal = true
|
||||
ps.PRS.ProposalBlockPartsHeader = proposal.BlockPartsHeader
|
||||
ps.PRS.ProposalBlockParts = cmn.NewBitArray(proposal.BlockPartsHeader.Total)
|
||||
ps.PRS.ProposalPOLRound = proposal.POLRound
|
||||
ps.PRS.ProposalPOL = nil // Nil until ProposalPOLMessage received.
|
||||
}
|
||||
|
||||
// InitProposalBlockParts initializes the peer's proposal block parts header and bit array.
|
||||
@@ -894,12 +954,12 @@ func (ps *PeerState) InitProposalBlockParts(partsHeader types.PartSetHeader) {
|
||||
ps.mtx.Lock()
|
||||
defer ps.mtx.Unlock()
|
||||
|
||||
if ps.ProposalBlockParts != nil {
|
||||
if ps.PRS.ProposalBlockParts != nil {
|
||||
return
|
||||
}
|
||||
|
||||
ps.ProposalBlockPartsHeader = partsHeader
|
||||
ps.ProposalBlockParts = cmn.NewBitArray(partsHeader.Total)
|
||||
ps.PRS.ProposalBlockPartsHeader = partsHeader
|
||||
ps.PRS.ProposalBlockParts = cmn.NewBitArray(partsHeader.Total)
|
||||
}
|
||||
|
||||
// SetHasProposalBlockPart sets the given block part index as known for the peer.
|
||||
@@ -907,11 +967,11 @@ func (ps *PeerState) SetHasProposalBlockPart(height int64, round int, index int)
|
||||
ps.mtx.Lock()
|
||||
defer ps.mtx.Unlock()
|
||||
|
||||
if ps.Height != height || ps.Round != round {
|
||||
if ps.PRS.Height != height || ps.PRS.Round != round {
|
||||
return
|
||||
}
|
||||
|
||||
ps.ProposalBlockParts.SetIndex(index, true)
|
||||
ps.PRS.ProposalBlockParts.SetIndex(index, true)
|
||||
}
|
||||
|
||||
// PickSendVote picks a vote and sends it to the peer.
|
||||
@@ -920,7 +980,7 @@ func (ps *PeerState) PickSendVote(votes types.VoteSetReader) bool {
|
||||
if vote, ok := ps.PickVoteToSend(votes); ok {
|
||||
msg := &VoteMessage{vote}
|
||||
ps.logger.Debug("Sending vote message", "ps", ps, "vote", vote)
|
||||
return ps.Peer.Send(VoteChannel, struct{ ConsensusMessage }{msg})
|
||||
return ps.peer.Send(VoteChannel, cdc.MustMarshalBinaryBare(msg))
|
||||
}
|
||||
return false
|
||||
}
|
||||
@@ -960,40 +1020,40 @@ func (ps *PeerState) getVoteBitArray(height int64, round int, type_ byte) *cmn.B
|
||||
return nil
|
||||
}
|
||||
|
||||
if ps.Height == height {
|
||||
if ps.Round == round {
|
||||
if ps.PRS.Height == height {
|
||||
if ps.PRS.Round == round {
|
||||
switch type_ {
|
||||
case types.VoteTypePrevote:
|
||||
return ps.Prevotes
|
||||
return ps.PRS.Prevotes
|
||||
case types.VoteTypePrecommit:
|
||||
return ps.Precommits
|
||||
return ps.PRS.Precommits
|
||||
}
|
||||
}
|
||||
if ps.CatchupCommitRound == round {
|
||||
if ps.PRS.CatchupCommitRound == round {
|
||||
switch type_ {
|
||||
case types.VoteTypePrevote:
|
||||
return nil
|
||||
case types.VoteTypePrecommit:
|
||||
return ps.CatchupCommit
|
||||
return ps.PRS.CatchupCommit
|
||||
}
|
||||
}
|
||||
if ps.ProposalPOLRound == round {
|
||||
if ps.PRS.ProposalPOLRound == round {
|
||||
switch type_ {
|
||||
case types.VoteTypePrevote:
|
||||
return ps.ProposalPOL
|
||||
return ps.PRS.ProposalPOL
|
||||
case types.VoteTypePrecommit:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if ps.Height == height+1 {
|
||||
if ps.LastCommitRound == round {
|
||||
if ps.PRS.Height == height+1 {
|
||||
if ps.PRS.LastCommitRound == round {
|
||||
switch type_ {
|
||||
case types.VoteTypePrevote:
|
||||
return nil
|
||||
case types.VoteTypePrecommit:
|
||||
return ps.LastCommit
|
||||
return ps.PRS.LastCommit
|
||||
}
|
||||
}
|
||||
return nil
|
||||
@@ -1003,7 +1063,7 @@ func (ps *PeerState) getVoteBitArray(height int64, round int, type_ byte) *cmn.B
|
||||
|
||||
// 'round': A round for which we have a +2/3 commit.
|
||||
func (ps *PeerState) ensureCatchupCommitRound(height int64, round int, numValidators int) {
|
||||
if ps.Height != height {
|
||||
if ps.PRS.Height != height {
|
||||
return
|
||||
}
|
||||
/*
|
||||
@@ -1013,18 +1073,18 @@ func (ps *PeerState) ensureCatchupCommitRound(height int64, round int, numValida
|
||||
cmn.PanicSanity(cmn.Fmt("Conflicting CatchupCommitRound. Height: %v, Orig: %v, New: %v", height, ps.CatchupCommitRound, round))
|
||||
}
|
||||
*/
|
||||
if ps.CatchupCommitRound == round {
|
||||
if ps.PRS.CatchupCommitRound == round {
|
||||
return // Nothing to do!
|
||||
}
|
||||
ps.CatchupCommitRound = round
|
||||
if round == ps.Round {
|
||||
ps.CatchupCommit = ps.Precommits
|
||||
ps.PRS.CatchupCommitRound = round
|
||||
if round == ps.PRS.Round {
|
||||
ps.PRS.CatchupCommit = ps.PRS.Precommits
|
||||
} else {
|
||||
ps.CatchupCommit = cmn.NewBitArray(numValidators)
|
||||
ps.PRS.CatchupCommit = cmn.NewBitArray(numValidators)
|
||||
}
|
||||
}
|
||||
|
||||
// EnsureVoteVitArrays ensures the bit-arrays have been allocated for tracking
|
||||
// EnsureVoteBitArrays ensures the bit-arrays have been allocated for tracking
|
||||
// what votes this peer has received.
|
||||
// NOTE: It's important to make sure that numValidators actually matches
|
||||
// what the node sees as the number of validators for height.
|
||||
@@ -1035,26 +1095,76 @@ func (ps *PeerState) EnsureVoteBitArrays(height int64, numValidators int) {
|
||||
}
|
||||
|
||||
func (ps *PeerState) ensureVoteBitArrays(height int64, numValidators int) {
|
||||
if ps.Height == height {
|
||||
if ps.Prevotes == nil {
|
||||
ps.Prevotes = cmn.NewBitArray(numValidators)
|
||||
if ps.PRS.Height == height {
|
||||
if ps.PRS.Prevotes == nil {
|
||||
ps.PRS.Prevotes = cmn.NewBitArray(numValidators)
|
||||
}
|
||||
if ps.Precommits == nil {
|
||||
ps.Precommits = cmn.NewBitArray(numValidators)
|
||||
if ps.PRS.Precommits == nil {
|
||||
ps.PRS.Precommits = cmn.NewBitArray(numValidators)
|
||||
}
|
||||
if ps.CatchupCommit == nil {
|
||||
ps.CatchupCommit = cmn.NewBitArray(numValidators)
|
||||
if ps.PRS.CatchupCommit == nil {
|
||||
ps.PRS.CatchupCommit = cmn.NewBitArray(numValidators)
|
||||
}
|
||||
if ps.ProposalPOL == nil {
|
||||
ps.ProposalPOL = cmn.NewBitArray(numValidators)
|
||||
if ps.PRS.ProposalPOL == nil {
|
||||
ps.PRS.ProposalPOL = cmn.NewBitArray(numValidators)
|
||||
}
|
||||
} else if ps.Height == height+1 {
|
||||
if ps.LastCommit == nil {
|
||||
ps.LastCommit = cmn.NewBitArray(numValidators)
|
||||
} else if ps.PRS.Height == height+1 {
|
||||
if ps.PRS.LastCommit == nil {
|
||||
ps.PRS.LastCommit = cmn.NewBitArray(numValidators)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// RecordVote updates internal statistics for this peer by recording the vote.
// It returns the total number of votes (1 per block). This essentially means
// the number of blocks for which peer has been sending us votes.
func (ps *PeerState) RecordVote(vote *types.Vote) int {
ps.mtx.Lock()
defer ps.mtx.Unlock()

if ps.Stats.LastVoteHeight >= vote.Height {
return ps.Stats.Votes
}
ps.Stats.LastVoteHeight = vote.Height
ps.Stats.Votes++
return ps.Stats.Votes
}

// VotesSent returns the number of blocks for which peer has been sending us
// votes.
func (ps *PeerState) VotesSent() int {
ps.mtx.Lock()
defer ps.mtx.Unlock()

return ps.Stats.Votes
}
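The guard on Stats.LastVoteHeight above means a peer is credited at most once per height, which is what lets the reactor treat VotesSent as the number of blocks the peer has voted on. A minimal sketch of that behaviour for a freshly created PeerState (assumes the consensus package scope with fmt and the types package imported; the vote values are illustrative):

// Sketch only: RecordVote counts one vote per new height, never more.
func exampleVoteCounting(ps *PeerState) {
	v := &types.Vote{Height: 2, Round: 0, Type: types.VoteTypePrevote}

	ps.RecordVote(v) // counted: first vote seen at height 2
	v.Round = 1
	ps.RecordVote(v) // ignored: LastVoteHeight is already 2
	v.Height = 1
	ps.RecordVote(v) // ignored: earlier height

	v.Height = 3
	v.Round = 0
	ps.RecordVote(v) // counted: a new, higher height

	fmt.Println(ps.VotesSent()) // 2
}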
|
||||
|
||||
// RecordBlockPart updates internal statistics for this peer by recording the
|
||||
// block part. It returns the total number of block parts (1 per block). This
|
||||
// essentially means the number of blocks for which peer has been sending us
|
||||
// block parts.
|
||||
func (ps *PeerState) RecordBlockPart(bp *BlockPartMessage) int {
|
||||
ps.mtx.Lock()
|
||||
defer ps.mtx.Unlock()
|
||||
|
||||
if ps.Stats.LastBlockPartHeight >= bp.Height {
|
||||
return ps.Stats.BlockParts
|
||||
}
|
||||
|
||||
ps.Stats.LastBlockPartHeight = bp.Height
|
||||
ps.Stats.BlockParts++
|
||||
return ps.Stats.BlockParts
|
||||
}
|
||||
|
||||
// BlockPartsSent returns the number of blocks for which peer has been sending
|
||||
// us block parts.
|
||||
func (ps *PeerState) BlockPartsSent() int {
|
||||
ps.mtx.Lock()
|
||||
defer ps.mtx.Unlock()
|
||||
|
||||
return ps.Stats.BlockParts
|
||||
}
|
||||
|
||||
// SetHasVote sets the given vote as known by the peer
|
||||
func (ps *PeerState) SetHasVote(vote *types.Vote) {
|
||||
ps.mtx.Lock()
|
||||
@@ -1064,7 +1174,7 @@ func (ps *PeerState) SetHasVote(vote *types.Vote) {
|
||||
}
|
||||
|
||||
func (ps *PeerState) setHasVote(height int64, round int, type_ byte, index int) {
|
||||
logger := ps.logger.With("peerH/R", cmn.Fmt("%d/%d", ps.Height, ps.Round), "H/R", cmn.Fmt("%d/%d", height, round))
|
||||
logger := ps.logger.With("peerH/R", cmn.Fmt("%d/%d", ps.PRS.Height, ps.PRS.Round), "H/R", cmn.Fmt("%d/%d", height, round))
|
||||
logger.Debug("setHasVote", "type", type_, "index", index)
|
||||
|
||||
// NOTE: some may be nil BitArrays -> no side effects.
|
||||
@@ -1080,51 +1190,51 @@ func (ps *PeerState) ApplyNewRoundStepMessage(msg *NewRoundStepMessage) {
|
||||
defer ps.mtx.Unlock()
|
||||
|
||||
// Ignore duplicates or decreases
|
||||
if CompareHRS(msg.Height, msg.Round, msg.Step, ps.Height, ps.Round, ps.Step) <= 0 {
|
||||
if CompareHRS(msg.Height, msg.Round, msg.Step, ps.PRS.Height, ps.PRS.Round, ps.PRS.Step) <= 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// Just remember these values.
|
||||
psHeight := ps.Height
|
||||
psRound := ps.Round
|
||||
//psStep := ps.Step
|
||||
psCatchupCommitRound := ps.CatchupCommitRound
|
||||
psCatchupCommit := ps.CatchupCommit
|
||||
psHeight := ps.PRS.Height
|
||||
psRound := ps.PRS.Round
|
||||
//psStep := ps.PRS.Step
|
||||
psCatchupCommitRound := ps.PRS.CatchupCommitRound
|
||||
psCatchupCommit := ps.PRS.CatchupCommit
|
||||
|
||||
startTime := time.Now().Add(-1 * time.Duration(msg.SecondsSinceStartTime) * time.Second)
|
||||
ps.Height = msg.Height
|
||||
ps.Round = msg.Round
|
||||
ps.Step = msg.Step
|
||||
ps.StartTime = startTime
|
||||
ps.PRS.Height = msg.Height
|
||||
ps.PRS.Round = msg.Round
|
||||
ps.PRS.Step = msg.Step
|
||||
ps.PRS.StartTime = startTime
|
||||
if psHeight != msg.Height || psRound != msg.Round {
|
||||
ps.Proposal = false
|
||||
ps.ProposalBlockPartsHeader = types.PartSetHeader{}
|
||||
ps.ProposalBlockParts = nil
|
||||
ps.ProposalPOLRound = -1
|
||||
ps.ProposalPOL = nil
|
||||
ps.PRS.Proposal = false
|
||||
ps.PRS.ProposalBlockPartsHeader = types.PartSetHeader{}
|
||||
ps.PRS.ProposalBlockParts = nil
|
||||
ps.PRS.ProposalPOLRound = -1
|
||||
ps.PRS.ProposalPOL = nil
|
||||
// We'll update the BitArray capacity later.
|
||||
ps.Prevotes = nil
|
||||
ps.Precommits = nil
|
||||
ps.PRS.Prevotes = nil
|
||||
ps.PRS.Precommits = nil
|
||||
}
|
||||
if psHeight == msg.Height && psRound != msg.Round && msg.Round == psCatchupCommitRound {
|
||||
// Peer caught up to CatchupCommitRound.
|
||||
// Preserve psCatchupCommit!
|
||||
// NOTE: We prefer to use prs.Precommits if
|
||||
// pr.Round matches pr.CatchupCommitRound.
|
||||
ps.Precommits = psCatchupCommit
|
||||
ps.PRS.Precommits = psCatchupCommit
|
||||
}
|
||||
if psHeight != msg.Height {
|
||||
// Shift Precommits to LastCommit.
|
||||
if psHeight+1 == msg.Height && psRound == msg.LastCommitRound {
|
||||
ps.LastCommitRound = msg.LastCommitRound
|
||||
ps.LastCommit = ps.Precommits
|
||||
ps.PRS.LastCommitRound = msg.LastCommitRound
|
||||
ps.PRS.LastCommit = ps.PRS.Precommits
|
||||
} else {
|
||||
ps.LastCommitRound = msg.LastCommitRound
|
||||
ps.LastCommit = nil
|
||||
ps.PRS.LastCommitRound = msg.LastCommitRound
|
||||
ps.PRS.LastCommit = nil
|
||||
}
|
||||
// We'll update the BitArray capacity later.
|
||||
ps.CatchupCommitRound = -1
|
||||
ps.CatchupCommit = nil
|
||||
ps.PRS.CatchupCommitRound = -1
|
||||
ps.PRS.CatchupCommit = nil
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1133,12 +1243,12 @@ func (ps *PeerState) ApplyCommitStepMessage(msg *CommitStepMessage) {
|
||||
ps.mtx.Lock()
|
||||
defer ps.mtx.Unlock()
|
||||
|
||||
if ps.Height != msg.Height {
|
||||
if ps.PRS.Height != msg.Height {
|
||||
return
|
||||
}
|
||||
|
||||
ps.ProposalBlockPartsHeader = msg.BlockPartsHeader
|
||||
ps.ProposalBlockParts = msg.BlockParts
|
||||
ps.PRS.ProposalBlockPartsHeader = msg.BlockPartsHeader
|
||||
ps.PRS.ProposalBlockParts = msg.BlockParts
|
||||
}
|
||||
|
||||
// ApplyProposalPOLMessage updates the peer state for the new proposal POL.
|
||||
@@ -1146,16 +1256,16 @@ func (ps *PeerState) ApplyProposalPOLMessage(msg *ProposalPOLMessage) {
|
||||
ps.mtx.Lock()
|
||||
defer ps.mtx.Unlock()
|
||||
|
||||
if ps.Height != msg.Height {
|
||||
if ps.PRS.Height != msg.Height {
|
||||
return
|
||||
}
|
||||
if ps.ProposalPOLRound != msg.ProposalPOLRound {
|
||||
if ps.PRS.ProposalPOLRound != msg.ProposalPOLRound {
|
||||
return
|
||||
}
|
||||
|
||||
// TODO: Merge onto existing ps.ProposalPOL?
|
||||
// TODO: Merge onto existing ps.PRS.ProposalPOL?
|
||||
// We might have sent some prevotes in the meantime.
|
||||
ps.ProposalPOL = msg.ProposalPOL
|
||||
ps.PRS.ProposalPOL = msg.ProposalPOL
|
||||
}
|
||||
|
||||
// ApplyHasVoteMessage updates the peer state for the new vote.
|
||||
@@ -1163,7 +1273,7 @@ func (ps *PeerState) ApplyHasVoteMessage(msg *HasVoteMessage) {
|
||||
ps.mtx.Lock()
|
||||
defer ps.mtx.Unlock()
|
||||
|
||||
if ps.Height != msg.Height {
|
||||
if ps.PRS.Height != msg.Height {
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1201,56 +1311,43 @@ func (ps *PeerState) StringIndented(indent string) string {
|
||||
ps.mtx.Lock()
|
||||
defer ps.mtx.Unlock()
|
||||
return fmt.Sprintf(`PeerState{
|
||||
%s Key %v
|
||||
%s PRS %v
|
||||
%s Key %v
|
||||
%s RoundState %v
|
||||
%s Stats %v
|
||||
%s}`,
|
||||
indent, ps.Peer.ID(),
|
||||
indent, ps.PeerRoundState.StringIndented(indent+" "),
|
||||
indent, ps.peer.ID(),
|
||||
indent, ps.PRS.StringIndented(indent+" "),
|
||||
indent, ps.Stats,
|
||||
indent)
|
||||
}
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
// Messages
|
||||
|
||||
const (
|
||||
msgTypeNewRoundStep = byte(0x01)
|
||||
msgTypeCommitStep = byte(0x02)
|
||||
msgTypeProposal = byte(0x11)
|
||||
msgTypeProposalPOL = byte(0x12)
|
||||
msgTypeBlockPart = byte(0x13) // both block & POL
|
||||
msgTypeVote = byte(0x14)
|
||||
msgTypeHasVote = byte(0x15)
|
||||
msgTypeVoteSetMaj23 = byte(0x16)
|
||||
msgTypeVoteSetBits = byte(0x17)
|
||||
|
||||
msgTypeProposalHeartbeat = byte(0x20)
|
||||
)
|
||||
|
||||
// ConsensusMessage is a message that can be sent and received on the ConsensusReactor
|
||||
type ConsensusMessage interface{}
|
||||
|
||||
var _ = wire.RegisterInterface(
|
||||
struct{ ConsensusMessage }{},
|
||||
wire.ConcreteType{&NewRoundStepMessage{}, msgTypeNewRoundStep},
|
||||
wire.ConcreteType{&CommitStepMessage{}, msgTypeCommitStep},
|
||||
wire.ConcreteType{&ProposalMessage{}, msgTypeProposal},
|
||||
wire.ConcreteType{&ProposalPOLMessage{}, msgTypeProposalPOL},
|
||||
wire.ConcreteType{&BlockPartMessage{}, msgTypeBlockPart},
|
||||
wire.ConcreteType{&VoteMessage{}, msgTypeVote},
|
||||
wire.ConcreteType{&HasVoteMessage{}, msgTypeHasVote},
|
||||
wire.ConcreteType{&VoteSetMaj23Message{}, msgTypeVoteSetMaj23},
|
||||
wire.ConcreteType{&VoteSetBitsMessage{}, msgTypeVoteSetBits},
|
||||
wire.ConcreteType{&ProposalHeartbeatMessage{}, msgTypeProposalHeartbeat},
|
||||
)
|
||||
func RegisterConsensusMessages(cdc *amino.Codec) {
cdc.RegisterInterface((*ConsensusMessage)(nil), nil)
cdc.RegisterConcrete(&NewRoundStepMessage{}, "tendermint/NewRoundStepMessage", nil)
cdc.RegisterConcrete(&CommitStepMessage{}, "tendermint/CommitStep", nil)
cdc.RegisterConcrete(&ProposalMessage{}, "tendermint/Proposal", nil)
cdc.RegisterConcrete(&ProposalPOLMessage{}, "tendermint/ProposalPOL", nil)
cdc.RegisterConcrete(&BlockPartMessage{}, "tendermint/BlockPart", nil)
cdc.RegisterConcrete(&VoteMessage{}, "tendermint/Vote", nil)
cdc.RegisterConcrete(&HasVoteMessage{}, "tendermint/HasVote", nil)
cdc.RegisterConcrete(&VoteSetMaj23Message{}, "tendermint/VoteSetMaj23", nil)
cdc.RegisterConcrete(&VoteSetBitsMessage{}, "tendermint/VoteSetBits", nil)
cdc.RegisterConcrete(&ProposalHeartbeatMessage{}, "tendermint/ProposalHeartbeat", nil)
}

// DecodeMessage decodes the given bytes into a ConsensusMessage.
// TODO: check for unnecessary extra bytes at the end.
func DecodeMessage(bz []byte) (msgType byte, msg ConsensusMessage, err error) {
msgType = bz[0]
n := new(int)
r := bytes.NewReader(bz)
msgI := wire.ReadBinary(struct{ ConsensusMessage }{}, r, maxConsensusMessageSize, n, &err)
msg = msgI.(struct{ ConsensusMessage }).ConsensusMessage
func DecodeMessage(bz []byte) (msg ConsensusMessage, err error) {
if len(bz) > maxMsgSize {
return msg, fmt.Errorf("Msg exceeds max size (%d > %d)",
len(bz), maxMsgSize)
}
err = cdc.UnmarshalBinaryBare(bz, &msg)
return
}
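Taken together, the registration above and the new DecodeMessage form the encode/decode path that replaces go-wire: senders marshal with the package-level amino codec, receivers run the bytes back through DecodeMessage. A minimal round-trip sketch (assumes the consensus package scope with fmt and the types package imported; the message values and function name are illustrative):

// Sketch only: what peer.Send / Switch.Broadcast now put on the wire, and how
// Receive turns the bytes back into a ConsensusMessage.
func exampleRoundTrip() error {
	msg := &HasVoteMessage{Height: 10, Round: 0, Type: types.VoteTypePrevote, Index: 3}

	bz := cdc.MustMarshalBinaryBare(msg) // amino-encoded, registered as "tendermint/HasVote"

	decoded, err := DecodeMessage(bz) // rejects messages larger than maxMsgSize
	if err != nil {
		return err
	}
	fmt.Printf("decoded %T\n", decoded) // *consensus.HasVoteMessage
	return nil
}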
|
||||
|
||||
|
@@ -10,11 +10,13 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/tendermint/abci/example/dummy"
|
||||
"github.com/tendermint/abci/example/kvstore"
|
||||
cmn "github.com/tendermint/tmlibs/common"
|
||||
"github.com/tendermint/tmlibs/log"
|
||||
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
p2pdummy "github.com/tendermint/tendermint/p2p/dummy"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
@@ -121,13 +123,119 @@ func TestReactorProposalHeartbeats(t *testing.T) {
|
||||
}, css)
|
||||
}
|
||||
|
||||
// Test we record block parts from other peers
|
||||
func TestReactorRecordsBlockParts(t *testing.T) {
|
||||
// create dummy peer
|
||||
peer := p2pdummy.NewPeer()
|
||||
ps := NewPeerState(peer).SetLogger(log.TestingLogger())
|
||||
peer.Set(types.PeerStateKey, ps)
|
||||
|
||||
// create reactor
|
||||
css := randConsensusNet(1, "consensus_reactor_records_block_parts_test", newMockTickerFunc(true), newPersistentKVStore)
|
||||
reactor := NewConsensusReactor(css[0], false) // so we dont start the consensus states
|
||||
reactor.SetEventBus(css[0].eventBus)
|
||||
reactor.SetLogger(log.TestingLogger())
|
||||
sw := p2p.MakeSwitch(cfg.DefaultP2PConfig(), 1, "testing", "123.123.123", func(i int, sw *p2p.Switch) *p2p.Switch { return sw })
|
||||
reactor.SetSwitch(sw)
|
||||
err := reactor.Start()
|
||||
require.NoError(t, err)
|
||||
defer reactor.Stop()
|
||||
|
||||
// 1) new block part
|
||||
parts := types.NewPartSetFromData(cmn.RandBytes(100), 10)
|
||||
msg := &BlockPartMessage{
|
||||
Height: 2,
|
||||
Round: 0,
|
||||
Part: parts.GetPart(0),
|
||||
}
|
||||
bz, err := cdc.MarshalBinaryBare(msg)
|
||||
require.NoError(t, err)
|
||||
|
||||
reactor.Receive(DataChannel, peer, bz)
|
||||
require.Equal(t, 1, ps.BlockPartsSent(), "number of block parts sent should have increased by 1")
|
||||
|
||||
// 2) block part with the same height, but different round
|
||||
msg.Round = 1
|
||||
|
||||
bz, err = cdc.MarshalBinaryBare(msg)
|
||||
require.NoError(t, err)
|
||||
|
||||
reactor.Receive(DataChannel, peer, bz)
|
||||
require.Equal(t, 1, ps.BlockPartsSent(), "number of block parts sent should stay the same")
|
||||
|
||||
// 3) block part from earlier height
|
||||
msg.Height = 1
|
||||
msg.Round = 0
|
||||
|
||||
bz, err = cdc.MarshalBinaryBare(msg)
|
||||
require.NoError(t, err)
|
||||
|
||||
reactor.Receive(DataChannel, peer, bz)
|
||||
require.Equal(t, 1, ps.BlockPartsSent(), "number of block parts sent should stay the same")
|
||||
}
|
||||
|
||||
// Test we record votes from other peers
|
||||
func TestReactorRecordsVotes(t *testing.T) {
|
||||
// create dummy peer
|
||||
peer := p2pdummy.NewPeer()
|
||||
ps := NewPeerState(peer).SetLogger(log.TestingLogger())
|
||||
peer.Set(types.PeerStateKey, ps)
|
||||
|
||||
// create reactor
|
||||
css := randConsensusNet(1, "consensus_reactor_records_votes_test", newMockTickerFunc(true), newPersistentKVStore)
|
||||
reactor := NewConsensusReactor(css[0], false) // so we dont start the consensus states
|
||||
reactor.SetEventBus(css[0].eventBus)
|
||||
reactor.SetLogger(log.TestingLogger())
|
||||
sw := p2p.MakeSwitch(cfg.DefaultP2PConfig(), 1, "testing", "123.123.123", func(i int, sw *p2p.Switch) *p2p.Switch { return sw })
|
||||
reactor.SetSwitch(sw)
|
||||
err := reactor.Start()
|
||||
require.NoError(t, err)
|
||||
defer reactor.Stop()
|
||||
_, val := css[0].state.Validators.GetByIndex(0)
|
||||
|
||||
// 1) new vote
|
||||
vote := &types.Vote{
|
||||
ValidatorIndex: 0,
|
||||
ValidatorAddress: val.Address,
|
||||
Height: 2,
|
||||
Round: 0,
|
||||
Timestamp: time.Now().UTC(),
|
||||
Type: types.VoteTypePrevote,
|
||||
BlockID: types.BlockID{},
|
||||
}
|
||||
bz, err := cdc.MarshalBinaryBare(&VoteMessage{vote})
|
||||
require.NoError(t, err)
|
||||
|
||||
reactor.Receive(VoteChannel, peer, bz)
|
||||
assert.Equal(t, 1, ps.VotesSent(), "number of votes sent should have increased by 1")
|
||||
|
||||
// 2) vote with the same height, but different round
|
||||
vote.Round = 1
|
||||
|
||||
bz, err = cdc.MarshalBinaryBare(&VoteMessage{vote})
|
||||
require.NoError(t, err)
|
||||
|
||||
reactor.Receive(VoteChannel, peer, bz)
|
||||
assert.Equal(t, 1, ps.VotesSent(), "number of votes sent should stay the same")
|
||||
|
||||
// 3) vote from earlier height
|
||||
vote.Height = 1
|
||||
vote.Round = 0
|
||||
|
||||
bz, err = cdc.MarshalBinaryBare(&VoteMessage{vote})
|
||||
require.NoError(t, err)
|
||||
|
||||
reactor.Receive(VoteChannel, peer, bz)
|
||||
assert.Equal(t, 1, ps.VotesSent(), "number of votes sent should stay the same")
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------
|
||||
// ensure we can make blocks despite cycling a validator set
|
||||
|
||||
func TestReactorVotingPowerChange(t *testing.T) {
|
||||
nVals := 4
|
||||
logger := log.TestingLogger()
|
||||
css := randConsensusNet(nVals, "consensus_voting_power_changes_test", newMockTickerFunc(true), newPersistentDummy)
|
||||
css := randConsensusNet(nVals, "consensus_voting_power_changes_test", newMockTickerFunc(true), newPersistentKVStore)
|
||||
reactors, eventChans, eventBuses := startConsensusNet(t, css, nVals)
|
||||
defer stopConsensusNet(logger, reactors, eventBuses)
|
||||
|
||||
@@ -146,7 +254,7 @@ func TestReactorVotingPowerChange(t *testing.T) {
|
||||
logger.Debug("---------------------------- Testing changing the voting power of one validator a few times")
|
||||
|
||||
val1PubKey := css[0].privValidator.GetPubKey()
|
||||
updateValidatorTx := dummy.MakeValSetChangeTx(val1PubKey.Bytes(), 25)
|
||||
updateValidatorTx := kvstore.MakeValSetChangeTx(val1PubKey.Bytes(), 25)
|
||||
previousTotalVotingPower := css[0].GetRoundState().LastValidators.TotalVotingPower()
|
||||
|
||||
waitForAndValidateBlock(t, nVals, activeVals, eventChans, css, updateValidatorTx)
|
||||
@@ -158,7 +266,7 @@ func TestReactorVotingPowerChange(t *testing.T) {
|
||||
t.Fatalf("expected voting power to change (before: %d, after: %d)", previousTotalVotingPower, css[0].GetRoundState().LastValidators.TotalVotingPower())
|
||||
}
|
||||
|
||||
updateValidatorTx = dummy.MakeValSetChangeTx(val1PubKey.Bytes(), 2)
|
||||
updateValidatorTx = kvstore.MakeValSetChangeTx(val1PubKey.Bytes(), 2)
|
||||
previousTotalVotingPower = css[0].GetRoundState().LastValidators.TotalVotingPower()
|
||||
|
||||
waitForAndValidateBlock(t, nVals, activeVals, eventChans, css, updateValidatorTx)
|
||||
@@ -170,7 +278,7 @@ func TestReactorVotingPowerChange(t *testing.T) {
|
||||
t.Fatalf("expected voting power to change (before: %d, after: %d)", previousTotalVotingPower, css[0].GetRoundState().LastValidators.TotalVotingPower())
|
||||
}
|
||||
|
||||
updateValidatorTx = dummy.MakeValSetChangeTx(val1PubKey.Bytes(), 26)
|
||||
updateValidatorTx = kvstore.MakeValSetChangeTx(val1PubKey.Bytes(), 26)
|
||||
previousTotalVotingPower = css[0].GetRoundState().LastValidators.TotalVotingPower()
|
||||
|
||||
waitForAndValidateBlock(t, nVals, activeVals, eventChans, css, updateValidatorTx)
|
||||
@@ -186,7 +294,7 @@ func TestReactorVotingPowerChange(t *testing.T) {
|
||||
func TestReactorValidatorSetChanges(t *testing.T) {
|
||||
nPeers := 7
|
||||
nVals := 4
|
||||
css := randConsensusNetWithPeers(nVals, nPeers, "consensus_val_set_changes_test", newMockTickerFunc(true), newPersistentDummy)
|
||||
css := randConsensusNetWithPeers(nVals, nPeers, "consensus_val_set_changes_test", newMockTickerFunc(true), newPersistentKVStore)
|
||||
|
||||
logger := log.TestingLogger()
|
||||
|
||||
@@ -208,7 +316,7 @@ func TestReactorValidatorSetChanges(t *testing.T) {
|
||||
logger.Info("---------------------------- Testing adding one validator")
|
||||
|
||||
newValidatorPubKey1 := css[nVals].privValidator.GetPubKey()
|
||||
newValidatorTx1 := dummy.MakeValSetChangeTx(newValidatorPubKey1.Bytes(), testMinPower)
|
||||
newValidatorTx1 := kvstore.MakeValSetChangeTx(newValidatorPubKey1.Bytes(), testMinPower)
|
||||
|
||||
// wait till everyone makes block 2
|
||||
// ensure the commit includes all validators
|
||||
@@ -234,7 +342,7 @@ func TestReactorValidatorSetChanges(t *testing.T) {
|
||||
logger.Info("---------------------------- Testing changing the voting power of one validator")
|
||||
|
||||
updateValidatorPubKey1 := css[nVals].privValidator.GetPubKey()
|
||||
updateValidatorTx1 := dummy.MakeValSetChangeTx(updateValidatorPubKey1.Bytes(), 25)
|
||||
updateValidatorTx1 := kvstore.MakeValSetChangeTx(updateValidatorPubKey1.Bytes(), 25)
|
||||
previousTotalVotingPower := css[nVals].GetRoundState().LastValidators.TotalVotingPower()
|
||||
|
||||
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css, updateValidatorTx1)
|
||||
@@ -250,10 +358,10 @@ func TestReactorValidatorSetChanges(t *testing.T) {
|
||||
logger.Info("---------------------------- Testing adding two validators at once")
|
||||
|
||||
newValidatorPubKey2 := css[nVals+1].privValidator.GetPubKey()
|
||||
newValidatorTx2 := dummy.MakeValSetChangeTx(newValidatorPubKey2.Bytes(), testMinPower)
|
||||
newValidatorTx2 := kvstore.MakeValSetChangeTx(newValidatorPubKey2.Bytes(), testMinPower)
|
||||
|
||||
newValidatorPubKey3 := css[nVals+2].privValidator.GetPubKey()
|
||||
newValidatorTx3 := dummy.MakeValSetChangeTx(newValidatorPubKey3.Bytes(), testMinPower)
|
||||
newValidatorTx3 := kvstore.MakeValSetChangeTx(newValidatorPubKey3.Bytes(), testMinPower)
|
||||
|
||||
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css, newValidatorTx2, newValidatorTx3)
|
||||
waitForAndValidateBlockWithTx(t, nPeers, activeVals, eventChans, css, newValidatorTx2, newValidatorTx3)
|
||||
@@ -265,8 +373,8 @@ func TestReactorValidatorSetChanges(t *testing.T) {
|
||||
//---------------------------------------------------------------------------
|
||||
logger.Info("---------------------------- Testing removing two validators at once")
|
||||
|
||||
removeValidatorTx2 := dummy.MakeValSetChangeTx(newValidatorPubKey2.Bytes(), 0)
|
||||
removeValidatorTx3 := dummy.MakeValSetChangeTx(newValidatorPubKey3.Bytes(), 0)
|
||||
removeValidatorTx2 := kvstore.MakeValSetChangeTx(newValidatorPubKey2.Bytes(), 0)
|
||||
removeValidatorTx3 := kvstore.MakeValSetChangeTx(newValidatorPubKey3.Bytes(), 0)
|
||||
|
||||
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css, removeValidatorTx2, removeValidatorTx3)
|
||||
waitForAndValidateBlockWithTx(t, nPeers, activeVals, eventChans, css, removeValidatorTx2, removeValidatorTx3)
|
||||
@@ -301,7 +409,7 @@ func waitForAndValidateBlock(t *testing.T, n int, activeVals map[string]struct{}
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
newBlock := newBlockI.(types.TMEventData).Unwrap().(types.EventDataNewBlock).Block
|
||||
newBlock := newBlockI.(types.EventDataNewBlock).Block
|
||||
css[j].Logger.Debug("waitForAndValidateBlock: Got block", "height", newBlock.Height)
|
||||
err := validateBlock(newBlock, activeVals)
|
||||
assert.Nil(t, err)
|
||||
@@ -322,7 +430,7 @@ func waitForAndValidateBlockWithTx(t *testing.T, n int, activeVals map[string]st
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
newBlock := newBlockI.(types.TMEventData).Unwrap().(types.EventDataNewBlock).Block
|
||||
newBlock := newBlockI.(types.EventDataNewBlock).Block
|
||||
css[j].Logger.Debug("waitForAndValidateBlockWithTx: Got block", "height", newBlock.Height)
|
||||
err := validateBlock(newBlock, activeVals)
|
||||
assert.Nil(t, err)
|
||||
@@ -332,7 +440,7 @@ func waitForAndValidateBlockWithTx(t *testing.T, n int, activeVals map[string]st
|
||||
// but they should be in order.
|
||||
for _, tx := range newBlock.Data.Txs {
|
||||
assert.EqualValues(t, txs[ntxs], tx)
|
||||
ntxs += 1
|
||||
ntxs++
|
||||
}
|
||||
|
||||
if ntxs == len(txs) {
|
||||
@@ -354,7 +462,7 @@ func waitForBlockWithUpdatedValsAndValidateIt(t *testing.T, n int, updatedVals m
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
newBlock = newBlockI.(types.TMEventData).Unwrap().(types.EventDataNewBlock).Block
|
||||
newBlock = newBlockI.(types.EventDataNewBlock).Block
|
||||
if newBlock.LastCommit.Size() == len(updatedVals) {
|
||||
css[j].Logger.Debug("waitForBlockWithUpdatedValsAndValidateIt: Got block", "height", newBlock.Height)
|
||||
break LOOP
|
||||
|
@@ -2,6 +2,7 @@ package consensus
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"hash/crc32"
|
||||
"io"
|
||||
@@ -25,20 +26,24 @@ import (
|
||||
var crc32c = crc32.MakeTable(crc32.Castagnoli)
|
||||
|
||||
// Functionality to replay blocks and messages on recovery from a crash.
|
||||
// There are two general failure scenarios: failure during consensus, and failure while applying the block.
|
||||
// The former is handled by the WAL, the latter by the proxyApp Handshake on restart,
|
||||
// which ultimately hands off the work to the WAL.
|
||||
// There are two general failure scenarios:
|
||||
//
|
||||
// 1. failure during consensus
|
||||
// 2. failure while applying the block
|
||||
//
|
||||
// The former is handled by the WAL, the latter by the proxyApp Handshake on
|
||||
// restart, which ultimately hands off the work to the WAL.
|
||||
|
||||
//-----------------------------------------
|
||||
// recover from failure during consensus
|
||||
// by replaying messages from the WAL
|
||||
// 1. Recover from failure during consensus
|
||||
// (by replaying messages from the WAL)
|
||||
//-----------------------------------------
|
||||
|
||||
// Unmarshal and apply a single message to the consensus state
|
||||
// as if it were received in receiveRoutine
|
||||
// Lines that start with "#" are ignored.
|
||||
// NOTE: receiveRoutine should not be running
|
||||
// Unmarshal and apply a single message to the consensus state as if it were
|
||||
// received in receiveRoutine. Lines that start with "#" are ignored.
|
||||
// NOTE: receiveRoutine should not be running.
|
||||
func (cs *ConsensusState) readReplayMessage(msg *TimedWALMessage, newStepCh chan interface{}) error {
|
||||
// skip meta messages
|
||||
// Skip meta messages which exist for demarcating boundaries.
|
||||
if _, ok := msg.Msg.(EndHeightMessage); ok {
|
||||
return nil
|
||||
}
|
||||
@@ -88,17 +93,18 @@ func (cs *ConsensusState) readReplayMessage(msg *TimedWALMessage, newStepCh chan
|
||||
return nil
|
||||
}
|
||||
|
||||
// replay only those messages since the last block.
|
||||
// timeoutRoutine should run concurrently to read off tickChan
|
||||
// Replay only those messages since the last block. `timeoutRoutine` should
|
||||
// run concurrently to read off tickChan.
|
||||
func (cs *ConsensusState) catchupReplay(csHeight int64) error {
|
||||
// set replayMode
|
||||
|
||||
// Set replayMode to true so we don't log signing errors.
|
||||
cs.replayMode = true
|
||||
defer func() { cs.replayMode = false }()
|
||||
|
||||
// Ensure that ENDHEIGHT for this height doesn't exist.
|
||||
// Ensure that #ENDHEIGHT for this height doesn't exist.
|
||||
// NOTE: This is just a sanity check. As far as we know things work fine
|
||||
// without it, and Handshake could reuse ConsensusState if it weren't for
|
||||
// this check (since we can crash after writing ENDHEIGHT).
|
||||
// this check (since we can crash after writing #ENDHEIGHT).
|
||||
//
|
||||
// Ignore data corruption errors since this is a sanity check.
|
||||
gr, found, err := cs.wal.SearchForEndHeight(csHeight, &WALSearchOptions{IgnoreDataCorruptionErrors: true})
|
||||
@@ -111,10 +117,10 @@ func (cs *ConsensusState) catchupReplay(csHeight int64) error {
|
||||
}
|
||||
}
|
||||
if found {
|
||||
return fmt.Errorf("WAL should not contain #ENDHEIGHT %d.", csHeight)
|
||||
return fmt.Errorf("WAL should not contain #ENDHEIGHT %d", csHeight)
|
||||
}
|
||||
|
||||
// Search for last height marker
|
||||
// Search for last height marker.
|
||||
//
|
||||
// Ignore data corruption errors in previous heights because we only care about last height
|
||||
gr, found, err = cs.wal.SearchForEndHeight(csHeight-1, &WALSearchOptions{IgnoreDataCorruptionErrors: true})
|
||||
@@ -124,7 +130,7 @@ func (cs *ConsensusState) catchupReplay(csHeight int64) error {
|
||||
return err
|
||||
}
|
||||
if !found {
|
||||
return fmt.Errorf("Cannot replay height %d. WAL does not contain #ENDHEIGHT for %d.", csHeight, csHeight-1)
|
||||
return fmt.Errorf("Cannot replay height %d. WAL does not contain #ENDHEIGHT for %d", csHeight, csHeight-1)
|
||||
}
|
||||
defer gr.Close() // nolint: errcheck
|
||||
|
||||
@@ -181,22 +187,33 @@ func makeHeightSearchFunc(height int64) auto.SearchFunc {
|
||||
}
|
||||
}*/
|
||||
|
||||
//----------------------------------------------
|
||||
// Recover from failure during block processing
|
||||
// by handshaking with the app to figure out where
|
||||
// we were last and using the WAL to recover there
|
||||
//---------------------------------------------------
|
||||
// 2. Recover from failure while applying the block.
|
||||
// (by handshaking with the app to figure out where
|
||||
// we were last, and using the WAL to recover there.)
|
||||
//---------------------------------------------------
|
||||
|
||||
type Handshaker struct {
|
||||
stateDB dbm.DB
|
||||
initialState sm.State
|
||||
store types.BlockStore
|
||||
appState json.RawMessage
|
||||
logger log.Logger
|
||||
|
||||
nBlocks int // number of blocks applied to the state
|
||||
}
|
||||
|
||||
func NewHandshaker(stateDB dbm.DB, state sm.State, store types.BlockStore) *Handshaker {
|
||||
return &Handshaker{stateDB, state, store, log.NewNopLogger(), 0}
|
||||
func NewHandshaker(stateDB dbm.DB, state sm.State,
|
||||
store types.BlockStore, appState json.RawMessage) *Handshaker {
|
||||
|
||||
return &Handshaker{
|
||||
stateDB: stateDB,
|
||||
initialState: state,
|
||||
store: store,
|
||||
appState: appState,
|
||||
logger: log.NewNopLogger(),
|
||||
nBlocks: 0,
|
||||
}
|
||||
}
|
||||
|
||||
func (h *Handshaker) SetLogger(l log.Logger) {
|
||||
@@ -209,7 +226,8 @@ func (h *Handshaker) NBlocks() int {
|
||||
|
||||
// TODO: retry the handshake/replay if it fails ?
|
||||
func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error {
|
||||
// handshake is done via info request on the query conn
|
||||
|
||||
// Handshake is done via ABCI Info on the query conn.
|
||||
res, err := proxyApp.Query().InfoSync(abci.RequestInfo{version.Version})
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error calling Info: %v", err)
|
||||
@@ -223,15 +241,16 @@ func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error {
|
||||
|
||||
h.logger.Info("ABCI Handshake", "appHeight", blockHeight, "appHash", fmt.Sprintf("%X", appHash))
|
||||
|
||||
// TODO: check version
|
||||
// TODO: check app version.
|
||||
|
||||
// replay blocks up to the latest in the blockstore
|
||||
// Replay blocks up to the latest in the blockstore.
|
||||
_, err = h.ReplayBlocks(h.initialState, appHash, blockHeight, proxyApp)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error on replay: %v", err)
|
||||
}
|
||||
|
||||
h.logger.Info("Completed ABCI Handshake - Tendermint and App are synced", "appHeight", blockHeight, "appHash", fmt.Sprintf("%X", appHash))
|
||||
h.logger.Info("Completed ABCI Handshake - Tendermint and App are synced",
|
||||
"appHeight", blockHeight, "appHash", fmt.Sprintf("%X", appHash))
|
||||
|
||||
// TODO: (on restart) replay mempool
|
||||
|
||||
@@ -239,7 +258,7 @@ func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error {
|
||||
}
|
||||
|
||||
// Replay all blocks since appBlockHeight and ensure the result matches the current state.
|
||||
// Returns the final AppHash or an error
|
||||
// Returns the final AppHash or an error.
|
||||
func (h *Handshaker) ReplayBlocks(state sm.State, appHash []byte, appBlockHeight int64, proxyApp proxy.AppConns) ([]byte, error) {
|
||||
|
||||
storeBlockHeight := h.store.Height()
|
||||
@@ -249,9 +268,12 @@ func (h *Handshaker) ReplayBlocks(state sm.State, appHash []byte, appBlockHeight
|
||||
// If appBlockHeight == 0 it means that we are at genesis and hence should send InitChain
|
||||
if appBlockHeight == 0 {
|
||||
validators := types.TM2PB.Validators(state.Validators)
|
||||
// TODO: get the genesis bytes (https://github.com/tendermint/tendermint/issues/1224)
|
||||
var genesisBytes []byte
|
||||
if _, err := proxyApp.Consensus().InitChainSync(abci.RequestInitChain{validators, genesisBytes}); err != nil {
|
||||
req := abci.RequestInitChain{
|
||||
Validators: validators,
|
||||
AppStateBytes: h.appState,
|
||||
}
|
||||
_, err := proxyApp.Consensus().InitChainSync(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
@@ -300,7 +322,7 @@ func (h *Handshaker) ReplayBlocks(state sm.State, appHash []byte, appBlockHeight
|
||||
// We haven't run Commit (both the state and app are one block behind),
|
||||
// so replayBlock with the real app.
|
||||
// NOTE: We could instead use the cs.WAL on cs.Start,
|
||||
// but we'd have to allow the WAL to replay a block that wrote it's ENDHEIGHT
|
||||
// but we'd have to allow the WAL to replay a block that wrote it's #ENDHEIGHT
|
||||
h.logger.Info("Replay last block using real app")
|
||||
state, err = h.replayBlock(state, storeBlockHeight, proxyApp.Consensus())
|
||||
return state.AppHash, err
|
||||
@@ -338,7 +360,7 @@ func (h *Handshaker) replayBlocks(state sm.State, proxyApp proxy.AppConns, appBl
|
||||
var err error
|
||||
finalBlock := storeBlockHeight
|
||||
if mutateState {
|
||||
finalBlock -= 1
|
||||
finalBlock--
|
||||
}
|
||||
for i := appBlockHeight + 1; i <= finalBlock; i++ {
|
||||
h.logger.Info("Applying block", "height", i)
|
||||
@@ -348,7 +370,7 @@ func (h *Handshaker) replayBlocks(state sm.State, proxyApp proxy.AppConns, appBl
|
||||
return nil, err
|
||||
}
|
||||
|
||||
h.nBlocks += 1
|
||||
h.nBlocks++
|
||||
}
|
||||
|
||||
if mutateState {
|
||||
@@ -376,7 +398,7 @@ func (h *Handshaker) replayBlock(state sm.State, height int64, proxyApp proxy.Ap
|
||||
return sm.State{}, err
|
||||
}
|
||||
|
||||
h.nBlocks += 1
|
||||
h.nBlocks++
|
||||
|
||||
return state, nil
|
||||
}
|
||||
@@ -415,7 +437,7 @@ type mockProxyApp struct {
|
||||
|
||||
func (mock *mockProxyApp) DeliverTx(tx []byte) abci.ResponseDeliverTx {
|
||||
r := mock.abciResponses.DeliverTx[mock.txCount]
|
||||
mock.txCount += 1
|
||||
mock.txCount++
|
||||
return *r
|
||||
}
|
||||
|
||||
|
@@ -29,6 +29,7 @@ const (
|
||||
//--------------------------------------------------------
|
||||
// replay messages interactively or all at once
|
||||
|
||||
// replay the wal file
|
||||
func RunReplayFile(config cfg.BaseConfig, csConfig *cfg.ConsensusConfig, console bool) {
|
||||
consensusState := newConsensusStateForReplay(config, csConfig)
|
||||
|
||||
@@ -87,9 +88,9 @@ func (cs *ConsensusState) ReplayFile(file string, console bool) error {
|
||||
}
|
||||
|
||||
if nextN > 0 {
|
||||
nextN -= 1
|
||||
nextN--
|
||||
}
|
||||
pb.count += 1
|
||||
pb.count++
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -153,7 +154,7 @@ func (pb *playback) replayReset(count int, newStepCh chan interface{}) error {
|
||||
if err := pb.cs.readReplayMessage(msg, newStepCh); err != nil {
|
||||
return err
|
||||
}
|
||||
pb.count += 1
|
||||
pb.count++
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -197,13 +198,12 @@ func (pb *playback) replayConsoleLoop() int {
|
||||
|
||||
if len(tokens) == 1 {
|
||||
return 0
|
||||
}
|
||||
i, err := strconv.Atoi(tokens[1])
|
||||
if err != nil {
|
||||
fmt.Println("next takes an integer argument")
|
||||
} else {
|
||||
i, err := strconv.Atoi(tokens[1])
|
||||
if err != nil {
|
||||
fmt.Println("next takes an integer argument")
|
||||
} else {
|
||||
return i
|
||||
}
|
||||
return i
|
||||
}
|
||||
|
||||
case "back":
|
||||
@@ -263,7 +263,7 @@ func (pb *playback) replayConsoleLoop() int {
|
||||
case "locked_block":
|
||||
fmt.Printf("%v %v\n", rs.LockedBlockParts.StringShort(), rs.LockedBlock.StringShort())
|
||||
case "votes":
|
||||
fmt.Println(rs.Votes.StringIndented(" "))
|
||||
fmt.Println(rs.Votes.StringIndented(" "))
|
||||
|
||||
default:
|
||||
fmt.Println("Unknown option", tokens[1])
|
||||
@@ -287,14 +287,19 @@ func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusCo
|
||||
|
||||
// Get State
|
||||
stateDB := dbm.NewDB("state", dbType, config.DBDir())
|
||||
state, err := sm.MakeGenesisStateFromFile(config.GenesisFile())
|
||||
gdoc, err := sm.MakeGenesisDocFromFile(config.GenesisFile())
|
||||
if err != nil {
|
||||
cmn.Exit(err.Error())
|
||||
}
|
||||
state, err := sm.MakeGenesisState(gdoc)
|
||||
if err != nil {
|
||||
cmn.Exit(err.Error())
|
||||
}
|
||||
|
||||
// Create proxyAppConn connection (consensus, mempool, query)
|
||||
clientCreator := proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir())
|
||||
proxyApp := proxy.NewAppConns(clientCreator, NewHandshaker(stateDB, state, blockStore))
|
||||
proxyApp := proxy.NewAppConns(clientCreator,
|
||||
NewHandshaker(stateDB, state, blockStore, gdoc.AppState()))
|
||||
err = proxyApp.Start()
|
||||
if err != nil {
|
||||
cmn.Exit(cmn.Fmt("Error starting proxy app conns: %v", err))
|
||||
|
@@ -15,10 +15,9 @@ import (

"github.com/stretchr/testify/require"

"github.com/tendermint/abci/example/dummy"
"github.com/tendermint/abci/example/kvstore"
abci "github.com/tendermint/abci/types"
crypto "github.com/tendermint/go-crypto"
wire "github.com/tendermint/go-wire"
auto "github.com/tendermint/tmlibs/autofile"
cmn "github.com/tendermint/tmlibs/common"
dbm "github.com/tendermint/tmlibs/db"

@@ -27,6 +26,7 @@ import (
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
pvm "github.com/tendermint/tendermint/types/priv_validator"
"github.com/tendermint/tmlibs/log"
)

@@ -55,12 +55,12 @@ func startNewConsensusStateAndWaitForBlock(t *testing.T, lastBlockHeight int64,
logger := log.TestingLogger()
state, _ := sm.LoadStateFromDBOrGenesisFile(stateDB, consensusReplayConfig.GenesisFile())
privValidator := loadPrivValidator(consensusReplayConfig)
cs := newConsensusStateWithConfigAndBlockStore(consensusReplayConfig, state, privValidator, dummy.NewDummyApplication(), blockDB)
cs := newConsensusStateWithConfigAndBlockStore(consensusReplayConfig, state, privValidator, kvstore.NewKVStoreApplication(), blockDB)
cs.SetLogger(logger)

bytes, _ := ioutil.ReadFile(cs.config.WalFile())
// fmt.Printf("====== WAL: \n\r%s\n", bytes)
t.Logf("====== WAL: \n\r%s\n", bytes)
t.Logf("====== WAL: \n\r%X\n", bytes)

err := cs.Start()
require.NoError(t, err)

@@ -141,7 +141,7 @@ LOOP:
state, _ := sm.MakeGenesisStateFromFile(consensusReplayConfig.GenesisFile())
privValidator := loadPrivValidator(consensusReplayConfig)
blockDB := dbm.NewMemDB()
cs := newConsensusStateWithConfigAndBlockStore(consensusReplayConfig, state, privValidator, dummy.NewDummyApplication(), blockDB)
cs := newConsensusStateWithConfigAndBlockStore(consensusReplayConfig, state, privValidator, kvstore.NewKVStoreApplication(), blockDB)
cs.SetLogger(logger)

// start sending transactions

@@ -325,9 +325,9 @@ func testHandshakeReplay(t *testing.T, nBlocks int, mode uint) {
walFile := tempWALWithData(walBody)
config.Consensus.SetWalFile(walFile)

privVal := types.LoadPrivValidatorFS(config.PrivValidatorFile())
privVal := pvm.LoadFilePV(config.PrivValidatorFile())

wal, err := NewWAL(walFile, false)
wal, err := NewWAL(walFile)
if err != nil {
t.Fatal(err)
}

@@ -351,8 +351,8 @@ func testHandshakeReplay(t *testing.T, nBlocks int, mode uint) {
latestAppHash := state.AppHash

// make a new client creator
dummyApp := dummy.NewPersistentDummyApplication(path.Join(config.DBDir(), "2"))
clientCreator2 := proxy.NewLocalClientCreator(dummyApp)
kvstoreApp := kvstore.NewPersistentKVStoreApplication(path.Join(config.DBDir(), "2"))
clientCreator2 := proxy.NewLocalClientCreator(kvstoreApp)
if nBlocks > 0 {
// run nBlocks against a new client to build up the app state.
// use a throwaway tendermint state

@@ -362,7 +362,7 @@ func testHandshakeReplay(t *testing.T, nBlocks int, mode uint) {
}

// now start the app using the handshake - it should sync
handshaker := NewHandshaker(stateDB, state, store)
handshaker := NewHandshaker(stateDB, state, store, nil)
proxyApp := proxy.NewAppConns(clientCreator2, handshaker)
if err := proxyApp.Start(); err != nil {
t.Fatalf("Error starting proxy app connections: %v", err)

@@ -382,9 +382,9 @@ func testHandshakeReplay(t *testing.T, nBlocks int, mode uint) {

expectedBlocksToSync := NUM_BLOCKS - nBlocks
if nBlocks == NUM_BLOCKS && mode > 0 {
expectedBlocksToSync += 1
expectedBlocksToSync++
} else if nBlocks > 0 && mode == 1 {
expectedBlocksToSync += 1
expectedBlocksToSync++
}

if handshaker.NBlocks() != expectedBlocksToSync {

@@ -412,9 +412,9 @@ func buildAppStateFromChain(proxyApp proxy.AppConns, stateDB dbm.DB,
}
defer proxyApp.Stop()

validators := types.TM2PB.Validators(state.Validators)
// TODO: get the genesis bytes (https://github.com/tendermint/tendermint/issues/1224)
var genesisBytes []byte
validators := types.TM2PB.Validators(state.Validators)
if _, err := proxyApp.Consensus().InitChainSync(abci.RequestInitChain{validators, genesisBytes}); err != nil {
panic(err)
}

@@ -432,7 +432,7 @@ func buildAppStateFromChain(proxyApp proxy.AppConns, stateDB dbm.DB,
}

if mode == 2 {
// update the dummy height and apphash
// update the kvstore height and apphash
// as if we ran commit but not
state = applyBlock(stateDB, state, chain[nBlocks-1], proxyApp)
}

@@ -442,16 +442,16 @@ func buildAppStateFromChain(proxyApp proxy.AppConns, stateDB dbm.DB,

func buildTMStateFromChain(config *cfg.Config, stateDB dbm.DB, state sm.State, chain []*types.Block, mode uint) sm.State {
// run the whole chain against this client to build up the tendermint state
clientCreator := proxy.NewLocalClientCreator(dummy.NewPersistentDummyApplication(path.Join(config.DBDir(), "1")))
clientCreator := proxy.NewLocalClientCreator(kvstore.NewPersistentKVStoreApplication(path.Join(config.DBDir(), "1")))
proxyApp := proxy.NewAppConns(clientCreator, nil) // sm.NewHandshaker(config, state, store, ReplayLastBlock))
if err := proxyApp.Start(); err != nil {
panic(err)
}
defer proxyApp.Stop()

validators := types.TM2PB.Validators(state.Validators)
// TODO: get the genesis bytes (https://github.com/tendermint/tendermint/issues/1224)
var genesisBytes []byte
validators := types.TM2PB.Validators(state.Validators)
if _, err := proxyApp.Consensus().InitChainSync(abci.RequestInitChain{validators, genesisBytes}); err != nil {
panic(err)
}

@@ -519,8 +519,8 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) {
case EndHeightMessage:
// if its not the first one, we have a full block
if thisBlockParts != nil {
var n int
block := wire.ReadBinary(&types.Block{}, thisBlockParts.GetReader(), 0, &n, &err).(*types.Block)
var block = new(types.Block)
_, err = cdc.UnmarshalBinaryReader(thisBlockParts.GetReader(), block, 0)
if err != nil {
panic(err)
}

@@ -533,12 +533,12 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) {
}
blocks = append(blocks, block)
commits = append(commits, thisBlockCommit)
height += 1
height++
}
case *types.PartSetHeader:
thisBlockParts = types.NewPartSetFromHeader(*p)
case *types.Part:
_, err := thisBlockParts.AddPart(p, false)
_, err := thisBlockParts.AddPart(p)
if err != nil {
return nil, nil, err
}

@@ -552,8 +552,8 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) {
}
}
// grab the last block too
var n int
block := wire.ReadBinary(&types.Block{}, thisBlockParts.GetReader(), 0, &n, &err).(*types.Block)
var block = new(types.Block)
_, err = cdc.UnmarshalBinaryReader(thisBlockParts.GetReader(), block, 0)
if err != nil {
panic(err)
}
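The two decode sites above move from go-wire's ReadBinary to go-amino's UnmarshalBinaryReader. A small, self-contained round-trip sketch of that pattern, assuming the go-amino API of this era (MarshalBinary / UnmarshalBinaryReader on a Codec); the Block type here is a stand-in, not tendermint's:

package main

import (
	"bytes"
	"fmt"

	amino "github.com/tendermint/go-amino"
)

type Block struct {
	Height int64
	Data   []byte
}

func main() {
	cdc := amino.NewCodec()

	// Encode (length-prefixed) with the codec.
	bz, err := cdc.MarshalBinary(Block{Height: 1, Data: []byte("tx")})
	if err != nil {
		panic(err)
	}

	// Decode from a reader, mirroring cdc.UnmarshalBinaryReader(parts.GetReader(), block, 0)
	// in the hunks above; a limit of 0 means "no size limit".
	var block Block
	if _, err := cdc.UnmarshalBinaryReader(bytes.NewReader(bz), &block, 0); err != nil {
		panic(err)
	}
	fmt.Println(block.Height)
}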
@@ -10,8 +10,6 @@ import (
"time"

fail "github.com/ebuchman/fail-test"

wire "github.com/tendermint/go-wire"
cmn "github.com/tendermint/tmlibs/common"
"github.com/tendermint/tmlibs/log"

@@ -170,18 +168,31 @@ func (cs *ConsensusState) GetState() sm.State {
return cs.state.Copy()
}

// GetRoundState returns a copy of the internal consensus state.
// GetRoundState returns a shallow copy of the internal consensus state.
func (cs *ConsensusState) GetRoundState() *cstypes.RoundState {
cs.mtx.Lock()
defer cs.mtx.Unlock()
return cs.getRoundState()
}

func (cs *ConsensusState) getRoundState() *cstypes.RoundState {
rs := cs.RoundState // copy
return &rs
}

// GetRoundStateJSON returns a json of RoundState, marshalled using go-amino.
func (cs *ConsensusState) GetRoundStateJSON() ([]byte, error) {
cs.mtx.Lock()
defer cs.mtx.Unlock()

return cdc.MarshalJSON(cs.RoundState)
}

// GetRoundStateSimpleJSON returns a json of RoundStateSimple, marshalled using go-amino.
func (cs *ConsensusState) GetRoundStateSimpleJSON() ([]byte, error) {
cs.mtx.Lock()
defer cs.mtx.Unlock()

return cdc.MarshalJSON(cs.RoundState.RoundStateSimple())
}
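GetRoundStateJSON and GetRoundStateSimpleJSON above hand back the round state already marshalled with the package's amino codec, under the consensus mutex. A hedged sketch of a caller (the helper name and the "encoding/json" usage are illustrative, not part of this change):

// Hypothetical helper built on the new accessor.
func dumpConsensusState(cs *ConsensusState) (json.RawMessage, error) {
	bz, err := cs.GetRoundStateJSON() // marshalled under cs.mtx, safe to call from another goroutine
	if err != nil {
		return nil, err
	}
	return json.RawMessage(bz), nil
}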
// GetValidators returns a copy of the current validators.
func (cs *ConsensusState) GetValidators() (int64, []*types.Validator) {
cs.mtx.Lock()

@@ -290,7 +301,7 @@ func (cs *ConsensusState) Wait() {

// OpenWAL opens a file to log all consensus messages and timeouts for deterministic accountability
func (cs *ConsensusState) OpenWAL(walFile string) (WAL, error) {
wal, err := NewWAL(walFile, cs.config.WalLight)
wal, err := NewWAL(walFile)
if err != nil {
cs.Logger.Error("Failed to open WAL for consensus state", "wal", walFile, "err", err)
return nil, err

@@ -477,6 +488,9 @@ func (cs *ConsensusState) updateToState(state sm.State) {
cs.LockedRound = 0
cs.LockedBlock = nil
cs.LockedBlockParts = nil
cs.ValidRound = 0
cs.ValidBlock = nil
cs.ValidBlockParts = nil
cs.Votes = cstypes.NewHeightVoteSet(state.ChainID, height, validators)
cs.CommitRound = -1
cs.LastCommit = lastPrecommits

@@ -491,7 +505,7 @@ func (cs *ConsensusState) updateToState(state sm.State) {
func (cs *ConsensusState) newStep() {
rs := cs.RoundStateEvent()
cs.wal.Save(rs)
cs.nSteps += 1
cs.nSteps++
// newStep is called by updateToStep in NewConsensusState before the eventBus is set!
if cs.eventBus != nil {
cs.eventBus.PublishEventNewRoundStep(rs)

@@ -570,8 +584,9 @@ func (cs *ConsensusState) handleMsg(mi msgInfo) {
err = cs.setProposal(msg.Proposal)
case *BlockPartMessage:
// if the proposal is complete, we'll enterPrevote or tryFinalizeCommit
_, err = cs.addProposalBlockPart(msg.Height, msg.Part, peerID != "")
_, err = cs.addProposalBlockPart(msg.Height, msg.Part)
if err != nil && msg.Round != cs.Round {
cs.Logger.Debug("Received block part from wrong round", "height", cs.Height, "csRound", cs.Round, "blockRound", msg.Round)
err = nil
}
case *VoteMessage:

@@ -580,6 +595,10 @@ func (cs *ConsensusState) handleMsg(mi msgInfo) {
err := cs.tryAddVote(msg.Vote, peerID)
if err == ErrAddingVote {
// TODO: punish peer
// We probably don't want to stop the peer here. The vote does not
// necessarily comes from a malicious peer but can be just broadcasted by
// a typical peer.
// https://github.com/tendermint/tendermint/issues/1281
}

// NOTE: the vote is broadcast to peers by the reactor listening

@@ -592,7 +611,7 @@ func (cs *ConsensusState) handleMsg(mi msgInfo) {
cs.Logger.Error("Unknown msg type", reflect.TypeOf(msg))
}
if err != nil {
cs.Logger.Error("Error with msg", "type", reflect.TypeOf(msg), "peer", peerID, "err", err, "msg", msg)
cs.Logger.Error("Error with msg", "height", cs.Height, "round", cs.Round, "type", reflect.TypeOf(msg), "peer", peerID, "err", err, "msg", msg)
}
}

@@ -649,16 +668,18 @@ func (cs *ConsensusState) handleTxsAvailable(height int64) {
// Enter: +2/3 prevotes any or +2/3 precommits for block or any from (height, round)
// NOTE: cs.StartTime was already set for height.
func (cs *ConsensusState) enterNewRound(height int64, round int) {
logger := cs.Logger.With("height", height, "round", round)

if cs.Height != height || round < cs.Round || (cs.Round == round && cs.Step != cstypes.RoundStepNewHeight) {
cs.Logger.Debug(cmn.Fmt("enterNewRound(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
logger.Debug(cmn.Fmt("enterNewRound(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
return
}

if now := time.Now(); cs.StartTime.After(now) {
cs.Logger.Info("Need to set a buffer and log message here for sanity.", "startTime", cs.StartTime, "now", now)
logger.Info("Need to set a buffer and log message here for sanity.", "startTime", cs.StartTime, "now", now)
}

cs.Logger.Info(cmn.Fmt("enterNewRound(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
logger.Info(cmn.Fmt("enterNewRound(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))

// Increment validators if necessary
validators := cs.Validators

@@ -677,6 +698,7 @@ func (cs *ConsensusState) enterNewRound(height int64, round int) {
// and meanwhile we might have received a proposal
// for round 0.
} else {
logger.Info("Resetting Proposal info")
cs.Proposal = nil
cs.ProposalBlock = nil
cs.ProposalBlockParts = nil

@@ -713,11 +735,7 @@ func (cs *ConsensusState) needProofBlock(height int64) bool {
func (cs *ConsensusState) proposalHeartbeat(height int64, round int) {
counter := 0
addr := cs.privValidator.GetAddress()
valIndex, v := cs.Validators.GetByAddress(addr)
if v == nil {
// not a validator
valIndex = -1
}
valIndex, _ := cs.Validators.GetByAddress(addr)
chainID := cs.state.ChainID
for {
rs := cs.GetRoundState()

@@ -734,7 +752,7 @@ func (cs *ConsensusState) proposalHeartbeat(height int64, round int) {
}
cs.privValidator.SignHeartbeat(chainID, heartbeat)
cs.eventBus.PublishEventProposalHeartbeat(types.EventDataProposalHeartbeat{heartbeat})
counter += 1
counter++
time.Sleep(proposalHeartbeatIntervalSeconds * time.Second)
}
}

@@ -743,11 +761,13 @@ func (cs *ConsensusState) proposalHeartbeat(height int64, round int) {
// Enter (CreateEmptyBlocks, CreateEmptyBlocksInterval > 0 ): after enterNewRound(height,round), after timeout of CreateEmptyBlocksInterval
// Enter (!CreateEmptyBlocks) : after enterNewRound(height,round), once txs are in the mempool
func (cs *ConsensusState) enterPropose(height int64, round int) {
logger := cs.Logger.With("height", height, "round", round)

if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPropose <= cs.Step) {
cs.Logger.Debug(cmn.Fmt("enterPropose(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
logger.Debug(cmn.Fmt("enterPropose(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
return
}
cs.Logger.Info(cmn.Fmt("enterPropose(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
logger.Info(cmn.Fmt("enterPropose(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))

defer func() {
// Done enterPropose:

@@ -767,22 +787,22 @@ func (cs *ConsensusState) enterPropose(height int64, round int) {

// Nothing more to do if we're not a validator
if cs.privValidator == nil {
cs.Logger.Debug("This node is not a validator")
logger.Debug("This node is not a validator")
return
}

// if not a validator, we're done
if !cs.Validators.HasAddress(cs.privValidator.GetAddress()) {
cs.Logger.Debug("This node is not a validator")
logger.Debug("This node is not a validator", "addr", cs.privValidator.GetAddress(), "vals", cs.Validators)
return
}
cs.Logger.Debug("This node is a validator")
logger.Debug("This node is a validator")

if cs.isProposer() {
cs.Logger.Info("enterPropose: Our turn to propose", "proposer", cs.Validators.GetProposer().Address, "privValidator", cs.privValidator)
logger.Info("enterPropose: Our turn to propose", "proposer", cs.Validators.GetProposer().Address, "privValidator", cs.privValidator)
cs.decideProposal(height, round)
} else {
cs.Logger.Info("enterPropose: Not our turn to propose", "proposer", cs.Validators.GetProposer().Address, "privValidator", cs.privValidator)
logger.Info("enterPropose: Not our turn to propose", "proposer", cs.Validators.GetProposer().Address, "privValidator", cs.privValidator)
}
}

@@ -798,6 +818,9 @@ func (cs *ConsensusState) defaultDecideProposal(height int64, round int) {
if cs.LockedBlock != nil {
// If we're locked onto a block, just choose that.
block, blockParts = cs.LockedBlock, cs.LockedBlockParts
} else if cs.ValidBlock != nil {
// If there is valid block, choose that.
block, blockParts = cs.ValidBlock, cs.ValidBlockParts
} else {
// Create a new proposal block from state/txs from the mempool.
block, blockParts = cs.createProposalBlock()

@@ -842,10 +865,10 @@ func (cs *ConsensusState) isProposalComplete() bool {
// make sure we have the prevotes from it too
if cs.Proposal.POLRound < 0 {
return true
} else {
// if this is false the proposer is lying or we haven't received the POL yet
return cs.Votes.Prevotes(cs.Proposal.POLRound).HasTwoThirdsMajority()
}
// if this is false the proposer is lying or we haven't received the POL yet
return cs.Votes.Prevotes(cs.Proposal.POLRound).HasTwoThirdsMajority()
}

// Create the next block to propose and return it.

@@ -942,14 +965,16 @@ func (cs *ConsensusState) defaultDoPrevote(height int64, round int) {

// Enter: any +2/3 prevotes at next round.
func (cs *ConsensusState) enterPrevoteWait(height int64, round int) {
logger := cs.Logger.With("height", height, "round", round)

if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrevoteWait <= cs.Step) {
cs.Logger.Debug(cmn.Fmt("enterPrevoteWait(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
logger.Debug(cmn.Fmt("enterPrevoteWait(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
return
}
if !cs.Votes.Prevotes(round).HasTwoThirdsAny() {
cmn.PanicSanity(cmn.Fmt("enterPrevoteWait(%v/%v), but Prevotes does not have any +2/3 votes", height, round))
}
cs.Logger.Info(cmn.Fmt("enterPrevoteWait(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
logger.Info(cmn.Fmt("enterPrevoteWait(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))

defer func() {
// Done enterPrevoteWait:

@@ -968,12 +993,14 @@ func (cs *ConsensusState) enterPrevoteWait(height int64, round int) {
// else, unlock an existing lock and precommit nil if +2/3 of prevotes were nil,
// else, precommit nil otherwise.
func (cs *ConsensusState) enterPrecommit(height int64, round int) {
logger := cs.Logger.With("height", height, "round", round)

if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrecommit <= cs.Step) {
cs.Logger.Debug(cmn.Fmt("enterPrecommit(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
logger.Debug(cmn.Fmt("enterPrecommit(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
return
}

cs.Logger.Info(cmn.Fmt("enterPrecommit(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
logger.Info(cmn.Fmt("enterPrecommit(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))

defer func() {
// Done enterPrecommit:

@@ -981,23 +1008,24 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) {
cs.newStep()
}()

// check for a polka
blockID, ok := cs.Votes.Prevotes(round).TwoThirdsMajority()

// If we don't have a polka, we must precommit nil
// If we don't have a polka, we must precommit nil.
if !ok {
if cs.LockedBlock != nil {
cs.Logger.Info("enterPrecommit: No +2/3 prevotes during enterPrecommit while we're locked. Precommitting nil")
logger.Info("enterPrecommit: No +2/3 prevotes during enterPrecommit while we're locked. Precommitting nil")
} else {
cs.Logger.Info("enterPrecommit: No +2/3 prevotes during enterPrecommit. Precommitting nil.")
logger.Info("enterPrecommit: No +2/3 prevotes during enterPrecommit. Precommitting nil.")
}
cs.signAddVote(types.VoteTypePrecommit, nil, types.PartSetHeader{})
return
}

// At this point +2/3 prevoted for a particular block or nil
// At this point +2/3 prevoted for a particular block or nil.
cs.eventBus.PublishEventPolka(cs.RoundStateEvent())

// the latest POLRound should be this round
// the latest POLRound should be this round.
polRound, _ := cs.Votes.POLInfo()
if polRound < round {
cmn.PanicSanity(cmn.Fmt("This POLRound should be %v but got %", round, polRound))

@@ -1006,9 +1034,9 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) {
// +2/3 prevoted nil. Unlock and precommit nil.
if len(blockID.Hash) == 0 {
if cs.LockedBlock == nil {
cs.Logger.Info("enterPrecommit: +2/3 prevoted for nil.")
logger.Info("enterPrecommit: +2/3 prevoted for nil.")
} else {
cs.Logger.Info("enterPrecommit: +2/3 prevoted for nil. Unlocking")
logger.Info("enterPrecommit: +2/3 prevoted for nil. Unlocking")
cs.LockedRound = 0
cs.LockedBlock = nil
cs.LockedBlockParts = nil

@@ -1022,7 +1050,7 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) {

// If we're already locked on that block, precommit it, and update the LockedRound
if cs.LockedBlock.HashesTo(blockID.Hash) {
cs.Logger.Info("enterPrecommit: +2/3 prevoted locked block. Relocking")
logger.Info("enterPrecommit: +2/3 prevoted locked block. Relocking")
cs.LockedRound = round
cs.eventBus.PublishEventRelock(cs.RoundStateEvent())
cs.signAddVote(types.VoteTypePrecommit, blockID.Hash, blockID.PartsHeader)

@@ -1031,7 +1059,7 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) {

// If +2/3 prevoted for proposal block, stage and precommit it
if cs.ProposalBlock.HashesTo(blockID.Hash) {
cs.Logger.Info("enterPrecommit: +2/3 prevoted proposal block. Locking", "hash", blockID.Hash)
logger.Info("enterPrecommit: +2/3 prevoted proposal block. Locking", "hash", blockID.Hash)
// Validate the block.
if err := cs.blockExec.ValidateBlock(cs.state, cs.ProposalBlock); err != nil {
cmn.PanicConsensus(cmn.Fmt("enterPrecommit: +2/3 prevoted for an invalid block: %v", err))

@@ -1061,14 +1089,16 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) {

// Enter: any +2/3 precommits for next round.
func (cs *ConsensusState) enterPrecommitWait(height int64, round int) {
logger := cs.Logger.With("height", height, "round", round)

if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrecommitWait <= cs.Step) {
cs.Logger.Debug(cmn.Fmt("enterPrecommitWait(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
logger.Debug(cmn.Fmt("enterPrecommitWait(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
return
}
if !cs.Votes.Precommits(round).HasTwoThirdsAny() {
cmn.PanicSanity(cmn.Fmt("enterPrecommitWait(%v/%v), but Precommits does not have any +2/3 votes", height, round))
}
cs.Logger.Info(cmn.Fmt("enterPrecommitWait(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
logger.Info(cmn.Fmt("enterPrecommitWait(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))

defer func() {
// Done enterPrecommitWait:

@@ -1083,11 +1113,13 @@ func (cs *ConsensusState) enterPrecommitWait(height int64, round int) {

// Enter: +2/3 precommits for block
func (cs *ConsensusState) enterCommit(height int64, commitRound int) {
logger := cs.Logger.With("height", height, "commitRound", commitRound)

if cs.Height != height || cstypes.RoundStepCommit <= cs.Step {
cs.Logger.Debug(cmn.Fmt("enterCommit(%v/%v): Invalid args. Current step: %v/%v/%v", height, commitRound, cs.Height, cs.Round, cs.Step))
logger.Debug(cmn.Fmt("enterCommit(%v/%v): Invalid args. Current step: %v/%v/%v", height, commitRound, cs.Height, cs.Round, cs.Step))
return
}
cs.Logger.Info(cmn.Fmt("enterCommit(%v/%v). Current: %v/%v/%v", height, commitRound, cs.Height, cs.Round, cs.Step))
logger.Info(cmn.Fmt("enterCommit(%v/%v). Current: %v/%v/%v", height, commitRound, cs.Height, cs.Round, cs.Step))

defer func() {
// Done enterCommit:

@@ -1110,6 +1142,7 @@ func (cs *ConsensusState) enterCommit(height int64, commitRound int) {
// Move them over to ProposalBlock if they match the commit hash,
// otherwise they'll be cleared in updateToState.
if cs.LockedBlock.HashesTo(blockID.Hash) {
logger.Info("Commit is for locked block. Set ProposalBlock=LockedBlock", "blockHash", blockID.Hash)
cs.ProposalBlock = cs.LockedBlock
cs.ProposalBlockParts = cs.LockedBlockParts
}

@@ -1117,6 +1150,7 @@ func (cs *ConsensusState) enterCommit(height int64, commitRound int) {
// If we don't have the block being committed, set up to get it.
if !cs.ProposalBlock.HashesTo(blockID.Hash) {
if !cs.ProposalBlockParts.HasHeader(blockID.PartsHeader) {
logger.Info("Commit is for a block we don't know about. Set ProposalBlock=nil", "proposal", cs.ProposalBlock.Hash(), "commit", blockID.Hash)
// We're getting the wrong block.
// Set up ProposalBlockParts and keep waiting.
cs.ProposalBlock = nil

@@ -1129,19 +1163,21 @@ func (cs *ConsensusState) enterCommit(height int64, commitRound int) {

// If we have the block AND +2/3 commits for it, finalize.
func (cs *ConsensusState) tryFinalizeCommit(height int64) {
logger := cs.Logger.With("height", height)

if cs.Height != height {
cmn.PanicSanity(cmn.Fmt("tryFinalizeCommit() cs.Height: %v vs height: %v", cs.Height, height))
}

blockID, ok := cs.Votes.Precommits(cs.CommitRound).TwoThirdsMajority()
if !ok || len(blockID.Hash) == 0 {
cs.Logger.Error("Attempt to finalize failed. There was no +2/3 majority, or +2/3 was for <nil>.", "height", height)
logger.Error("Attempt to finalize failed. There was no +2/3 majority, or +2/3 was for <nil>.")
return
}
if !cs.ProposalBlock.HashesTo(blockID.Hash) {
// TODO: this happens every time if we're not a validator (ugly logs)
// TODO: ^^ wait, why does it matter that we're a validator?
cs.Logger.Info("Attempt to finalize failed. We don't have the commit block.", "height", height, "proposal-block", cs.ProposalBlock.Hash(), "commit-block", blockID.Hash)
logger.Info("Attempt to finalize failed. We don't have the commit block.", "proposal-block", cs.ProposalBlock.Hash(), "commit-block", blockID.Hash)
return
}

@@ -1192,23 +1228,28 @@ func (cs *ConsensusState) finalizeCommit(height int64) {

fail.Fail() // XXX

// Finish writing to the WAL for this height.
// NOTE: If we fail before writing this, we'll never write it,
// and just recover by running ApplyBlock in the Handshake.
// If we moved it before persisting the block, we'd have to allow
// WAL replay for blocks with an #ENDHEIGHT
// As is, ConsensusState should not be started again
// until we successfully call ApplyBlock (ie. here or in Handshake after restart)
// Write EndHeightMessage{} for this height, implying that the blockstore
// has saved the block.
//
// If we crash before writing this EndHeightMessage{}, we will recover by
// running ApplyBlock during the ABCI handshake when we restart. If we
// didn't save the block to the blockstore before writing
// EndHeightMessage{}, we'd have to change WAL replay -- currently it
// complains about replaying for heights where an #ENDHEIGHT entry already
// exists.
//
// Either way, the ConsensusState should not be resumed until we
// successfully call ApplyBlock (ie. later here, or in Handshake after
// restart).
cs.wal.Save(EndHeightMessage{height})

fail.Fail() // XXX

// Create a copy of the state for staging
// and an event cache for txs
// Create a copy of the state for staging and an event cache for txs.
stateCopy := cs.state.Copy()

// Execute and commit the block, update and save the state, and update the mempool.
// NOTE: the block.AppHash wont reflect these txs until the next block
// NOTE The block.AppHash wont reflect these txs until the next block.
var err error
stateCopy, err = cs.blockExec.ApplyBlock(stateCopy, types.BlockID{block.Hash(), blockParts.Header()}, block)
if err != nil {
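The rewritten comment block above pins down the ordering that makes crash recovery work. A condensed sketch of that sequence, using only the calls already present in the surrounding code (this is a fragment of finalizeCommit, not a complete implementation):

// 1) persist the block and its seen commit to the blockstore (elided above),
// 2) only then record the end of the height in the WAL,
// 3) only then apply the block to the state via ABCI.
cs.wal.Save(EndHeightMessage{height}) // implies the blockstore already holds the block

stateCopy := cs.state.Copy()
stateCopy, err := cs.blockExec.ApplyBlock(stateCopy, types.BlockID{block.Hash(), blockParts.Header()}, block)
if err != nil {
	// If we crash before ApplyBlock completes, the ABCI handshake replays it on restart.
	return
}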
@@ -1263,40 +1304,62 @@ func (cs *ConsensusState) defaultSetProposal(proposal *types.Proposal) error {
}

// Verify signature
if !cs.Validators.GetProposer().PubKey.VerifyBytes(types.SignBytes(cs.state.ChainID, proposal), proposal.Signature) {
if !cs.Validators.GetProposer().PubKey.VerifyBytes(proposal.SignBytes(cs.state.ChainID), proposal.Signature) {
return ErrInvalidProposalSignature
}

cs.Proposal = proposal
cs.ProposalBlockParts = types.NewPartSetFromHeader(proposal.BlockPartsHeader)
cs.Logger.Info("Received proposal", "proposal", proposal)
return nil
}

// NOTE: block is not necessarily valid.
// Asynchronously triggers either enterPrevote (before we timeout of propose) or tryFinalizeCommit, once we have the full block.
func (cs *ConsensusState) addProposalBlockPart(height int64, part *types.Part, verify bool) (added bool, err error) {
func (cs *ConsensusState) addProposalBlockPart(height int64, part *types.Part) (added bool, err error) {
// Blocks might be reused, so round mismatch is OK
if cs.Height != height {
cs.Logger.Debug("Received block part from wrong height", "height", height)
return false, nil
}

// We're not expecting a block part.
if cs.ProposalBlockParts == nil {
cs.Logger.Info("Received a block part when we're not expecting any", "height", height)
return false, nil // TODO: bad peer? Return error?
}

added, err = cs.ProposalBlockParts.AddPart(part, verify)
added, err = cs.ProposalBlockParts.AddPart(part)
if err != nil {
return added, err
}
if added && cs.ProposalBlockParts.IsComplete() {
// Added and completed!
var n int
var err error
cs.ProposalBlock = wire.ReadBinary(&types.Block{}, cs.ProposalBlockParts.GetReader(),
cs.state.ConsensusParams.BlockSize.MaxBytes, &n, &err).(*types.Block)
_, err = cdc.UnmarshalBinaryReader(cs.ProposalBlockParts.GetReader(), &cs.ProposalBlock, int64(cs.state.ConsensusParams.BlockSize.MaxBytes))
if err != nil {
return true, err
}
// NOTE: it's possible to receive complete proposal blocks for future rounds without having the proposal
cs.Logger.Info("Received complete proposal block", "height", cs.ProposalBlock.Height, "hash", cs.ProposalBlock.Hash())

// Update Valid* if we can.
prevotes := cs.Votes.Prevotes(cs.Round)
blockID, hasTwoThirds := prevotes.TwoThirdsMajority()
if hasTwoThirds && !blockID.IsZero() && (cs.ValidRound < cs.Round) {
if cs.ProposalBlock.HashesTo(blockID.Hash) {
cs.Logger.Info("Updating valid block to new proposal block",
"valid-round", cs.Round, "valid-block-hash", cs.ProposalBlock.Hash())
cs.ValidRound = cs.Round
cs.ValidBlock = cs.ProposalBlock
cs.ValidBlockParts = cs.ProposalBlockParts
}
// TODO: In case there is +2/3 majority in Prevotes set for some
// block and cs.ProposalBlock contains different block, either
// proposer is faulty or voting power of faulty processes is more
// than 1/3. We should trigger in the future accountability
// procedure at this point.
}

if cs.Step == cstypes.RoundStepPropose && cs.isProposalComplete() {
// Move onto the next step
cs.enterPrevote(height, cs.Round)

@@ -1304,7 +1367,7 @@ func (cs *ConsensusState) addProposalBlockPart(height int64, part *types.Part, v
// If we're waiting on the proposal block...
cs.tryFinalizeCommit(height)
}
return true, err
return true, nil
}
return added, nil
}

@@ -1349,99 +1412,127 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool,
return added, ErrVoteHeightMismatch
}
added, err = cs.LastCommit.AddVote(vote)
if added {
cs.Logger.Info(cmn.Fmt("Added to lastPrecommits: %v", cs.LastCommit.StringShort()))
cs.eventBus.PublishEventVote(types.EventDataVote{vote})
if !added {
return added, err
}

// if we can skip timeoutCommit and have all the votes now,
if cs.config.SkipTimeoutCommit && cs.LastCommit.HasAll() {
// go straight to new round (skip timeout commit)
// cs.scheduleTimeout(time.Duration(0), cs.Height, 0, cstypes.RoundStepNewHeight)
cs.enterNewRound(cs.Height, 0)
}
cs.Logger.Info(cmn.Fmt("Added to lastPrecommits: %v", cs.LastCommit.StringShort()))
cs.eventBus.PublishEventVote(types.EventDataVote{vote})

// if we can skip timeoutCommit and have all the votes now,
if cs.config.SkipTimeoutCommit && cs.LastCommit.HasAll() {
// go straight to new round (skip timeout commit)
// cs.scheduleTimeout(time.Duration(0), cs.Height, 0, cstypes.RoundStepNewHeight)
cs.enterNewRound(cs.Height, 0)
}

return
}

// A prevote/precommit for this height?
if vote.Height == cs.Height {
height := cs.Height
added, err = cs.Votes.AddVote(vote, peerID)
if added {
cs.eventBus.PublishEventVote(types.EventDataVote{vote})
// Height mismatch is ignored.
// Not necessarily a bad peer, but not favourable behaviour.
if vote.Height != cs.Height {
err = ErrVoteHeightMismatch
cs.Logger.Info("Vote ignored and not added", "voteHeight", vote.Height, "csHeight", cs.Height, "err", err)
return
}

switch vote.Type {
case types.VoteTypePrevote:
prevotes := cs.Votes.Prevotes(vote.Round)
cs.Logger.Info("Added to prevote", "vote", vote, "prevotes", prevotes.StringShort())
// First, unlock if prevotes is a valid POL.
// >> lockRound < POLRound <= unlockOrChangeLockRound (see spec)
// NOTE: If (lockRound < POLRound) but !(POLRound <= unlockOrChangeLockRound),
// we'll still enterNewRound(H,vote.R) and enterPrecommit(H,vote.R) to process it
// there.
if (cs.LockedBlock != nil) && (cs.LockedRound < vote.Round) && (vote.Round <= cs.Round) {
blockID, ok := prevotes.TwoThirdsMajority()
if ok && !cs.LockedBlock.HashesTo(blockID.Hash) {
cs.Logger.Info("Unlocking because of POL.", "lockedRound", cs.LockedRound, "POLRound", vote.Round)
cs.LockedRound = 0
cs.LockedBlock = nil
cs.LockedBlockParts = nil
cs.eventBus.PublishEventUnlock(cs.RoundStateEvent())
}
}
if cs.Round <= vote.Round && prevotes.HasTwoThirdsAny() {
// Round-skip over to PrevoteWait or goto Precommit.
cs.enterNewRound(height, vote.Round) // if the vote is ahead of us
if prevotes.HasTwoThirdsMajority() {
cs.enterPrecommit(height, vote.Round)
} else {
cs.enterPrevote(height, vote.Round) // if the vote is ahead of us
cs.enterPrevoteWait(height, vote.Round)
}
} else if cs.Proposal != nil && 0 <= cs.Proposal.POLRound && cs.Proposal.POLRound == vote.Round {
// If the proposal is now complete, enter prevote of cs.Round.
if cs.isProposalComplete() {
cs.enterPrevote(height, cs.Round)
}
}
case types.VoteTypePrecommit:
precommits := cs.Votes.Precommits(vote.Round)
cs.Logger.Info("Added to precommit", "vote", vote, "precommits", precommits.StringShort())
blockID, ok := precommits.TwoThirdsMajority()
if ok {
if len(blockID.Hash) == 0 {
cs.enterNewRound(height, vote.Round+1)
} else {
cs.enterNewRound(height, vote.Round)
cs.enterPrecommit(height, vote.Round)
cs.enterCommit(height, vote.Round)

if cs.config.SkipTimeoutCommit && precommits.HasAll() {
// if we have all the votes now,
// go straight to new round (skip timeout commit)
// cs.scheduleTimeout(time.Duration(0), cs.Height, 0, cstypes.RoundStepNewHeight)
cs.enterNewRound(cs.Height, 0)
}

}
} else if cs.Round <= vote.Round && precommits.HasTwoThirdsAny() {
cs.enterNewRound(height, vote.Round)
cs.enterPrecommit(height, vote.Round)
cs.enterPrecommitWait(height, vote.Round)
}
default:
cmn.PanicSanity(cmn.Fmt("Unexpected vote type %X", vote.Type)) // Should not happen.
}
}
height := cs.Height
added, err = cs.Votes.AddVote(vote, peerID)
if !added {
// Either duplicate, or error upon cs.Votes.AddByIndex()
return
} else {
err = ErrVoteHeightMismatch
}

// Height mismatch, bad peer?
cs.Logger.Info("Vote ignored and not added", "voteHeight", vote.Height, "csHeight", cs.Height, "err", err)
cs.eventBus.PublishEventVote(types.EventDataVote{vote})

switch vote.Type {
case types.VoteTypePrevote:
prevotes := cs.Votes.Prevotes(vote.Round)
cs.Logger.Info("Added to prevote", "vote", vote, "prevotes", prevotes.StringShort())

// If +2/3 prevotes for a block or nil for *any* round:
if blockID, ok := prevotes.TwoThirdsMajority(); ok {

// There was a polka!
// If we're locked but this is a recent polka, unlock.
// If it matches our ProposalBlock, update the ValidBlock

// Unlock if `cs.LockedRound < vote.Round <= cs.Round`
// NOTE: If vote.Round > cs.Round, we'll deal with it when we get to vote.Round
if (cs.LockedBlock != nil) &&
(cs.LockedRound < vote.Round) &&
(vote.Round <= cs.Round) &&
!cs.LockedBlock.HashesTo(blockID.Hash) {

cs.Logger.Info("Unlocking because of POL.", "lockedRound", cs.LockedRound, "POLRound", vote.Round)
cs.LockedRound = 0
cs.LockedBlock = nil
cs.LockedBlockParts = nil
cs.eventBus.PublishEventUnlock(cs.RoundStateEvent())
}

// Update Valid* if we can.
// NOTE: our proposal block may be nil or not what received a polka..
// TODO: we may want to still update the ValidBlock and obtain it via gossipping
if !blockID.IsZero() &&
(cs.ValidRound < vote.Round) &&
(vote.Round <= cs.Round) &&
cs.ProposalBlock.HashesTo(blockID.Hash) {

cs.Logger.Info("Updating ValidBlock because of POL.", "validRound", cs.ValidRound, "POLRound", vote.Round)
cs.ValidRound = vote.Round
cs.ValidBlock = cs.ProposalBlock
cs.ValidBlockParts = cs.ProposalBlockParts
}
}

// If +2/3 prevotes for *anything* for this or future round:
if cs.Round <= vote.Round && prevotes.HasTwoThirdsAny() {
// Round-skip over to PrevoteWait or goto Precommit.
cs.enterNewRound(height, vote.Round) // if the vote is ahead of us
if prevotes.HasTwoThirdsMajority() {
cs.enterPrecommit(height, vote.Round)
} else {
cs.enterPrevote(height, vote.Round) // if the vote is ahead of us
cs.enterPrevoteWait(height, vote.Round)
}
} else if cs.Proposal != nil && 0 <= cs.Proposal.POLRound && cs.Proposal.POLRound == vote.Round {
// If the proposal is now complete, enter prevote of cs.Round.
if cs.isProposalComplete() {
cs.enterPrevote(height, cs.Round)
}
}

case types.VoteTypePrecommit:
precommits := cs.Votes.Precommits(vote.Round)
cs.Logger.Info("Added to precommit", "vote", vote, "precommits", precommits.StringShort())
blockID, ok := precommits.TwoThirdsMajority()
if ok {
if len(blockID.Hash) == 0 {
cs.enterNewRound(height, vote.Round+1)
} else {
cs.enterNewRound(height, vote.Round)
cs.enterPrecommit(height, vote.Round)
cs.enterCommit(height, vote.Round)

if cs.config.SkipTimeoutCommit && precommits.HasAll() {
// if we have all the votes now,
// go straight to new round (skip timeout commit)
// cs.scheduleTimeout(time.Duration(0), cs.Height, 0, cstypes.RoundStepNewHeight)
cs.enterNewRound(cs.Height, 0)
}

}
} else if cs.Round <= vote.Round && precommits.HasTwoThirdsAny() {
cs.enterNewRound(height, vote.Round)
cs.enterPrecommit(height, vote.Round)
cs.enterPrecommitWait(height, vote.Round)
}
default:
panic(cmn.Fmt("Unexpected vote type %X", vote.Type)) // go-wire should prevent this.
}

return
}

@@ -1472,12 +1563,11 @@ func (cs *ConsensusState) signAddVote(type_ byte, hash []byte, header types.Part
cs.sendInternalMessage(msgInfo{&VoteMessage{vote}, ""})
cs.Logger.Info("Signed and pushed vote", "height", cs.Height, "round", cs.Round, "vote", vote, "err", err)
return vote
} else {
//if !cs.replayMode {
cs.Logger.Error("Error signing vote", "height", cs.Height, "round", cs.Round, "vote", vote, "err", err)
//}
return nil
}
//if !cs.replayMode {
cs.Logger.Error("Error signing vote", "height", cs.Height, "round", cs.Round, "vote", vote, "err", err)
//}
return nil
}

//---------------------------------------------------------
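To summarize the precommit branch of the rewritten addVote above, these are the transitions it drives; this is a condensed, commented sketch of logic already present in the hunk, not new behaviour:

if blockID, ok := precommits.TwoThirdsMajority(); ok {
	if len(blockID.Hash) == 0 {
		// +2/3 precommitted nil: skip straight to the next round.
		cs.enterNewRound(height, vote.Round+1)
	} else {
		// +2/3 precommitted a block: catch up to that round and commit it.
		cs.enterNewRound(height, vote.Round)
		cs.enterPrecommit(height, vote.Round)
		cs.enterCommit(height, vote.Round)
		if cs.config.SkipTimeoutCommit && precommits.HasAll() {
			cs.enterNewRound(cs.Height, 0) // every vote is in; skip timeoutCommit
		}
	}
} else if cs.Round <= vote.Round && precommits.HasTwoThirdsAny() {
	// +2/3 of *some* precommits, but no single block yet: wait it out.
	cs.enterNewRound(height, vote.Round)
	cs.enterPrecommit(height, vote.Round)
	cs.enterPrecommitWait(height, vote.Round)
}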
@@ -261,7 +261,7 @@ func TestStateFullRound1(t *testing.T) {

// grab proposal
re := <-propCh
propBlockHash := re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*cstypes.RoundState).ProposalBlock.Hash()
propBlockHash := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState).ProposalBlock.Hash()

<-voteCh // wait for prevote
validatePrevote(t, cs, round, vss[0], propBlockHash)

@@ -356,7 +356,7 @@ func TestStateLockNoPOL(t *testing.T) {
cs1.startRoutines(0)

re := <-proposalCh
rs := re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
theBlockHash := rs.ProposalBlock.Hash()

<-voteCh // prevote

@@ -396,7 +396,7 @@ func TestStateLockNoPOL(t *testing.T) {

// now we're on a new round and not the proposer, so wait for timeout
re = <-timeoutProposeCh
rs = re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)

if rs.ProposalBlock != nil {
panic("Expected proposal block to be nil")

@@ -409,7 +409,7 @@ func TestStateLockNoPOL(t *testing.T) {
validatePrevote(t, cs1, 1, vss[0], rs.LockedBlock.Hash())

// add a conflicting prevote from the other validator
signAddVotes(cs1, types.VoteTypePrevote, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2)
signAddVotes(cs1, types.VoteTypePrevote, hash, rs.LockedBlock.MakePartSet(partSize).Header(), vs2)
<-voteCh

// now we're going to enter prevote again, but with invalid args

@@ -424,7 +424,7 @@ func TestStateLockNoPOL(t *testing.T) {

// add conflicting precommit from vs2
// NOTE: in practice we should never get to a point where there are precommits for different blocks at the same round
signAddVotes(cs1, types.VoteTypePrecommit, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2)
signAddVotes(cs1, types.VoteTypePrecommit, hash, rs.LockedBlock.MakePartSet(partSize).Header(), vs2)
<-voteCh

// (note we're entering precommit for a second time this round, but with invalid args

@@ -440,7 +440,7 @@ func TestStateLockNoPOL(t *testing.T) {
incrementRound(vs2)

re = <-proposalCh
rs = re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)

// now we're on a new round and are the proposer
if !bytes.Equal(rs.ProposalBlock.Hash(), rs.LockedBlock.Hash()) {

@@ -529,7 +529,7 @@ func TestStateLockPOLRelock(t *testing.T) {

<-newRoundCh
re := <-proposalCh
rs := re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
theBlockHash := rs.ProposalBlock.Hash()

<-voteCh // prevote

@@ -605,9 +605,9 @@ func TestStateLockPOLRelock(t *testing.T) {
discardFromChan(voteCh, 2)

be := <-newBlockCh
b := be.(types.TMEventData).Unwrap().(types.EventDataNewBlockHeader)
b := be.(types.EventDataNewBlockHeader)
re = <-newRoundCh
rs = re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
if rs.Height != 2 {
panic("Expected height to increment")
}

@@ -643,7 +643,7 @@ func TestStateLockPOLUnlock(t *testing.T) {
startTestRound(cs1, cs1.Height, 0)
<-newRoundCh
re := <-proposalCh
rs := re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
theBlockHash := rs.ProposalBlock.Hash()

<-voteCh // prevote

@@ -669,7 +669,7 @@ func TestStateLockPOLUnlock(t *testing.T) {

// timeout to new round
re = <-timeoutWaitCh
rs = re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
lockedBlockHash := rs.LockedBlock.Hash()

//XXX: this isnt guaranteed to get there before the timeoutPropose ...

@@ -731,7 +731,7 @@ func TestStateLockPOLSafety1(t *testing.T) {
startTestRound(cs1, cs1.Height, 0)
<-newRoundCh
re := <-proposalCh
rs := re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
propBlock := rs.ProposalBlock

<-voteCh // prevote

@@ -781,7 +781,7 @@ func TestStateLockPOLSafety1(t *testing.T) {
re = <-proposalCh
}

rs = re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)

if rs.LockedBlock != nil {
panic("we should not be locked!")

@@ -1033,7 +1033,7 @@ func TestStateHalt1(t *testing.T) {
startTestRound(cs1, cs1.Height, 0)
<-newRoundCh
re := <-proposalCh
rs := re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
propBlock := rs.ProposalBlock
propBlockParts := propBlock.MakePartSet(partSize)

@@ -1056,7 +1056,7 @@ func TestStateHalt1(t *testing.T) {
// timeout to new round
<-timeoutWaitCh
re = <-newRoundCh
rs = re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)

t.Log("### ONTO ROUND 1")
/*Round2

@@ -1074,7 +1074,7 @@ func TestStateHalt1(t *testing.T) {
// receiving that precommit should take us straight to commit
<-newBlockCh
re = <-newRoundCh
rs = re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)

if rs.Height != 2 {
panic("expected height to increment")
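The test changes above all follow one pattern: events are no longer wrapped in types.TMEventData, so the extra .Unwrap() step disappears and the concrete event type is asserted directly. A hedged before/after sketch (the helper name is illustrative, not part of the change):

// Before: events arrived wrapped and had to be unwrapped first.
// rs := re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*cstypes.RoundState)

// After: assert the concrete event type directly.
func roundStateFromEvent(re interface{}) *cstypes.RoundState {
	return re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
}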
@@ -1,6 +1,7 @@
package types

import (
"errors"
"fmt"
"strings"
"sync"

@@ -15,6 +16,10 @@ type RoundVoteSet struct {
Precommits *types.VoteSet
}

var (
GotVoteFromUnwantedRoundError = errors.New("Peer has sent a vote that does not match our round for more than one round")
)

/*
Keeps track of all VoteSets from round 0 to round 'round'.

@@ -117,10 +122,8 @@ func (hvs *HeightVoteSet) AddVote(vote *types.Vote, peerID p2p.ID) (added bool,
voteSet = hvs.getVoteSet(vote.Round, vote.Type)
hvs.peerCatchupRounds[peerID] = append(rndz, vote.Round)
} else {
// Peer has sent a vote that does not match our round,
// for more than one round. Bad peer!
// TODO punish peer.
// log.Warn("Deal with peer giving votes from unwanted rounds")
// punish peer
err = GotVoteFromUnwantedRoundError
return
}
}

@@ -171,6 +174,26 @@ func (hvs *HeightVoteSet) getVoteSet(round int, type_ byte) *types.VoteSet {
}
}

// If a peer claims that it has 2/3 majority for given blockKey, call this.
// NOTE: if there are too many peers, or too much peer churn,
// this can cause memory issues.
// TODO: implement ability to remove peers too
func (hvs *HeightVoteSet) SetPeerMaj23(round int, type_ byte, peerID p2p.ID, blockID types.BlockID) error {
hvs.mtx.Lock()
defer hvs.mtx.Unlock()
if !types.IsVoteTypeValid(type_) {
return fmt.Errorf("SetPeerMaj23: Invalid vote type %v", type_)
}
voteSet := hvs.getVoteSet(round, type_)
if voteSet == nil {
return nil // something we don't know about yet
}
return voteSet.SetPeerMaj23(types.P2PID(peerID), blockID)
}

//---------------------------------------------------------
// string and json

func (hvs *HeightVoteSet) String() string {
return hvs.StringIndented("")
}

@@ -204,19 +227,35 @@ func (hvs *HeightVoteSet) StringIndented(indent string) string {
indent)
}

// If a peer claims that it has 2/3 majority for given blockKey, call this.
// NOTE: if there are too many peers, or too much peer churn,
// this can cause memory issues.
// TODO: implement ability to remove peers too
func (hvs *HeightVoteSet) SetPeerMaj23(round int, type_ byte, peerID p2p.ID, blockID types.BlockID) error {
func (hvs *HeightVoteSet) MarshalJSON() ([]byte, error) {
hvs.mtx.Lock()
defer hvs.mtx.Unlock()
if !types.IsVoteTypeValid(type_) {
return fmt.Errorf("SetPeerMaj23: Invalid vote type %v", type_)
}
voteSet := hvs.getVoteSet(round, type_)
if voteSet == nil {
return nil // something we don't know about yet
}
return voteSet.SetPeerMaj23(peerID, blockID)

allVotes := hvs.toAllRoundVotes()
return cdc.MarshalJSON(allVotes)
}

func (hvs *HeightVoteSet) toAllRoundVotes() []roundVotes {
totalRounds := hvs.round + 1
allVotes := make([]roundVotes, totalRounds)
// rounds 0 ~ hvs.round inclusive
for round := 0; round < totalRounds; round++ {
allVotes[round] = roundVotes{
Round: round,
Prevotes: hvs.roundVoteSets[round].Prevotes.VoteStrings(),
PrevotesBitArray: hvs.roundVoteSets[round].Prevotes.BitArrayString(),
Precommits: hvs.roundVoteSets[round].Precommits.VoteStrings(),
PrecommitsBitArray: hvs.roundVoteSets[round].Precommits.BitArrayString(),
}
}
// TODO: all other peer catchup rounds
return allVotes
}

type roundVotes struct {
Round int `json:"round"`
Prevotes []string `json:"prevotes"`
PrevotesBitArray string `json:"prevotes_bit_array"`
Precommits []string `json:"precommits"`
PrecommitsBitArray string `json:"precommits_bit_array"`
}

@@ -34,8 +34,8 @@ func TestPeerCatchupRounds(t *testing.T) {

vote1001_0 := makeVoteHR(t, 1, 1001, privVals, 0)
added, err = hvs.AddVote(vote1001_0, "peer1")
if err != nil {
t.Error("AddVote error", err)
if err != GotVoteFromUnwantedRoundError {
t.Errorf("Expected GotVoteFromUnwantedRoundError, but got %v", err)
}
if added {
t.Error("Expected to *not* add vote from peer, too many catchup rounds.")

@@ -48,7 +48,7 @@ func TestPeerCatchupRounds(t *testing.T) {

}

func makeVoteHR(t *testing.T, height int64, round int, privVals []*types.PrivValidatorFS, valIndex int) *types.Vote {
func makeVoteHR(t *testing.T, height int64, round int, privVals []types.PrivValidator, valIndex int) *types.Vote {
privVal := privVals[valIndex]
vote := &types.Vote{
ValidatorAddress: privVal.GetAddress(),
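With GotVoteFromUnwantedRoundError exported above, callers can tell the "too many catchup rounds" case apart from real failures, which is exactly what the updated test asserts. A hedged usage sketch of a caller wrapping AddVote (the surrounding function is illustrative):

added, err := hvs.AddVote(vote, peerID)
if err == GotVoteFromUnwantedRoundError {
	// The peer voted in a round we are not tracking for it; drop the vote
	// but do not treat this as a hard error.
	return false, nil
} else if err != nil {
	return false, err
}
return added, nil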
consensus/types/peer_round_state.go (new file, 57 lines)
@@ -0,0 +1,57 @@
package types

import (
"fmt"
"time"

"github.com/tendermint/tendermint/types"
cmn "github.com/tendermint/tmlibs/common"
)

//-----------------------------------------------------------------------------

// PeerRoundState contains the known state of a peer.
// NOTE: Read-only when returned by PeerState.GetRoundState().
type PeerRoundState struct {
Height int64 `json:"height"` // Height peer is at
Round int `json:"round"` // Round peer is at, -1 if unknown.
Step RoundStepType `json:"step"` // Step peer is at
StartTime time.Time `json:"start_time"` // Estimated start of round 0 at this height
Proposal bool `json:"proposal"` // True if peer has proposal for this round
ProposalBlockPartsHeader types.PartSetHeader `json:"proposal_block_parts_header"` //
ProposalBlockParts *cmn.BitArray `json:"proposal_block_parts"` //
ProposalPOLRound int `json:"proposal_pol_round"` // Proposal's POL round. -1 if none.
ProposalPOL *cmn.BitArray `json:"proposal_pol"` // nil until ProposalPOLMessage received.
Prevotes *cmn.BitArray `json:"prevotes"` // All votes peer has for this round
Precommits *cmn.BitArray `json:"precommits"` // All precommits peer has for this round
LastCommitRound int `json:"last_commit_round"` // Round of commit for last height. -1 if none.
LastCommit *cmn.BitArray `json:"last_commit"` // All commit precommits of commit for last height.
CatchupCommitRound int `json:"catchup_commit_round"` // Round that we have commit for. Not necessarily unique. -1 if none.
CatchupCommit *cmn.BitArray `json:"catchup_commit"` // All commit precommits peer has for this height & CatchupCommitRound
}

// String returns a string representation of the PeerRoundState
func (prs PeerRoundState) String() string {
return prs.StringIndented("")
}

// StringIndented returns a string representation of the PeerRoundState
func (prs PeerRoundState) StringIndented(indent string) string {
return fmt.Sprintf(`PeerRoundState{
%s %v/%v/%v @%v
%s Proposal %v -> %v
%s POL %v (round %v)
%s Prevotes %v
%s Precommits %v
%s LastCommit %v (round %v)
%s Catchup %v (round %v)
%s}`,
indent, prs.Height, prs.Round, prs.Step, prs.StartTime,
indent, prs.ProposalBlockPartsHeader, prs.ProposalBlockParts,
indent, prs.ProposalPOL, prs.ProposalPOLRound,
indent, prs.Prevotes,
indent, prs.Precommits,
indent, prs.LastCommit, prs.LastCommitRound,
indent, prs.CatchupCommit, prs.CatchupCommitRound,
indent)
}
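Because the new PeerRoundState above carries json tags, it can be serialized directly, e.g. with the package's amino codec. A minimal hedged sketch (the method name is hypothetical, cdc is the codec already defined in this package):

// Assumes cdc is the amino codec used elsewhere in consensus/types.
func (prs PeerRoundState) MarshalToJSON() ([]byte, error) {
	return cdc.MarshalJSON(prs)
}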
@@ -1,57 +0,0 @@
package types

import (
"fmt"
"time"

"github.com/tendermint/tendermint/types"
cmn "github.com/tendermint/tmlibs/common"
)

//-----------------------------------------------------------------------------

// PeerRoundState contains the known state of a peer.
// NOTE: Read-only when returned by PeerState.GetRoundState().
type PeerRoundState struct {
Height int64 // Height peer is at
Round int // Round peer is at, -1 if unknown.
Step RoundStepType // Step peer is at
StartTime time.Time // Estimated start of round 0 at this height
Proposal bool // True if peer has proposal for this round
ProposalBlockPartsHeader types.PartSetHeader //
ProposalBlockParts *cmn.BitArray //
ProposalPOLRound int // Proposal's POL round. -1 if none.
ProposalPOL *cmn.BitArray // nil until ProposalPOLMessage received.
Prevotes *cmn.BitArray // All votes peer has for this round
Precommits *cmn.BitArray // All precommits peer has for this round
LastCommitRound int // Round of commit for last height. -1 if none.
LastCommit *cmn.BitArray // All commit precommits of commit for last height.
CatchupCommitRound int // Round that we have commit for. Not necessarily unique. -1 if none.
CatchupCommit *cmn.BitArray // All commit precommits peer has for this height & CatchupCommitRound
}

// String returns a string representation of the PeerRoundState
func (prs PeerRoundState) String() string {
return prs.StringIndented("")
}

// StringIndented returns a string representation of the PeerRoundState
func (prs PeerRoundState) StringIndented(indent string) string {
return fmt.Sprintf(`PeerRoundState{
%s %v/%v/%v @%v
%s Proposal %v -> %v
%s POL %v (round %v)
%s Prevotes %v
%s Precommits %v
%s LastCommit %v (round %v)
%s Catchup %v (round %v)
%s}`,
indent, prs.Height, prs.Round, prs.Step, prs.StartTime,
indent, prs.ProposalBlockPartsHeader, prs.ProposalBlockParts,
indent, prs.ProposalPOL, prs.ProposalPOLRound,
indent, prs.Prevotes,
indent, prs.Precommits,
indent, prs.LastCommit, prs.LastCommitRound,
indent, prs.CatchupCommit, prs.CatchupCommitRound,
indent)
}
@@ -1,10 +1,12 @@
|
||||
package types
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/tendermint/tendermint/types"
|
||||
cmn "github.com/tendermint/tmlibs/common"
|
||||
)
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
@@ -13,6 +15,7 @@ import (
|
||||
// RoundStepType enumerates the state of the consensus state machine
|
||||
type RoundStepType uint8 // These must be numeric, ordered.
|
||||
|
||||
// RoundStepType
|
||||
const (
|
||||
RoundStepNewHeight = RoundStepType(0x01) // Wait til CommitTime + timeoutCommit
|
||||
RoundStepNewRound = RoundStepType(0x02) // Setup new round and go to RoundStepPropose
|
||||
@@ -52,40 +55,66 @@ func (rs RoundStepType) String() string {
|
||||
//-----------------------------------------------------------------------------
|
||||
|
||||
// RoundState defines the internal consensus state.
|
||||
// It is Immutable when returned from ConsensusState.GetRoundState()
|
||||
// TODO: Actually, only the top pointer is copied,
// so access to field pointers is still racy.
// NOTE: Not thread safe. Should only be manipulated by functions downstream
// of the cs.receiveRoutine.
|
||||
type RoundState struct {
|
||||
Height int64 // Height we are working on
|
||||
Round int
|
||||
Step RoundStepType
|
||||
StartTime time.Time
|
||||
CommitTime time.Time // Subjective time when +2/3 precommits for Block at Round were found
|
||||
Validators *types.ValidatorSet
|
||||
Proposal *types.Proposal
|
||||
ProposalBlock *types.Block
|
||||
ProposalBlockParts *types.PartSet
|
||||
LockedRound int
|
||||
LockedBlock *types.Block
|
||||
LockedBlockParts *types.PartSet
|
||||
Votes *HeightVoteSet
|
||||
CommitRound int //
|
||||
LastCommit *types.VoteSet // Last precommits at Height-1
|
||||
LastValidators *types.ValidatorSet
|
||||
Height int64 `json:"height"` // Height we are working on
|
||||
Round int `json:"round"`
|
||||
Step RoundStepType `json:"step"`
|
||||
StartTime time.Time `json:"start_time"`
|
||||
CommitTime time.Time `json:"commit_time"` // Subjective time when +2/3 precommits for Block at Round were found
|
||||
Validators *types.ValidatorSet `json:"validators"`
|
||||
Proposal *types.Proposal `json:"proposal"`
|
||||
ProposalBlock *types.Block `json:"proposal_block"`
|
||||
ProposalBlockParts *types.PartSet `json:"proposal_block_parts"`
|
||||
LockedRound int `json:"locked_round"`
|
||||
LockedBlock *types.Block `json:"locked_block"`
|
||||
LockedBlockParts *types.PartSet `json:"locked_block_parts"`
|
||||
ValidRound int `json:"valid_round"` // Last known round with POL for non-nil valid block.
|
||||
ValidBlock *types.Block `json:"valid_block"` // Last known block of POL mentioned above.
|
||||
ValidBlockParts *types.PartSet `json:"valid_block_parts"` // Last known block parts of POL mentioned above.
|
||||
Votes *HeightVoteSet `json:"votes"`
|
||||
CommitRound int `json:"commit_round"` //
|
||||
LastCommit *types.VoteSet `json:"last_commit"` // Last precommits at Height-1
|
||||
LastValidators *types.ValidatorSet `json:"last_validators"`
|
||||
}
|
||||
|
||||
// Compressed version of the RoundState for use in RPC
|
||||
type RoundStateSimple struct {
|
||||
HeightRoundStep string `json:"height/round/step"`
|
||||
StartTime time.Time `json:"start_time"`
|
||||
ProposalBlockHash cmn.HexBytes `json:"proposal_block_hash"`
|
||||
LockedBlockHash cmn.HexBytes `json:"locked_block_hash"`
|
||||
ValidBlockHash cmn.HexBytes `json:"valid_block_hash"`
|
||||
Votes json.RawMessage `json:"height_vote_set"`
|
||||
}
|
||||
|
||||
// Compress the RoundState to RoundStateSimple
|
||||
func (rs *RoundState) RoundStateSimple() RoundStateSimple {
|
||||
votesJSON, err := rs.Votes.MarshalJSON()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return RoundStateSimple{
|
||||
HeightRoundStep: fmt.Sprintf("%d/%d/%d", rs.Height, rs.Round, rs.Step),
|
||||
StartTime: rs.StartTime,
|
||||
ProposalBlockHash: rs.ProposalBlock.Hash(),
|
||||
LockedBlockHash: rs.LockedBlock.Hash(),
|
||||
ValidBlockHash: rs.ValidBlock.Hash(),
|
||||
Votes: votesJSON,
|
||||
}
|
||||
}
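Not part of this change, but as a minimal sketch of how the compressed form above might be served over RPC (the helper name is hypothetical; only encoding/json, io, and the types in this file are assumed):

// writeRoundStateSimple is a hypothetical helper. RoundStateSimple panics if
// rs.Votes cannot be marshaled, so callers are expected to pass a fully
// initialized RoundState.
func writeRoundStateSimple(w io.Writer, rs *RoundState) error {
	out, err := json.Marshal(rs.RoundStateSimple())
	if err != nil {
		return err
	}
	_, err = w.Write(out)
	return err
}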
|
||||
|
||||
// RoundStateEvent returns the H/R/S of the RoundState as an event.
|
||||
func (rs *RoundState) RoundStateEvent() types.EventDataRoundState {
|
||||
// XXX: copy the RoundState
|
||||
// if we want to avoid this, we may need synchronous events after all
|
||||
rs_ := *rs
|
||||
rsCopy := *rs
|
||||
edrs := types.EventDataRoundState{
|
||||
Height: rs.Height,
|
||||
Round: rs.Round,
|
||||
Step: rs.Step.String(),
|
||||
RoundState: &rs_,
|
||||
RoundState: &rsCopy,
|
||||
}
|
||||
return edrs
|
||||
}
|
||||
@@ -106,6 +135,8 @@ func (rs *RoundState) StringIndented(indent string) string {
|
||||
%s ProposalBlock: %v %v
|
||||
%s LockedRound: %v
|
||||
%s LockedBlock: %v %v
|
||||
%s ValidRound: %v
|
||||
%s ValidBlock: %v %v
|
||||
%s Votes: %v
|
||||
%s LastCommit: %v
|
||||
%s LastValidators:%v
|
||||
@@ -113,14 +144,16 @@ func (rs *RoundState) StringIndented(indent string) string {
|
||||
indent, rs.Height, rs.Round, rs.Step,
|
||||
indent, rs.StartTime,
|
||||
indent, rs.CommitTime,
|
||||
indent, rs.Validators.StringIndented(indent+" "),
|
||||
indent, rs.Validators.StringIndented(indent+" "),
|
||||
indent, rs.Proposal,
|
||||
indent, rs.ProposalBlockParts.StringShort(), rs.ProposalBlock.StringShort(),
|
||||
indent, rs.LockedRound,
|
||||
indent, rs.LockedBlockParts.StringShort(), rs.LockedBlock.StringShort(),
|
||||
indent, rs.Votes.StringIndented(indent+" "),
|
||||
indent, rs.ValidRound,
|
||||
indent, rs.ValidBlockParts.StringShort(), rs.ValidBlock.StringShort(),
|
||||
indent, rs.Votes.StringIndented(indent+" "),
|
||||
indent, rs.LastCommit.StringShort(),
|
||||
indent, rs.LastValidators.StringIndented(indent+" "),
|
||||
indent, rs.LastValidators.StringIndented(indent+" "),
|
||||
indent)
|
||||
}
|
||||
|
consensus/types/round_state_test.go (new file, 95 lines)
@@ -0,0 +1,95 @@
|
||||
package types
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/tendermint/go-amino"
|
||||
"github.com/tendermint/go-crypto"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
cmn "github.com/tendermint/tmlibs/common"
|
||||
)
|
||||
|
||||
func BenchmarkRoundStateDeepCopy(b *testing.B) {
|
||||
b.StopTimer()
|
||||
|
||||
// Random validators
|
||||
nval, ntxs := 100, 100
|
||||
vset, _ := types.RandValidatorSet(nval, 1)
|
||||
precommits := make([]*types.Vote, nval)
|
||||
blockID := types.BlockID{
|
||||
Hash: cmn.RandBytes(20),
|
||||
PartsHeader: types.PartSetHeader{
|
||||
Hash: cmn.RandBytes(20),
|
||||
},
|
||||
}
|
||||
sig := crypto.SignatureEd25519{}
|
||||
for i := 0; i < nval; i++ {
|
||||
precommits[i] = &types.Vote{
|
||||
ValidatorAddress: types.Address(cmn.RandBytes(20)),
|
||||
Timestamp: time.Now(),
|
||||
BlockID: blockID,
|
||||
Signature: sig,
|
||||
}
|
||||
}
|
||||
txs := make([]types.Tx, ntxs)
|
||||
for i := 0; i < ntxs; i++ {
|
||||
txs[i] = cmn.RandBytes(100)
|
||||
}
|
||||
// Random block
|
||||
block := &types.Block{
|
||||
Header: &types.Header{
|
||||
ChainID: cmn.RandStr(12),
|
||||
Time: time.Now(),
|
||||
LastBlockID: blockID,
|
||||
LastCommitHash: cmn.RandBytes(20),
|
||||
DataHash: cmn.RandBytes(20),
|
||||
ValidatorsHash: cmn.RandBytes(20),
|
||||
ConsensusHash: cmn.RandBytes(20),
|
||||
AppHash: cmn.RandBytes(20),
|
||||
LastResultsHash: cmn.RandBytes(20),
|
||||
EvidenceHash: cmn.RandBytes(20),
|
||||
},
|
||||
Data: &types.Data{
|
||||
Txs: txs,
|
||||
},
|
||||
Evidence: types.EvidenceData{},
|
||||
LastCommit: &types.Commit{
|
||||
BlockID: blockID,
|
||||
Precommits: precommits,
|
||||
},
|
||||
}
|
||||
parts := block.MakePartSet(4096)
|
||||
// Random Proposal
|
||||
proposal := &types.Proposal{
|
||||
Timestamp: time.Now(),
|
||||
BlockPartsHeader: types.PartSetHeader{
|
||||
Hash: cmn.RandBytes(20),
|
||||
},
|
||||
POLBlockID: blockID,
|
||||
Signature: sig,
|
||||
}
|
||||
// Random HeightVoteSet
|
||||
// TODO: hvs :=
|
||||
|
||||
rs := &RoundState{
|
||||
StartTime: time.Now(),
|
||||
CommitTime: time.Now(),
|
||||
Validators: vset,
|
||||
Proposal: proposal,
|
||||
ProposalBlock: block,
|
||||
ProposalBlockParts: parts,
|
||||
LockedBlock: block,
|
||||
LockedBlockParts: parts,
|
||||
ValidBlock: block,
|
||||
ValidBlockParts: parts,
|
||||
Votes: nil, // TODO
|
||||
LastCommit: nil, // TODO
|
||||
LastValidators: vset,
|
||||
}
|
||||
b.StartTimer()
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
amino.DeepCopy(rs)
|
||||
}
|
||||
}
|
consensus/types/wire.go (new file, 12 lines)
@@ -0,0 +1,12 @@
|
||||
package types
|
||||
|
||||
import (
|
||||
"github.com/tendermint/go-amino"
|
||||
"github.com/tendermint/go-crypto"
|
||||
)
|
||||
|
||||
var cdc = amino.NewCodec()
|
||||
|
||||
func init() {
|
||||
crypto.RegisterAmino(cdc)
|
||||
}
|
@@ -1,7 +1,6 @@
|
||||
package consensus
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"hash/crc32"
|
||||
@@ -11,7 +10,7 @@ import (
|
||||
|
||||
"github.com/pkg/errors"
|
||||
|
||||
wire "github.com/tendermint/go-wire"
|
||||
amino "github.com/tendermint/go-amino"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
auto "github.com/tendermint/tmlibs/autofile"
|
||||
cmn "github.com/tendermint/tmlibs/common"
|
||||
@@ -38,13 +37,13 @@ type EndHeightMessage struct {
|
||||
|
||||
type WALMessage interface{}
|
||||
|
||||
var _ = wire.RegisterInterface(
|
||||
struct{ WALMessage }{},
|
||||
wire.ConcreteType{types.EventDataRoundState{}, 0x01},
|
||||
wire.ConcreteType{msgInfo{}, 0x02},
|
||||
wire.ConcreteType{timeoutInfo{}, 0x03},
|
||||
wire.ConcreteType{EndHeightMessage{}, 0x04},
|
||||
)
|
||||
func RegisterWALMessages(cdc *amino.Codec) {
|
||||
cdc.RegisterInterface((*WALMessage)(nil), nil)
|
||||
cdc.RegisterConcrete(types.EventDataRoundState{}, "tendermint/wal/EventDataRoundState", nil)
|
||||
cdc.RegisterConcrete(msgInfo{}, "tendermint/wal/MsgInfo", nil)
|
||||
cdc.RegisterConcrete(timeoutInfo{}, "tendermint/wal/TimeoutInfo", nil)
|
||||
cdc.RegisterConcrete(EndHeightMessage{}, "tendermint/wal/EndHeightMessage", nil)
|
||||
}
|
||||
|
||||
//--------------------------------------------------------
|
||||
// Simple write-ahead logger
|
||||
@@ -68,12 +67,11 @@ type baseWAL struct {
|
||||
cmn.BaseService
|
||||
|
||||
group *auto.Group
|
||||
light bool // ignore block parts
|
||||
|
||||
enc *WALEncoder
|
||||
}
|
||||
|
||||
func NewWAL(walFile string, light bool) (*baseWAL, error) {
|
||||
func NewWAL(walFile string) (*baseWAL, error) {
|
||||
err := cmn.EnsureDir(filepath.Dir(walFile), 0700)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to ensure WAL directory is in place")
|
||||
@@ -85,7 +83,6 @@ func NewWAL(walFile string, light bool) (*baseWAL, error) {
|
||||
}
|
||||
wal := &baseWAL{
|
||||
group: group,
|
||||
light: light,
|
||||
enc: NewWALEncoder(group),
|
||||
}
|
||||
wal.BaseService = *cmn.NewBaseService(nil, "baseWAL", wal)
|
||||
@@ -118,15 +115,6 @@ func (wal *baseWAL) Save(msg WALMessage) {
|
||||
return
|
||||
}
|
||||
|
||||
if wal.light {
|
||||
// in light mode we only write new steps, timeouts, and our own votes (no proposals, block parts)
|
||||
if mi, ok := msg.(msgInfo); ok {
|
||||
if mi.PeerID != "" {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Write the wal message
|
||||
if err := wal.enc.Encode(&TimedWALMessage{time.Now(), msg}); err != nil {
|
||||
cmn.PanicQ(cmn.Fmt("Error writing msg to consensus wal: %v \n\nMessage: %v", err, msg))
|
||||
@@ -193,7 +181,7 @@ func (wal *baseWAL) SearchForEndHeight(height int64, options *WALSearchOptions)
|
||||
|
||||
// A WALEncoder writes custom-encoded WAL messages to an output stream.
|
||||
//
|
||||
// Format: 4 bytes CRC sum + 4 bytes length + arbitrary-length value (go-wire encoded)
|
||||
// Format: 4 bytes CRC sum + 4 bytes length + arbitrary-length value (go-amino encoded)
|
||||
type WALEncoder struct {
|
||||
wr io.Writer
|
||||
}
|
||||
@@ -205,7 +193,7 @@ func NewWALEncoder(wr io.Writer) *WALEncoder {
|
||||
|
||||
// Encode writes the custom encoding of v to the stream.
|
||||
func (enc *WALEncoder) Encode(v *TimedWALMessage) error {
|
||||
data := wire.BinaryBytes(v)
|
||||
data := cdc.MustMarshalBinaryBare(v)
|
||||
|
||||
crc := crc32.Checksum(data, crc32c)
|
||||
length := uint32(len(data))
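// The rest of Encode is not shown in this hunk; it writes the frame described
// by the format comment above. One way to lay it out, sketched here only as
// an illustration consistent with "CRC + length + value" (the exact byte
// order and buffer layout are assumptions, not the committed code):
//
//	msg := make([]byte, 8+len(data))
//	binary.BigEndian.PutUint32(msg[0:4], crc)
//	binary.BigEndian.PutUint32(msg[4:8], length)
//	copy(msg[8:], data)
//	_, err := enc.wr.Write(msg)
//	return err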
|
||||
@@ -298,9 +286,8 @@ func (dec *WALDecoder) Decode() (*TimedWALMessage, error) {
|
||||
return nil, DataCorruptionError{fmt.Errorf("checksums do not match: (read: %v, actual: %v)", crc, actualCRC)}
|
||||
}
|
||||
|
||||
var nn int
|
||||
var res *TimedWALMessage // nolint: gosimple
|
||||
res = wire.ReadBinary(&TimedWALMessage{}, bytes.NewBuffer(data), int(length), &nn, &err).(*TimedWALMessage)
|
||||
var res = new(TimedWALMessage) // nolint: gosimple
|
||||
err = cdc.UnmarshalBinaryBare(data, res)
|
||||
if err != nil {
|
||||
return nil, DataCorruptionError{fmt.Errorf("failed to decode data: %v", err)}
|
||||
}
|
||||
|
@@ -4,19 +4,19 @@ import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/tendermint/abci/example/dummy"
|
||||
"github.com/tendermint/abci/example/kvstore"
|
||||
bc "github.com/tendermint/tendermint/blockchain"
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
"github.com/tendermint/tendermint/proxy"
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
pvm "github.com/tendermint/tendermint/types/priv_validator"
|
||||
auto "github.com/tendermint/tmlibs/autofile"
|
||||
cmn "github.com/tendermint/tmlibs/common"
|
||||
"github.com/tendermint/tmlibs/db"
|
||||
@@ -25,13 +25,13 @@ import (
|
||||
|
||||
// WALWithNBlocks generates a consensus WAL. It does this by spinning up a
|
||||
// stripped down version of node (proxy app, event bus, consensus state) with a
|
||||
// persistent dummy application and special consensus wal instance
|
||||
// persistent kvstore application and special consensus wal instance
|
||||
// (byteBufferWAL) and waits until numBlocks are created. Then it returns a WAL
|
||||
// content.
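// Illustrative usage only (not from this diff), mirroring how the WAL tests
// in this package consume the generated bytes:
//
//	walBody, err := WALWithNBlocks(6)
//	if err != nil {
//		panic(err)
//	}
//	walFile := tempWALWithData(walBody) // helper used by the tests in this package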
|
||||
func WALWithNBlocks(numBlocks int) (data []byte, err error) {
|
||||
config := getConfig()
|
||||
|
||||
app := dummy.NewPersistentDummyApplication(filepath.Join(config.DBDir(), "wal_generator"))
|
||||
app := kvstore.NewPersistentKVStoreApplication(filepath.Join(config.DBDir(), "wal_generator"))
|
||||
|
||||
logger := log.TestingLogger().With("wal_generator", "wal_generator")
|
||||
logger.Info("generating WAL (last height msg excluded)", "numBlocks", numBlocks)
|
||||
@@ -40,7 +40,7 @@ func WALWithNBlocks(numBlocks int) (data []byte, err error) {
|
||||
// COPY PASTE FROM node.go WITH A FEW MODIFICATIONS
|
||||
// NOTE: we can't import node package because of circular dependency
|
||||
privValidatorFile := config.PrivValidatorFile()
|
||||
privValidator := types.LoadOrGenPrivValidatorFS(privValidatorFile)
|
||||
privValidator := pvm.LoadOrGenFilePV(privValidatorFile)
|
||||
genDoc, err := types.GenesisDocFromFile(config.GenesisFile())
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to read genesis file")
|
||||
@@ -52,7 +52,7 @@ func WALWithNBlocks(numBlocks int) (data []byte, err error) {
|
||||
return nil, errors.Wrap(err, "failed to make genesis state")
|
||||
}
|
||||
blockStore := bc.NewBlockStore(blockStoreDB)
|
||||
handshaker := NewHandshaker(stateDB, state, blockStore)
|
||||
handshaker := NewHandshaker(stateDB, state, blockStore, genDoc.AppState())
|
||||
proxyApp := proxy.NewAppConns(proxy.NewLocalClientCreator(app), handshaker)
|
||||
proxyApp.SetLogger(logger.With("module", "proxy"))
|
||||
if err := proxyApp.Start(); err != nil {
|
||||
@@ -116,7 +116,7 @@ func makePathname() string {
|
||||
func randPort() int {
|
||||
// returns between base and base + spread
|
||||
base, spread := 20000, 20000
|
||||
return base + rand.Intn(spread)
|
||||
return base + cmn.RandIntn(spread)
|
||||
}
|
||||
|
||||
func makeAddrs() (string, string, string) {
|
||||
|
@@ -3,11 +3,10 @@ package consensus
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"sync"
|
||||
// "sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
wire "github.com/tendermint/go-wire"
|
||||
"github.com/tendermint/tendermint/consensus/types"
|
||||
tmtypes "github.com/tendermint/tendermint/types"
|
||||
cmn "github.com/tendermint/tmlibs/common"
|
||||
@@ -36,7 +35,7 @@ func TestWALEncoderDecoder(t *testing.T) {
|
||||
decoded, err := dec.Decode()
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, msg.Time.Truncate(time.Millisecond), decoded.Time)
|
||||
assert.Equal(t, msg.Time.UTC(), decoded.Time)
|
||||
assert.Equal(t, msg.Msg, decoded.Msg)
|
||||
}
|
||||
}
|
||||
@@ -48,7 +47,7 @@ func TestWALSearchForEndHeight(t *testing.T) {
|
||||
}
|
||||
walFile := tempWALWithData(walBody)
|
||||
|
||||
wal, err := NewWAL(walFile, false)
|
||||
wal, err := NewWAL(walFile)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -68,6 +67,7 @@ func TestWALSearchForEndHeight(t *testing.T) {
|
||||
assert.Equal(t, rs.Height, h+1, cmn.Fmt("wrong height"))
|
||||
}
|
||||
|
||||
/*
|
||||
var initOnce sync.Once
|
||||
|
||||
func registerInterfacesOnce() {
|
||||
@@ -78,6 +78,7 @@ func registerInterfacesOnce() {
|
||||
)
|
||||
})
|
||||
}
|
||||
*/
|
||||
|
||||
func nBytes(n int) []byte {
|
||||
buf := make([]byte, n)
|
||||
@@ -86,7 +87,7 @@ func nBytes(n int) []byte {
|
||||
}
|
||||
|
||||
func benchmarkWalDecode(b *testing.B, n int) {
|
||||
registerInterfacesOnce()
|
||||
// registerInterfacesOnce()
|
||||
|
||||
buf := new(bytes.Buffer)
|
||||
enc := NewWALEncoder(buf)
|
||||
|
consensus/wire.go (new file, 14 lines)
@@ -0,0 +1,14 @@
|
||||
package consensus
|
||||
|
||||
import (
|
||||
"github.com/tendermint/go-amino"
|
||||
"github.com/tendermint/go-crypto"
|
||||
)
|
||||
|
||||
var cdc = amino.NewCodec()
|
||||
|
||||
func init() {
|
||||
RegisterConsensusMessages(cdc)
|
||||
RegisterWALMessages(cdc)
|
||||
crypto.RegisterAmino(cdc)
|
||||
}
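Not part of the file above, but a minimal sketch of how this package-level codec is then used, mirroring the MustMarshalBinaryBare/UnmarshalBinaryBare calls that appear in wal.go (the helper itself is hypothetical):

// roundTripTimedWALMessage is a hypothetical helper: encode and decode a
// TimedWALMessage through the codec registered above, as WALEncoder and
// WALDecoder do.
func roundTripTimedWALMessage(in *TimedWALMessage) (*TimedWALMessage, error) {
	bz := cdc.MustMarshalBinaryBare(in)
	out := new(TimedWALMessage)
	if err := cdc.UnmarshalBinaryBare(bz, out); err != nil {
		return nil, err
	}
	return out, nil
}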
|
docker-compose.yml (new file, 68 lines)
@@ -0,0 +1,68 @@
|
||||
version: '3'
|
||||
|
||||
services:
|
||||
node0:
|
||||
container_name: node0
|
||||
image: "tendermint/localnode"
|
||||
ports:
|
||||
- "46656-46657:46656-46657"
|
||||
environment:
|
||||
- ID=0
|
||||
- LOG=$${LOG:-tendermint.log}
|
||||
volumes:
|
||||
- ./build:/tendermint:Z
|
||||
networks:
|
||||
localnet:
|
||||
ipv4_address: 192.167.10.2
|
||||
|
||||
node1:
|
||||
container_name: node1
|
||||
image: "tendermint/localnode"
|
||||
ports:
|
||||
- "46659-46660:46656-46657"
|
||||
environment:
|
||||
- ID=1
|
||||
- LOG=$${LOG:-tendermint.log}
|
||||
volumes:
|
||||
- ./build:/tendermint:Z
|
||||
networks:
|
||||
localnet:
|
||||
ipv4_address: 192.167.10.3
|
||||
|
||||
node2:
|
||||
container_name: node2
|
||||
image: "tendermint/localnode"
|
||||
environment:
|
||||
- ID=2
|
||||
- LOG=$${LOG:-tendermint.log}
|
||||
ports:
|
||||
- "46661-46662:46656-46657"
|
||||
volumes:
|
||||
- ./build:/tendermint:Z
|
||||
networks:
|
||||
localnet:
|
||||
ipv4_address: 192.167.10.4
|
||||
|
||||
node3:
|
||||
container_name: node3
|
||||
image: "tendermint/localnode"
|
||||
environment:
|
||||
- ID=3
|
||||
- LOG=$${LOG:-tendermint.log}
|
||||
ports:
|
||||
- "46663-46664:46656-46657"
|
||||
volumes:
|
||||
- ./build:/tendermint:Z
|
||||
networks:
|
||||
localnet:
|
||||
ipv4_address: 192.167.10.5
|
||||
|
||||
networks:
|
||||
localnet:
|
||||
driver: bridge
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
-
|
||||
subnet: 192.167.10.0/16
|
||||
|
@@ -16,15 +16,15 @@ Next, install the ``abci-cli`` tool and example applications:
|
||||
|
||||
go get -u github.com/tendermint/abci/cmd/abci-cli
|
||||
|
||||
If this fails, you may need to use ``glide`` to get vendored
|
||||
If this fails, you may need to use `dep <https://github.com/golang/dep>`__ to get vendored
|
||||
dependencies:
|
||||
|
||||
::
|
||||
|
||||
go get github.com/Masterminds/glide
|
||||
cd $GOPATH/src/github.com/tendermint/abci
|
||||
glide install
|
||||
go install ./cmd/abci-cli
|
||||
make get_tools
|
||||
make get_vendor_deps
|
||||
make install
|
||||
|
||||
Now run ``abci-cli`` to see the list of commands:
|
||||
|
||||
@@ -40,7 +40,7 @@ Now run ``abci-cli`` to see the list of commands:
|
||||
console Start an interactive abci console for multiple commands
|
||||
counter ABCI demo example
|
||||
deliver_tx Deliver a new tx to the application
|
||||
dummy ABCI demo example
|
||||
kvstore ABCI demo example
|
||||
echo Have the application echo a message
|
||||
help Help about any command
|
||||
info Get some info about the application
|
||||
@@ -56,8 +56,8 @@ Now run ``abci-cli`` to see the list of commands:
|
||||
Use "abci-cli [command] --help" for more information about a command.
|
||||
|
||||
|
||||
Dummy - First Example
|
||||
---------------------
|
||||
KVStore - First Example
|
||||
-----------------------
|
||||
|
||||
The ``abci-cli`` tool lets us send ABCI messages to our application, to
|
||||
help build and debug them.
|
||||
@@ -66,8 +66,8 @@ The most important messages are ``deliver_tx``, ``check_tx``, and
|
||||
``commit``, but there are others for convenience, configuration, and
|
||||
information purposes.
|
||||
|
||||
We'll start a dummy application, which was installed at the same time as
|
||||
``abci-cli`` above. The dummy just stores transactions in a merkle tree.
|
||||
We'll start a kvstore application, which was installed at the same time as
|
||||
``abci-cli`` above. The kvstore just stores transactions in a merkle tree.
|
||||
|
||||
Its code can be found `here <https://github.com/tendermint/abci/blob/master/cmd/abci-cli/abci-cli.go>`__ and looks like:
|
||||
|
||||
@@ -75,20 +75,20 @@ Its code can be found `here <https://github.com/tendermint/abci/blob/master/cmd/
|
||||
|
||||
.. container:: header
|
||||
|
||||
**Show/Hide Dummy Example**
|
||||
**Show/Hide KVStore Example**
|
||||
|
||||
.. code-block:: go
|
||||
|
||||
func cmdDummy(cmd *cobra.Command, args []string) error {
|
||||
func cmdKVStore(cmd *cobra.Command, args []string) error {
|
||||
logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
|
||||
|
||||
// Create the application - in memory or persisted to disk
|
||||
var app types.Application
|
||||
if flagPersist == "" {
|
||||
app = dummy.NewDummyApplication()
|
||||
app = kvstore.NewKVStoreApplication()
|
||||
} else {
|
||||
app = dummy.NewPersistentDummyApplication(flagPersist)
|
||||
app.(*dummy.PersistentDummyApplication).SetLogger(logger.With("module", "dummy"))
|
||||
app = kvstore.NewPersistentKVStoreApplication(flagPersist)
|
||||
app.(*kvstore.PersistentKVStoreApplication).SetLogger(logger.With("module", "kvstore"))
|
||||
}
|
||||
|
||||
// Start the listener
|
||||
@@ -113,7 +113,7 @@ Start by running:
|
||||
|
||||
::
|
||||
|
||||
abci-cli dummy
|
||||
abci-cli kvstore
|
||||
|
||||
And in another terminal, run
|
||||
|
||||
@@ -229,7 +229,7 @@ Counter - Another Example
|
||||
Now that we've got the hang of it, let's try another application, the
|
||||
"counter" app.
|
||||
|
||||
Like the dummy app, its code can be found `here <https://github.com/tendermint/abci/blob/master/cmd/abci-cli/abci-cli.go>`__ and looks like:
|
||||
Like the kvstore app, its code can be found `here <https://github.com/tendermint/abci/blob/master/cmd/abci-cli/abci-cli.go>`__ and looks like:
|
||||
|
||||
.. container:: toggle
|
||||
|
||||
@@ -288,7 +288,7 @@ other peers.
|
||||
In this instance of the counter app, ``check_tx`` only allows
|
||||
transactions whose integer is greater than the last committed one.
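As a rough sketch of that rule (not the actual counter source; the function name is hypothetical and ``encoding/binary`` plus ``fmt`` are assumed), such a ``check_tx`` boils down to a comparison against the last committed value:

.. code-block:: go

    // checkCounterTx is a sketch: reject any tx whose encoded integer is not
    // strictly greater than the last committed counter value.
    func checkCounterTx(tx []byte, lastCommitted uint64) error {
        if len(tx) > 8 {
            return fmt.Errorf("tx too large: %d bytes", len(tx))
        }
        padded := make([]byte, 8)
        copy(padded[8-len(tx):], tx) // big-endian, left-padded
        value := binary.BigEndian.Uint64(padded)
        if value <= lastCommitted {
            return fmt.Errorf("got %d, expected > %d", value, lastCommitted)
        }
        return nil
    }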
|
||||
|
||||
Let's kill the console and the dummy application, and start the counter
|
||||
Let's kill the console and the kvstore application, and start the counter
|
||||
app:
|
||||
|
||||
::
|
||||
@@ -328,7 +328,7 @@ In another window, start the ``abci-cli console``:
|
||||
-> data.hex: 0x7B22686173686573223A302C22747873223A327D
|
||||
|
||||
This is a very simple application, but between ``counter`` and
|
||||
``dummy``, its easy to see how you can build out arbitrary application
|
||||
``kvstore``, it's easy to see how you can build out arbitrary application
|
||||
states on top of the ABCI. `Hyperledger's
|
||||
Burrow <https://github.com/hyperledger/burrow>`__ also runs atop ABCI,
|
||||
bringing with it Ethereum-like accounts, the Ethereum virtual-machine,
|
||||
|
@@ -66,15 +66,14 @@ and possibly await a response). And one method to query app-specific
|
||||
data from the ABCI application.
|
||||
|
||||
Pros:
|
||||
* Server code already written
|
||||
* Access to block headers to validate merkle proofs (nice for light clients)
|
||||
* Basic read/write functionality is supported
|
||||
|
||||
- Server code already written
|
||||
- Access to block headers to validate merkle proofs (nice for light clients)
|
||||
- Basic read/write functionality is supported
|
||||
|
||||
Cons:
|
||||
* Limited interface to app. All queries must be serialized into
|
||||
[]byte (less expressive than JSON over HTTP) and there is no way to push
|
||||
data from ABCI app to the client (eg. notify me if account X receives a
|
||||
transaction)
|
||||
|
||||
- Limited interface to app. All queries must be serialized into []byte (less expressive than JSON over HTTP) and there is no way to push data from ABCI app to the client (eg. notify me if account X receives a transaction)
|
||||
|
||||
Custom ABCI server
|
||||
~~~~~~~~~~~~~~~~~~
|
||||
@@ -92,14 +91,19 @@ store. For "reads", we can do any queries we wish that are supported by
|
||||
our architecture, using any web technology that is useful. The general
|
||||
architecture is shown in the following diagram:
|
||||
|
||||
Pros: \* Separates application logic from blockchain logic \* Allows
|
||||
much richer, more flexible client-facing API \* Allows pub-sub, watching
|
||||
certain fields, etc.
|
||||
.. figure:: assets/tm-application-example.png
|
||||
|
||||
Cons: \* Access to ABCI app can be dangerous (be VERY careful not to
|
||||
write unless it comes from the validator node) \* No direct access to
|
||||
the blockchain headers to verify tx \* You must write your own API (but
|
||||
maybe that's a pro...)
|
||||
Pros:
|
||||
|
||||
- Separates application logic from blockchain logic
|
||||
- Allows much richer, more flexible client-facing API
|
||||
- Allows pub-sub, watching certain fields, etc.
|
||||
|
||||
Cons:
|
||||
|
||||
- Access to ABCI app can be dangerous (be VERY careful not to write unless it comes from the validator node)
|
||||
- No direct access to the blockchain headers to verify tx
|
||||
- You must write your own API (but maybe that's a pro...)
|
||||
|
||||
Hybrid solutions
|
||||
~~~~~~~~~~~~~~~~
|
||||
@@ -108,9 +112,13 @@ Likely the least secure but most versatile. The client can access both
|
||||
the tendermint node for all blockchain info, as well as a custom app
|
||||
server, for complex queries and pub-sub on the abci app.
|
||||
|
||||
Pros: All from both above solutions
|
||||
Pros:
|
||||
|
||||
Cons: Even more complexity; even more attack vectors (less
|
||||
- All from both above solutions
|
||||
|
||||
Cons:
|
||||
|
||||
- Even more complexity; even more attack vectors (less
|
||||
security)
|
||||
|
||||
Scalability
|
||||
|
@@ -142,10 +142,10 @@ It is unlikely that you will need to implement a client. For details of
|
||||
our client, see
|
||||
`here <https://github.com/tendermint/abci/tree/master/client>`__.
|
||||
|
||||
Most of the examples below are from `dummy application
|
||||
<https://github.com/tendermint/abci/blob/master/example/dummy/dummy.go>`__,
|
||||
which is a part of the abci repo. `persistent_dummy application
|
||||
<https://github.com/tendermint/abci/blob/master/example/dummy/persistent_dummy.go>`__
|
||||
Most of the examples below are from `kvstore application
|
||||
<https://github.com/tendermint/abci/blob/master/example/kvstore/kvstore.go>`__,
|
||||
which is a part of the abci repo. `persistent_kvstore application
|
||||
<https://github.com/tendermint/abci/blob/master/example/kvstore/persistent_kvstore.go>`__
|
||||
is used to show ``BeginBlock``, ``EndBlock`` and ``InitChain``
|
||||
example implementations.
|
||||
|
||||
@@ -178,21 +178,22 @@ connection, to query the local state of the app.
|
||||
Mempool Connection
|
||||
~~~~~~~~~~~~~~~~~~
|
||||
|
||||
The mempool connection is used *only* for CheckTx requests. Transactions
|
||||
are run using CheckTx in the same order they were received by the
|
||||
validator. If the CheckTx returns ``OK``, the transaction is kept in
|
||||
memory and relayed to other peers in the same order it was received.
|
||||
Otherwise, it is discarded.
|
||||
The mempool connection is used *only* for CheckTx requests.
|
||||
Transactions are run using CheckTx in the same order they were
|
||||
received by the validator. If the CheckTx returns ``OK``, the
|
||||
transaction is kept in memory and relayed to other peers in the same
|
||||
order it was received. Otherwise, it is discarded.
|
||||
|
||||
CheckTx requests run concurrently with block processing; so they should
|
||||
run against a copy of the main application state which is reset after
|
||||
every block. This copy is necessary to track transitions made by a
|
||||
sequence of CheckTx requests before they are included in a block. When a
|
||||
block is committed, the application must ensure to reset the mempool
|
||||
state to the latest committed state. Tendermint Core will then filter
|
||||
through all transactions in the mempool, removing any that were included
|
||||
in the block, and re-run the rest using CheckTx against the post-Commit
|
||||
mempool state.
|
||||
CheckTx requests run concurrently with block processing; so they
|
||||
should run against a copy of the main application state which is reset
|
||||
after every block. This copy is necessary to track transitions made by
|
||||
a sequence of CheckTx requests before they are included in a block.
|
||||
When a block is committed, the application must ensure to reset the
|
||||
mempool state to the latest committed state. Tendermint Core will then
|
||||
filter through all transactions in the mempool, removing any that were
|
||||
included in the block, and re-run the rest using CheckTx against the
|
||||
post-Commit mempool state (this behaviour can be turned off with
|
||||
``[mempool] recheck = false``).
|
||||
|
||||
.. container:: toggle
|
||||
|
||||
@@ -202,7 +203,7 @@ mempool state.
|
||||
|
||||
.. code-block:: go
|
||||
|
||||
func (app *DummyApplication) CheckTx(tx []byte) types.Result {
|
||||
func (app *KVStoreApplication) CheckTx(tx []byte) types.Result {
|
||||
return types.OK
|
||||
}
|
||||
|
||||
@@ -226,6 +227,23 @@ mempool state.
|
||||
}
|
||||
}
|
||||
|
||||
Replay Protection
|
||||
^^^^^^^^^^^^^^^^^
|
||||
To prevent old transactions from being replayed, CheckTx must
|
||||
implement replay protection.
|
||||
|
||||
Tendermint provides the first layer of defence by keeping a lightweight
in-memory cache of the last 100k transactions (``[mempool] cache_size``)
in the mempool. If Tendermint has just started, or clients have sent more
than 100k transactions, old transactions may be sent to the application,
so it is important that CheckTx implements some logic to handle them.
|
||||
|
||||
There are cases where a transaction will (or may) become valid in some
|
||||
future state, in which case you probably want to disable Tendermint's
|
||||
cache. You can do that by setting ``[mempool] cache_size = 0`` in the
|
||||
config.
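To make the earlier point concrete, here is a sketch of application-side replay handling; every name below is hypothetical and not taken from the abci repo:

.. code-block:: go

    // replayCache is a hypothetical, bounded record of recently seen txs.
    type replayCache struct {
        seen map[string]bool
        keys []string
        max  int
    }

    func newReplayCache(max int) *replayCache {
        return &replayCache{seen: make(map[string]bool), max: max}
    }

    // Seen marks tx as observed and reports whether it was already present.
    func (c *replayCache) Seen(tx []byte) bool {
        k := string(tx)
        if c.seen[k] {
            return true
        }
        if len(c.keys) >= c.max { // evict the oldest entry
            delete(c.seen, c.keys[0])
            c.keys = c.keys[1:]
        }
        c.seen[k] = true
        c.keys = append(c.keys, k)
        return false
    }

CheckTx would then reject any transaction for which ``Seen`` returns true.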
|
||||
|
||||
Consensus Connection
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
@@ -263,7 +281,7 @@ merkle root of the data returned by the DeliverTx requests, or both.
|
||||
.. code-block:: go
|
||||
|
||||
// tx is either "key=value" or just arbitrary bytes
|
||||
func (app *DummyApplication) DeliverTx(tx []byte) types.Result {
|
||||
func (app *KVStoreApplication) DeliverTx(tx []byte) types.Result {
|
||||
parts := strings.Split(string(tx), "=")
|
||||
if len(parts) == 2 {
|
||||
app.state.Set([]byte(parts[0]), []byte(parts[1]))
|
||||
@@ -327,7 +345,7 @@ job of the `Handshake <#handshake>`__.
|
||||
|
||||
.. code-block:: go
|
||||
|
||||
func (app *DummyApplication) Commit() types.Result {
|
||||
func (app *KVStoreApplication) Commit() types.Result {
|
||||
hash := app.state.Hash()
|
||||
return types.NewResultOK(hash, "")
|
||||
}
|
||||
@@ -369,7 +387,7 @@ pick up from when it restarts. See information on the Handshake, below.
|
||||
.. code-block:: go
|
||||
|
||||
// Track the block hash and header information
|
||||
func (app *PersistentDummyApplication) BeginBlock(params types.RequestBeginBlock) {
|
||||
func (app *PersistentKVStoreApplication) BeginBlock(params types.RequestBeginBlock) {
|
||||
// update latest block info
|
||||
app.blockHeader = params.Header
|
||||
|
||||
@@ -423,7 +441,7 @@ for details on how it tracks validators.
|
||||
.. code-block:: go
|
||||
|
||||
// Update the validator set
|
||||
func (app *PersistentDummyApplication) EndBlock(req types.RequestEndBlock) types.ResponseEndBlock {
|
||||
func (app *PersistentKVStoreApplication) EndBlock(req types.RequestEndBlock) types.ResponseEndBlock {
|
||||
return types.ResponseEndBlock{ValidatorUpdates: app.ValUpdates}
|
||||
}
|
||||
|
||||
@@ -477,7 +495,7 @@ Note: these query formats are subject to change!
|
||||
|
||||
.. code-block:: go
|
||||
|
||||
func (app *DummyApplication) Query(reqQuery types.RequestQuery) (resQuery types.ResponseQuery) {
|
||||
func (app *KVStoreApplication) Query(reqQuery types.RequestQuery) (resQuery types.ResponseQuery) {
|
||||
if reqQuery.Prove {
|
||||
value, proof, exists := app.state.Proof(reqQuery.Data)
|
||||
resQuery.Index = -1 // TODO make Proof return index
|
||||
@@ -561,7 +579,7 @@ all blocks.
|
||||
|
||||
.. code-block:: go
|
||||
|
||||
func (app *DummyApplication) Info(req types.RequestInfo) (resInfo types.ResponseInfo) {
|
||||
func (app *KVStoreApplication) Info(req types.RequestInfo) (resInfo types.ResponseInfo) {
|
||||
return types.ResponseInfo{Data: cmn.Fmt("{\"size\":%v}", app.state.Size())}
|
||||
}
|
||||
|
||||
@@ -595,7 +613,7 @@ consensus params.
|
||||
.. code-block:: go
|
||||
|
||||
// Save the validators in the merkle tree
|
||||
func (app *PersistentDummyApplication) InitChain(params types.RequestInitChain) {
|
||||
func (app *PersistentKVStoreApplication) InitChain(params types.RequestInitChain) {
|
||||
for _, v := range params.Validators {
|
||||
r := app.updateValidator(v)
|
||||
if r.IsErr() {
|
||||
|
docs/architecture/adr-008-priv-validator.md (new file, 29 lines)
@@ -0,0 +1,29 @@
|
||||
# ADR 008: SocketPV
|
||||
|
||||
Tendermint nodes should support only two in-process PrivValidator
|
||||
implementations:
|
||||
|
||||
- FilePV uses an unencrypted private key in a "priv_validator.json" file - no
|
||||
configuration required (just `tendermint init`).
|
||||
- SocketPV uses a socket to send signing requests to another process - user is
|
||||
responsible for starting that process themselves.
|
||||
|
||||
The SocketPV address can be provided via flags at the command line - doing so
|
||||
will cause Tendermint to ignore any "priv_validator.json" file and to listen on
|
||||
the given address for incoming connections from an external priv_validator
|
||||
process. It will halt any operation until at least one external process
successfully connects.
|
||||
|
||||
The external priv_validator process will dial the address to connect to
|
||||
Tendermint, and then Tendermint will send requests on the ensuing connection to
|
||||
sign votes and proposals. Thus the external process initiates the connection,
|
||||
but the Tendermint process makes all requests. In a later stage we're going to
|
||||
support multiple validators for fault tolerance. To prevent double signing they
|
||||
need to be synced, which is deferred to an external solution (see #1185).
|
||||
|
||||
In addition, Tendermint will provide implementations that can be run in that
|
||||
external process. These include:
|
||||
|
||||
- FilePV will encrypt the private key, and the user must enter a password to
decrypt the key when the process is started.
|
||||
- LedgerPV uses a Ledger Nano S to handle all signing.
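A rough sketch of the dial-and-serve loop an external priv_validator process might run follows; everything here (function names, framing, buffer sizes) is a placeholder, not the actual SocketPV wire protocol:

```go
package main

import "net"

// serveSigner dials the Tendermint node and answers raw signing requests
// using the provided sign function. Placeholder sketch only.
func serveSigner(addr string, sign func([]byte) ([]byte, error)) error {
	conn, err := net.Dial("tcp", addr) // the external process initiates the connection
	if err != nil {
		return err
	}
	defer conn.Close()
	buf := make([]byte, 64*1024)
	for {
		n, err := conn.Read(buf) // Tendermint sends the bytes to sign
		if err != nil {
			return err
		}
		sig, err := sign(buf[:n])
		if err != nil {
			return err
		}
		if _, err := conn.Write(sig); err != nil { // reply with the signature
			return err
		}
	}
}
```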
|
docs/assets/a_plus_t.png (new binary file, 14 KiB; not shown)
docs/assets/tm-application-example.png (new binary file, 26 KiB; not shown)
docs/conf.py (25 lines changed)
@@ -41,15 +41,15 @@ templates_path = ['_templates']
|
||||
# The suffix(es) of source filenames.
|
||||
# You can specify multiple suffix as a list of string:
|
||||
#
|
||||
source_suffix = ['.rst', '.md']
|
||||
# source_suffix = '.rst'
|
||||
#source_suffix = ['.rst', '.md']
|
||||
source_suffix = '.rst'
|
||||
|
||||
# The master toctree document.
|
||||
master_doc = 'index'
|
||||
|
||||
# General information about the project.
|
||||
project = u'Tendermint'
|
||||
copyright = u'2017, The Authors'
|
||||
copyright = u'2018, The Authors'
|
||||
author = u'Tendermint'
|
||||
|
||||
# The version info for the project you're documenting, acts as replacement for
|
||||
@@ -71,7 +71,7 @@ language = None
|
||||
# List of patterns, relative to source directory, that match files and
|
||||
# directories to ignore when looking for source files.
|
||||
# This patterns also effect to html_static_path and html_extra_path
|
||||
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'architecture']
|
||||
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'architecture', 'spec', 'examples']
|
||||
|
||||
# The name of the Pygments (syntax highlighting) style to use.
|
||||
pygments_style = 'sphinx'
|
||||
@@ -184,25 +184,14 @@ if os.path.isdir(tools_dir) != True:
|
||||
if os.path.isdir(assets_dir) != True:
|
||||
os.mkdir(assets_dir)
|
||||
|
||||
urllib.urlretrieve(tools_repo+tools_branch+'/ansible/README.rst', filename=tools_dir+'/ansible.rst')
|
||||
urllib.urlretrieve(tools_repo+tools_branch+'/ansible/assets/a_plus_t.png', filename=assets_dir+'/a_plus_t.png')
|
||||
|
||||
urllib.urlretrieve(tools_repo+tools_branch+'/docker/README.rst', filename=tools_dir+'/docker.rst')
|
||||
|
||||
urllib.urlretrieve(tools_repo+tools_branch+'/mintnet-kubernetes/README.rst', filename=tools_dir+'/mintnet-kubernetes.rst')
|
||||
urllib.urlretrieve(tools_repo+tools_branch+'/mintnet-kubernetes/assets/gce1.png', filename=assets_dir+'/gce1.png')
|
||||
urllib.urlretrieve(tools_repo+tools_branch+'/mintnet-kubernetes/assets/gce2.png', filename=assets_dir+'/gce2.png')
|
||||
urllib.urlretrieve(tools_repo+tools_branch+'/mintnet-kubernetes/assets/statefulset.png', filename=assets_dir+'/statefulset.png')
|
||||
urllib.urlretrieve(tools_repo+tools_branch+'/mintnet-kubernetes/assets/t_plus_k.png', filename=assets_dir+'/t_plus_k.png')
|
||||
|
||||
urllib.urlretrieve(tools_repo+tools_branch+'/terraform-digitalocean/README.rst', filename=tools_dir+'/terraform-digitalocean.rst')
|
||||
urllib.urlretrieve(tools_repo+tools_branch+'/tm-bench/README.rst', filename=tools_dir+'/benchmarking-and-monitoring.rst')
|
||||
# the readme for below is included in tm-bench
|
||||
# urllib.urlretrieve('https://raw.githubusercontent.com/tendermint/tools/master/tm-monitor/README.rst', filename='tools/tm-monitor.rst')
|
||||
urllib.urlretrieve(tools_repo+tools_branch+'/tm-bench/README.rst', filename=tools_dir+'/benchmarking.rst')
|
||||
urllib.urlretrieve(tools_repo+tools_branch+'/tm-monitor/README.rst', filename='tools/monitoring.rst')
|
||||
|
||||
#### abci spec #################################
|
||||
|
||||
abci_repo = "https://raw.githubusercontent.com/tendermint/abci/"
|
||||
abci_branch = "spec-docs"
|
||||
abci_branch = "develop"
|
||||
|
||||
urllib.urlretrieve(abci_repo+abci_branch+'/specification.rst', filename='abci-spec.rst')
|
||||
|
@@ -3,88 +3,62 @@ Deploy a Testnet
|
||||
|
||||
Now that we've seen how ABCI works, and even played with a few
|
||||
applications on a single validator node, it's time to deploy a test
|
||||
network to four validator nodes. For this deployment, we'll use the
|
||||
``basecoin`` application.
|
||||
network to four validator nodes.
|
||||
|
||||
Manual Deployments
|
||||
------------------
|
||||
|
||||
It's relatively easy to setup a Tendermint cluster manually. The only
|
||||
requirements for a particular Tendermint node are a private key for the
|
||||
validator, stored as ``priv_validator.json``, and a list of the public
|
||||
keys of all validators, stored as ``genesis.json``. These files should
|
||||
be stored in ``~/.tendermint/config``, or wherever the ``$TMHOME`` variable
|
||||
might be set to.
|
||||
validator, stored as ``priv_validator.json``, a node key, stored as
|
||||
``node_key.json`` and a list of the public keys of all validators, stored as
|
||||
``genesis.json``. These files should be stored in ``~/.tendermint/config``, or
|
||||
wherever the ``$TMHOME`` variable might be set to.
|
||||
|
||||
Here are the steps to setting up a testnet manually:
|
||||
|
||||
1) Provision nodes on your cloud provider of choice
|
||||
2) Install Tendermint and the application of interest on all nodes
|
||||
3) Generate a private key for each validator using
|
||||
``tendermint gen_validator``
|
||||
3) Generate a private key and a node key for each validator using
|
||||
``tendermint init``
|
||||
4) Compile a list of public keys for each validator into a
|
||||
``genesis.json`` file.
|
||||
5) Run ``tendermint node --p2p.persistent_peers=< peer addresses >`` on each node,
|
||||
``genesis.json`` file and replace the existing file with it.
|
||||
5) Run ``tendermint node --proxy_app=kvstore --p2p.persistent_peers=< peer addresses >`` on each node,
|
||||
where ``< peer addresses >`` is a comma separated list of the IP:PORT
|
||||
combination for each node. The default port for Tendermint is
|
||||
``46656``. Thus, if the IP addresses of your nodes were
|
||||
``192.168.0.1, 192.168.0.2, 192.168.0.3, 192.168.0.4``, the command
|
||||
would look like:
|
||||
``tendermint node --p2p.persistent_peers=192.168.0.1:46656,192.168.0.2:46656,192.168.0.3:46656,192.168.0.4:46656``.
|
||||
|
||||
::
|
||||
|
||||
tendermint node --proxy_app=kvstore --p2p.persistent_peers=96663a3dd0d7b9d17d4c8211b191af259621c693@192.168.0.1:46656, 429fcf25974313b95673f58d77eacdd434402665@192.168.0.2:46656, 0491d373a8e0fcf1023aaf18c51d6a1d0d4f31bd@192.168.0.3:46656, f9baeaa15fedf5e1ef7448dd60f46c01f1a9e9c4@192.168.0.4:46656
|
||||
|
||||
After a few seconds, all the nodes should connect to each other and start
|
||||
making blocks! For more information, see the Tendermint Networks section
|
||||
of `the guide to using Tendermint <using-tendermint.html>`__.
|
||||
|
||||
But wait! Steps 3 and 4 are quite manual. Instead, use `this script <https://github.com/tendermint/tendermint/blob/develop/docs/examples/init_testnet.sh>`__, which does the heavy lifting for you. And it gets better.
|
||||
|
||||
Instead of the previously linked script to initialize the files required for a testnet, we have the ``tendermint testnet`` command. By default, running ``tendermint testnet`` will create all the required files, just like the script. Of course, you'll still need to manually edit some fields in the ``config.toml``. Alternatively, see the available flags to auto-populate the ``config.toml`` with the fields that would otherwise be passed in via flags when running ``tendermint node``. As you might imagine, this command is useful for manual or automated deployments.
|
||||
|
||||
Automated Deployments
|
||||
---------------------
|
||||
|
||||
While the manual deployment is easy enough, an automated deployment is
|
||||
usually quicker. The below examples show different tools that can be used
|
||||
for automated deployments.
|
||||
This is the easiest and fastest way to get a testnet up in less than 5 minutes.
|
||||
|
||||
Automated Deployment using Kubernetes
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
Local
|
||||
^^^^^
|
||||
|
||||
The `mintnet-kubernetes tool <https://github.com/tendermint/tools/tree/master/mintnet-kubernetes>`__
|
||||
allows automating the deployment of a Tendermint network on an already
|
||||
provisioned Kubernetes cluster. For simple provisioning of a Kubernetes
|
||||
cluster, check out the `Google Cloud Platform <https://cloud.google.com/>`__.
|
||||
|
||||
Automated Deployment using Terraform and Ansible
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
The `terraform-digitalocean tool <https://github.com/tendermint/tools/tree/master/terraform-digitalocean>`__
|
||||
allows creating a set of servers on the DigitalOcean cloud.
|
||||
|
||||
The `ansible playbooks <https://github.com/tendermint/tools/tree/master/ansible>`__
|
||||
allow creating and managing a ``basecoin`` or ``ethermint`` testnet on provisioned servers.
|
||||
|
||||
Package Deployment on Linux for developers
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
The ``tendermint`` and ``basecoin`` applications can be installed from RPM or DEB packages on
|
||||
Linux machines for development purposes. The packages are configured to be validators on the
|
||||
one-node network that the machine represents. The services are not started after installation,
|
||||
this way giving an opportunity to reconfigure the applications before starting.
|
||||
|
||||
The Ansible playbooks in the previous section use this repository to install ``basecoin``.
|
||||
After installation, additional steps are executed to make sure that the multi-node testnet has
|
||||
the right configuration before start.
|
||||
|
||||
Install from the CentOS/RedHat repository:
|
||||
With ``docker`` and ``docker-compose`` installed, run the command:
|
||||
|
||||
::
|
||||
|
||||
rpm --import https://tendermint-packages.interblock.io/centos/7/os/x86_64/RPM-GPG-KEY-Tendermint
|
||||
wget -O /etc/yum.repos.d/tendermint.repo https://tendermint-packages.interblock.io/centos/7/os/x86_64/tendermint.repo
|
||||
yum install basecoin
|
||||
make localnet-start
|
||||
|
||||
Install from the Debian/Ubuntu repository:
|
||||
from the root of the tendermint repository. This will spin up a 4-node local testnet.
|
||||
|
||||
::
|
||||
|
||||
wget -O - https://tendermint-packages.interblock.io/centos/7/os/x86_64/RPM-GPG-KEY-Tendermint | apt-key add -
|
||||
wget -O /etc/apt/sources.list.d/tendermint.list https://tendermint-packages.interblock.io/debian/tendermint.list
|
||||
apt-get update && apt-get install basecoin
|
||||
Cloud
|
||||
^^^^^
|
||||
|
||||
See the `next section <./terraform-and-ansible.html>`__ for details.
|
||||
|
docs/determinism.rst (new file, 8 lines)
@@ -0,0 +1,8 @@
|
||||
On Determinism
|
||||
==============
|
||||
|
||||
Arguably, the most difficult part of blockchain programming is determinism - that is, ensuring that sources of indeterminism do not creep into the design of such systems.
|
||||
|
||||
See `this issue <https://github.com/tendermint/abci/issues/56>`__ for more information on the potential sources of indeterminism.
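As one concrete illustration added here (not taken from the linked issue): iterating over a Go map is a classic source of indeterminism, so deterministic state machines generally sort keys before hashing or emitting them. The function below is a sketch; ``balances``, the hash choice, and the imports (``crypto/sha256``, ``fmt``, ``sort``) are assumptions.

.. code-block:: go

    // deterministicDigest hashes a balances map in sorted-key order; ranging
    // over the map directly would produce a different order on every run.
    func deterministicDigest(balances map[string]int64) []byte {
        keys := make([]string, 0, len(balances))
        for k := range balances {
            keys = append(keys, k)
        }
        sort.Strings(keys)
        h := sha256.New()
        for _, k := range keys {
            fmt.Fprintf(h, "%s=%d;", k, balances[k])
        }
        return h.Sum(nil)
    }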
|
||||
|
||||
|
@@ -5,7 +5,7 @@ The growing list of applications built using various pieces of the Tendermint st
|
||||
|
||||
* https://tendermint.com/ecosystem
|
||||
|
||||
We thank the community for their contributions thus far and welcome the addition of new projects. A pull request can be submitted to `this file <https://github.com/tendermint/tendermint/blob/master/docs/ecosystem.rst>`__ to include your project.
|
||||
We thank the community for their contributions thus far and welcome the addition of new projects. A pull request can be submitted to `this file <https://github.com/tendermint/aib-data/blob/master/json/ecosystem.json>`__ to include your project.
|
||||
|
||||
Other Tools
|
||||
-----------
|
||||
|
@@ -2,17 +2,18 @@
|
||||
|
||||
## Overview
|
||||
|
||||
This is a quick start guide. If you have a vague idea about how Tendermint works
|
||||
and want to get started right away, continue. Otherwise, [review the documentation](http://tendermint.readthedocs.io/en/master/)
|
||||
This is a quick start guide. If you have a vague idea about how Tendermint
|
||||
works and want to get started right away, continue. Otherwise, [review the
|
||||
documentation](http://tendermint.readthedocs.io/en/master/).
|
||||
|
||||
## Install
|
||||
|
||||
### Quick Install
|
||||
|
||||
On a fresh Ubuntu 16.04 machine can be done with [this script](https://git.io/vNLfY), like so:
|
||||
On a fresh Ubuntu 16.04 machine, this can be done with [this script](https://git.io/vpgEI), like so:
|
||||
|
||||
```
|
||||
curl -L https://git.io/vNLfY | bash
|
||||
curl -L https://git.io/vpgEI | bash
|
||||
source ~/.profile
|
||||
```
|
||||
|
||||
@@ -23,7 +24,7 @@ The script is also used to facilitate cluster deployment below.
|
||||
### Manual Install
|
||||
|
||||
Requires:
|
||||
- `go` minimum version 1.9
|
||||
- `go` minimum version 1.10
|
||||
- `$GOPATH` environment variable must be set
|
||||
- `$GOPATH/bin` must be on your `$PATH` (see https://github.com/tendermint/tendermint/wiki/Setting-GOPATH)
|
||||
|
||||
@@ -42,7 +43,7 @@ Confirm installation:
|
||||
|
||||
```
|
||||
$ tendermint version
|
||||
0.15.0-381fe19
|
||||
0.18.0-XXXXXXX
|
||||
```
|
||||
|
||||
## Initialization
|
||||
@@ -71,7 +72,7 @@ Configuring a cluster is covered further below.
|
||||
Start tendermint with a simple in-process application:
|
||||
|
||||
```
|
||||
tendermint node --proxy_app=dummy
|
||||
tendermint node --proxy_app=kvstore
|
||||
```
|
||||
|
||||
and blocks will start to stream in:
|
||||
@@ -89,7 +90,7 @@ curl -s localhost:46657/status
|
||||
|
||||
### Sending Transactions
|
||||
|
||||
With the dummy app running, we can send transactions:
|
||||
With the kvstore app running, we can send transactions:
|
||||
|
||||
```
|
||||
curl -s 'localhost:46657/broadcast_tx_commit?tx="abcd"'
|
||||
@@ -117,12 +118,14 @@ where the value is returned in hex.
|
||||
|
||||
## Cluster of Nodes
|
||||
|
||||
First create four Ubuntu cloud machines. The following was tested on Digital Ocean Ubuntu 16.04 x64 (3GB/1CPU, 20GB SSD). We'll refer to their respective IP addresses below as IP1, IP2, IP3, IP4.
|
||||
First create four Ubuntu cloud machines. The following was tested on Digital
|
||||
Ocean Ubuntu 16.04 x64 (3GB/1CPU, 20GB SSD). We'll refer to their respective IP
|
||||
addresses below as IP1, IP2, IP3, IP4.
|
||||
|
||||
Then, `ssh` into each machine, and execute [this script](https://git.io/vNLfY):
|
||||
|
||||
```
|
||||
curl -L https://git.io/vNLfY | bash
|
||||
curl -L https://git.io/vpgEI | bash
|
||||
source ~/.profile
|
||||
```
|
||||
|
||||
@@ -131,12 +134,16 @@ This will install `go` and other dependencies, get the Tendermint source code, t
|
||||
Next, `cd` into `docs/examples`. Each command below should be run from each node, in sequence:
|
||||
|
||||
```
|
||||
tendermint node --home ./node1 --proxy_app=dummy --p2p.seeds IP1:46656,IP2:46656,IP3:46656,IP4:46656
|
||||
tendermint node --home ./node2 --proxy_app=dummy --p2p.seeds IP1:46656,IP2:46656,IP3:46656,IP4:46656
|
||||
tendermint node --home ./node3 --proxy_app=dummy --p2p.seeds IP1:46656,IP2:46656,IP3:46656,IP4:46656
|
||||
tendermint node --home ./node4 --proxy_app=dummy --p2p.seeds IP1:46656,IP2:46656,IP3:46656,IP4:46656
|
||||
tendermint node --home ./node0 --proxy_app=kvstore --p2p.persistent_peers="167b80242c300bf0ccfb3ced3dec60dc2a81776e@IP1:46656,3c7a5920811550c04bf7a0b2f1e02ab52317b5e6@IP2:46656,303a1a4312c30525c99ba66522dd81cca56a361a@IP3:46656,b686c2a7f4b1b46dca96af3a0f31a6a7beae0be4@IP4:46656"
|
||||
tendermint node --home ./node1 --proxy_app=kvstore --p2p.persistent_peers="167b80242c300bf0ccfb3ced3dec60dc2a81776e@IP1:46656,3c7a5920811550c04bf7a0b2f1e02ab52317b5e6@IP2:46656,303a1a4312c30525c99ba66522dd81cca56a361a@IP3:46656,b686c2a7f4b1b46dca96af3a0f31a6a7beae0be4@IP4:46656"
|
||||
tendermint node --home ./node2 --proxy_app=kvstore --p2p.persistent_peers="167b80242c300bf0ccfb3ced3dec60dc2a81776e@IP1:46656,3c7a5920811550c04bf7a0b2f1e02ab52317b5e6@IP2:46656,303a1a4312c30525c99ba66522dd81cca56a361a@IP3:46656,b686c2a7f4b1b46dca96af3a0f31a6a7beae0be4@IP4:46656"
|
||||
tendermint node --home ./node3 --proxy_app=kvstore --p2p.persistent_peers="167b80242c300bf0ccfb3ced3dec60dc2a81776e@IP1:46656,3c7a5920811550c04bf7a0b2f1e02ab52317b5e6@IP2:46656,303a1a4312c30525c99ba66522dd81cca56a361a@IP3:46656,b686c2a7f4b1b46dca96af3a0f31a6a7beae0be4@IP4:46656"
|
||||
```
|
||||
|
||||
Note that after the third node is started, blocks will start to stream in because >2/3 of validators (defined in the `genesis.json`) have come online. Seeds can also be specified in the `config.toml`. See [this PR](https://github.com/tendermint/tendermint/pull/792) for more information about configuration options.
|
||||
Note that after the third node is started, blocks will start to stream in
|
||||
because >2/3 of validators (defined in the `genesis.json`) have come online.
|
||||
Seeds can also be specified in the `config.toml`. See [this
|
||||
PR](https://github.com/tendermint/tendermint/pull/792) for more information
|
||||
about configuration options.
|
||||
|
||||
Transactions can then be sent as covered in the single, local node example above.
|
||||
|
docs/examples/init_testnet.sh (new file, 69 lines)
@@ -0,0 +1,69 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# make all the files
|
||||
tendermint init --home ./tester/node0
|
||||
tendermint init --home ./tester/node1
|
||||
tendermint init --home ./tester/node2
|
||||
tendermint init --home ./tester/node3
|
||||
|
||||
file0=./tester/node0/config/genesis.json
|
||||
file1=./tester/node1/config/genesis.json
|
||||
file2=./tester/node2/config/genesis.json
|
||||
file3=./tester/node3/config/genesis.json
|
||||
|
||||
genesis_time=`cat $file0 | jq '.genesis_time'`
|
||||
chain_id=`cat $file0 | jq '.chain_id'`
|
||||
|
||||
value0=`cat $file0 | jq '.validators[0].pub_key.value'`
|
||||
value1=`cat $file1 | jq '.validators[0].pub_key.value'`
|
||||
value2=`cat $file2 | jq '.validators[0].pub_key.value'`
|
||||
value3=`cat $file3 | jq '.validators[0].pub_key.value'`
|
||||
|
||||
rm $file0
|
||||
rm $file1
|
||||
rm $file2
|
||||
rm $file3
|
||||
|
||||
echo "{
|
||||
\"genesis_time\": $genesis_time,
|
||||
\"chain_id\": $chain_id,
|
||||
\"validators\": [
|
||||
{
|
||||
\"pub_key\": {
|
||||
\"type\": \"AC26791624DE60\",
|
||||
\"value\": $value0
|
||||
},
\"power\": 10,
\"name\": \"\"
|
||||
},
|
||||
{
|
||||
\"pub_key\": {
|
||||
\"type\": \"AC26791624DE60\",
|
||||
\"value\": $value1
|
||||
},
\"power\": 10,
\"name\": \"\"
|
||||
},
|
||||
{
|
||||
\"pub_key\": {
|
||||
\"type\": \"AC26791624DE60\",
|
||||
\"value\": $value2
|
||||
},
\"power\": 10,
\"name\": \"\"
|
||||
},
|
||||
{
|
||||
\"pub_key\": {
|
||||
\"type\": \"AC26791624DE60\",
|
||||
\"value\": $value3
|
||||
},
\"power\": 10,
\"name\": \"\"
|
||||
}
|
||||
],
|
||||
\"app_hash\": \"\"
|
||||
}" >> $file0
|
||||
|
||||
cp $file0 $file1
|
||||
cp $file0 $file2
|
||||
cp $file2 $file3
|
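A quick way to sanity-check the script's output is to run it and confirm that the four generated node homes now share one genesis file. This is a minimal sketch, assuming the script is invoked from the repository root and writes to `./tester/` as above.

```
# generate ./tester/node0 .. ./tester/node3 with a shared genesis
bash docs/examples/init_testnet.sh

# the four genesis files should be byte-for-byte identical
diff ./tester/node0/config/genesis.json ./tester/node1/config/genesis.json
diff ./tester/node0/config/genesis.json ./tester/node2/config/genesis.json
diff ./tester/node0/config/genesis.json ./tester/node3/config/genesis.json
```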
@@ -4,8 +4,8 @@
# and has only been tested on Digital Ocean

# get and unpack golang
curl -O https://storage.googleapis.com/golang/go1.9.2.linux-amd64.tar.gz
tar -xvf go1.9.2.linux-amd64.tar.gz
curl -O https://storage.googleapis.com/golang/go1.10.linux-amd64.tar.gz
tar -xvf go1.10.linux-amd64.tar.gz

apt install make

@@ -26,7 +26,7 @@ go get $REPO
cd $GOPATH/src/$REPO

## build
git checkout v0.15.0
git checkout master
make get_tools
make get_vendor_deps
make install
make install
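Read together, the two hunks above update Go from 1.9.2 to 1.10 and build from `master` instead of the `v0.15.0` tag. The following is only a sketch of the affected portion of the install script, assuming the surrounding lines (setting `$REPO` and `$GOPATH`) are unchanged.

```
# get and unpack golang (now 1.10)
curl -O https://storage.googleapis.com/golang/go1.10.linux-amd64.tar.gz
tar -xvf go1.10.linux-amd64.tar.gz

apt install make

# fetch and build tendermint from master instead of the v0.15.0 tag
go get $REPO
cd $GOPATH/src/$REPO
git checkout master
make get_tools
make get_vendor_deps
make install
```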
169
docs/examples/node0/config/config.toml
Normal file
@@ -0,0 +1,169 @@
|
||||
# This is a TOML config file.
|
||||
# For more information, see https://github.com/toml-lang/toml
|
||||
|
||||
##### main base config options #####
|
||||
|
||||
# TCP or UNIX socket address of the ABCI application,
|
||||
# or the name of an ABCI application compiled in with the Tendermint binary
|
||||
proxy_app = "tcp://127.0.0.1:46658"
|
||||
|
||||
# A custom human readable name for this node
|
||||
moniker = "alpha"
|
||||
|
||||
# If this node is many blocks behind the tip of the chain, FastSync
|
||||
# allows it to catch up quickly by downloading blocks in parallel
|
||||
# and verifying their commits
|
||||
fast_sync = true
|
||||
|
||||
# Database backend: leveldb | memdb
|
||||
db_backend = "leveldb"
|
||||
|
||||
# Database directory
|
||||
db_path = "data"
|
||||
|
||||
# Output level for logging, including package level options
|
||||
log_level = "main:info,state:info,*:error"
|
||||
|
||||
##### additional base config options #####
|
||||
|
||||
# Path to the JSON file containing the initial validator set and other meta data
|
||||
genesis_file = "config/genesis.json"
|
||||
|
||||
# Path to the JSON file containing the private key to use as a validator in the consensus protocol
|
||||
priv_validator_file = "config/priv_validator.json"
|
||||
|
||||
# Path to the JSON file containing the private key to use for node authentication in the p2p protocol
|
||||
node_key_file = "config/node_key.json"
|
||||
|
||||
# Mechanism to connect to the ABCI application: socket | grpc
|
||||
abci = "socket"
|
||||
|
||||
# TCP or UNIX socket address for the profiling server to listen on
|
||||
prof_laddr = ""
|
||||
|
||||
# If true, query the ABCI app on connecting to a new peer
|
||||
# so the app can decide if we should keep the connection or not
|
||||
filter_peers = false
|
||||
|
||||
##### advanced configuration options #####
|
||||
|
||||
##### rpc server configuration options #####
|
||||
[rpc]
|
||||
|
||||
# TCP or UNIX socket address for the RPC server to listen on
|
||||
laddr = "tcp://0.0.0.0:46657"
|
||||
|
||||
# TCP or UNIX socket address for the gRPC server to listen on
|
||||
# NOTE: This server only supports /broadcast_tx_commit
|
||||
grpc_laddr = ""
|
||||
|
||||
# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool
|
||||
unsafe = false
|
||||
|
||||
##### peer to peer configuration options #####
|
||||
[p2p]
|
||||
|
||||
# Address to listen for incoming connections
|
||||
laddr = "tcp://0.0.0.0:46656"
|
||||
|
||||
# Comma separated list of seed nodes to connect to
|
||||
seeds = ""
|
||||
|
||||
# Comma separated list of nodes to keep persistent connections to
|
||||
# Do not add private peers to this list if you don't want them advertised
|
||||
persistent_peers = ""
|
||||
|
||||
# Path to address book
|
||||
addr_book_file = "config/addrbook.json"
|
||||
|
||||
# Set true for strict address routability rules
|
||||
addr_book_strict = true
|
||||
|
||||
# Time to wait before flushing messages out on the connection, in ms
|
||||
flush_throttle_timeout = 100
|
||||
|
||||
# Maximum number of peers to connect to
|
||||
max_num_peers = 50
|
||||
|
||||
# Maximum size of a message packet payload, in bytes
|
||||
max_packet_msg_payload_size = 1024
|
||||
|
||||
# Rate at which packets can be sent, in bytes/second
|
||||
send_rate = 512000
|
||||
|
||||
# Rate at which packets can be received, in bytes/second
|
||||
recv_rate = 512000
|
||||
|
||||
# Set true to enable the peer-exchange reactor
|
||||
pex = true
|
||||
|
||||
# Seed mode, in which node constantly crawls the network and looks for
|
||||
# peers. If another node asks it for addresses, it responds and disconnects.
|
||||
#
|
||||
# Does not work if the peer-exchange reactor is disabled.
|
||||
seed_mode = false
|
||||
|
||||
# Authenticated encryption
|
||||
auth_enc = true
|
||||
|
||||
# Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
|
||||
private_peer_ids = ""
|
||||
|
||||
##### mempool configuration options #####
|
||||
[mempool]
|
||||
|
||||
recheck = true
|
||||
recheck_empty = true
|
||||
broadcast = true
|
||||
wal_dir = "data/mempool.wal"
|
||||
|
||||
##### consensus configuration options #####
|
||||
[consensus]
|
||||
|
||||
wal_file = "data/cs.wal/wal"
|
||||
|
||||
# All timeouts are in milliseconds
|
||||
timeout_propose = 3000
|
||||
timeout_propose_delta = 500
|
||||
timeout_prevote = 1000
|
||||
timeout_prevote_delta = 500
|
||||
timeout_precommit = 1000
|
||||
timeout_precommit_delta = 500
|
||||
timeout_commit = 1000
|
||||
|
||||
# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
|
||||
skip_timeout_commit = false
|
||||
|
||||
# BlockSize
|
||||
max_block_size_txs = 10000
|
||||
max_block_size_bytes = 1
|
||||
|
||||
# EmptyBlocks mode and possible interval between empty blocks in seconds
|
||||
create_empty_blocks = true
|
||||
create_empty_blocks_interval = 0
|
||||
|
||||
# Reactor sleep duration parameters are in milliseconds
|
||||
peer_gossip_sleep_duration = 100
|
||||
peer_query_maj23_sleep_duration = 2000
|
||||
|
||||
##### transactions indexer configuration options #####
|
||||
[tx_index]
|
||||
|
||||
# What indexer to use for transactions
|
||||
#
|
||||
# Options:
|
||||
# 1) "null" (default)
|
||||
# 2) "kv" - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
|
||||
indexer = "kv"
|
||||
|
||||
# Comma-separated list of tags to index (by default the only tag is tx hash)
|
||||
#
|
||||
# It's recommended to index only a subset of tags due to possible memory
|
||||
# bloat. This, of course, depends on the indexer's DB and the volume of
|
||||
# transactions.
|
||||
index_tags = ""
|
||||
|
||||
# When set to true, tells the indexer to index all tags. Note this may not be
# desirable (see the comment above). IndexTags takes precedence over
# IndexAllTags (i.e. when given both, IndexTags will be indexed).
|
||||
index_all_tags = false
|
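Once a node is running with a config like the one above, you can confirm that the `[rpc]` and `[p2p]` settings are in effect by hitting the RPC address it exposes. A minimal check, assuming the node runs locally with the default `laddr` on port 46657:

```
# node status (moniker, node ID, latest block height)
curl -s localhost:46657/status

# peers the node is currently connected to
curl -s localhost:46657/net_info
```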
39
docs/examples/node0/config/genesis.json
Normal file
@@ -0,0 +1,39 @@
|
||||
{
|
||||
"genesis_time": "0001-01-01T00:00:00Z",
|
||||
"chain_id": "test-chain-A2i3OZ",
|
||||
"validators": [
|
||||
{
|
||||
"pub_key": {
|
||||
"type": "AC26791624DE60",
|
||||
"value": "D+k4AdjnYPWbB9wmad137Bdpo/kAulOoTRQrLy/Qc4k="
|
||||
},
|
||||
"power": 10,
|
||||
"name": ""
|
||||
},
|
||||
{
|
||||
"pub_key": {
|
||||
"type": "AC26791624DE60",
|
||||
"value": "b56N5GCR1adcVRuENjfKw/mrm2dkhT7wNZXV/SDsKsU="
|
||||
},
|
||||
"power": 10,
|
||||
"name": ""
|
||||
},
|
||||
{
|
||||
"pub_key": {
|
||||
"type": "AC26791624DE60",
|
||||
"value": "IgZDpJvGA0TAamicA8ircy+RX/BkUlj6DXwM791ywIU="
|
||||
},
|
||||
"power": 10,
|
||||
"name": ""
|
||||
},
|
||||
{
|
||||
"pub_key": {
|
||||
"type": "AC26791624DE60",
|
||||
"value": "KGAZfxZvIZ7abbeIQ85U1ECG6+I62KSdaH8ulc0+OiU="
|
||||
},
|
||||
"power": 10,
|
||||
"name": ""
|
||||
}
|
||||
],
|
||||
"app_hash": ""
|
||||
}
|
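Since all four nodes must share this genesis file, a quick way to inspect it is with jq (already used by `init_testnet.sh` above), for example to list the validator set it defines:

```
# count and list the validators in the shared genesis
jq '.validators | length' docs/examples/node0/config/genesis.json
jq '.validators[].pub_key.value' docs/examples/node0/config/genesis.json
```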
1
docs/examples/node0/config/node_key.json
Normal file
@@ -0,0 +1 @@
|
||||
{"priv_key":{"type":"954568A3288910","value":"7lY+k6EDllG8Q9gVbF5313t/ag2YGkBVKdVa0YHJ9xO5k0w3Q/hke0Z7UFT1KgVDGRUEKzwAwwjwFQUvgF0ZWg=="}}
|
14
docs/examples/node0/config/priv_validator.json
Normal file
@@ -0,0 +1,14 @@
|
||||
{
|
||||
"address": "122A9414774A2FCAD026201DA477EF3F41970EF0",
|
||||
"pub_key": {
|
||||
"type": "AC26791624DE60",
|
||||
"value": "D+k4AdjnYPWbB9wmad137Bdpo/kAulOoTRQrLy/Qc4k="
|
||||
},
|
||||
"last_height": 0,
|
||||
"last_round": 0,
|
||||
"last_step": 0,
|
||||
"priv_key": {
|
||||
"type": "954568A3288910",
|
||||
"value": "YLxp3ho+kySgAnzjBptbxDzSGw2ntGZLsIHQsaVxY/cP6TgB2Odg9ZsH3CZp3XfsF2mj+QC6U6hNFCsvL9BziQ=="
|
||||
}
|
||||
}
|
@@ -1,15 +0,0 @@
|
||||
# This is a TOML config file.
|
||||
# For more information, see https://github.com/toml-lang/toml
|
||||
|
||||
proxy_app = "tcp://127.0.0.1:46658"
|
||||
moniker = "penguin"
|
||||
fast_sync = true
|
||||
db_backend = "leveldb"
|
||||
log_level = "state:info,*:error"
|
||||
|
||||
[rpc]
|
||||
laddr = "tcp://0.0.0.0:46657"
|
||||
|
||||
[p2p]
|
||||
laddr = "tcp://0.0.0.0:46656"
|
||||
seeds = ""
|
169
docs/examples/node1/config/config.toml
Normal file
@@ -0,0 +1,169 @@
|
||||
# This is a TOML config file.
|
||||
# For more information, see https://github.com/toml-lang/toml
|
||||
|
||||
##### main base config options #####
|
||||
|
||||
# TCP or UNIX socket address of the ABCI application,
|
||||
# or the name of an ABCI application compiled in with the Tendermint binary
|
||||
proxy_app = "tcp://127.0.0.1:46658"
|
||||
|
||||
# A custom human readable name for this node
|
||||
moniker = "bravo"
|
||||
|
||||
# If this node is many blocks behind the tip of the chain, FastSync
|
||||
# allows it to catch up quickly by downloading blocks in parallel
|
||||
# and verifying their commits
|
||||
fast_sync = true
|
||||
|
||||
# Database backend: leveldb | memdb
|
||||
db_backend = "leveldb"
|
||||
|
||||
# Database directory
|
||||
db_path = "data"
|
||||
|
||||
# Output level for logging, including package level options
|
||||
log_level = "main:info,state:info,*:error"
|
||||
|
||||
##### additional base config options #####
|
||||
|
||||
# Path to the JSON file containing the initial validator set and other meta data
|
||||
genesis_file = "config/genesis.json"
|
||||
|
||||
# Path to the JSON file containing the private key to use as a validator in the consensus protocol
|
||||
priv_validator_file = "config/priv_validator.json"
|
||||
|
||||
# Path to the JSON file containing the private key to use for node authentication in the p2p protocol
|
||||
node_key_file = "config/node_key.json"
|
||||
|
||||
# Mechanism to connect to the ABCI application: socket | grpc
|
||||
abci = "socket"
|
||||
|
||||
# TCP or UNIX socket address for the profiling server to listen on
|
||||
prof_laddr = ""
|
||||
|
||||
# If true, query the ABCI app on connecting to a new peer
|
||||
# so the app can decide if we should keep the connection or not
|
||||
filter_peers = false
|
||||
|
||||
##### advanced configuration options #####
|
||||
|
||||
##### rpc server configuration options #####
|
||||
[rpc]
|
||||
|
||||
# TCP or UNIX socket address for the RPC server to listen on
|
||||
laddr = "tcp://0.0.0.0:46657"
|
||||
|
||||
# TCP or UNIX socket address for the gRPC server to listen on
|
||||
# NOTE: This server only supports /broadcast_tx_commit
|
||||
grpc_laddr = ""
|
||||
|
||||
# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool
|
||||
unsafe = false
|
||||
|
||||
##### peer to peer configuration options #####
|
||||
[p2p]
|
||||
|
||||
# Address to listen for incoming connections
|
||||
laddr = "tcp://0.0.0.0:46656"
|
||||
|
||||
# Comma separated list of seed nodes to connect to
|
||||
seeds = ""
|
||||
|
||||
# Comma separated list of nodes to keep persistent connections to
|
||||
# Do not add private peers to this list if you don't want them advertised
|
||||
persistent_peers = ""
|
||||
|
||||
# Path to address book
|
||||
addr_book_file = "config/addrbook.json"
|
||||
|
||||
# Set true for strict address routability rules
|
||||
addr_book_strict = true
|
||||
|
||||
# Time to wait before flushing messages out on the connection, in ms
|
||||
flush_throttle_timeout = 100
|
||||
|
||||
# Maximum number of peers to connect to
|
||||
max_num_peers = 50
|
||||
|
||||
# Maximum size of a message packet payload, in bytes
|
||||
max_packet_msg_payload_size = 1024
|
||||
|
||||
# Rate at which packets can be sent, in bytes/second
|
||||
send_rate = 512000
|
||||
|
||||
# Rate at which packets can be received, in bytes/second
|
||||
recv_rate = 512000
|
||||
|
||||
# Set true to enable the peer-exchange reactor
|
||||
pex = true
|
||||
|
||||
# Seed mode, in which node constantly crawls the network and looks for
|
||||
# peers. If another node asks it for addresses, it responds and disconnects.
|
||||
#
|
||||
# Does not work if the peer-exchange reactor is disabled.
|
||||
seed_mode = false
|
||||
|
||||
# Authenticated encryption
|
||||
auth_enc = true
|
||||
|
||||
# Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
|
||||
private_peer_ids = ""
|
||||
|
||||
##### mempool configuration options #####
|
||||
[mempool]
|
||||
|
||||
recheck = true
|
||||
recheck_empty = true
|
||||
broadcast = true
|
||||
wal_dir = "data/mempool.wal"
|
||||
|
||||
##### consensus configuration options #####
|
||||
[consensus]
|
||||
|
||||
wal_file = "data/cs.wal/wal"
|
||||
|
||||
# All timeouts are in milliseconds
|
||||
timeout_propose = 3000
|
||||
timeout_propose_delta = 500
|
||||
timeout_prevote = 1000
|
||||
timeout_prevote_delta = 500
|
||||
timeout_precommit = 1000
|
||||
timeout_precommit_delta = 500
|
||||
timeout_commit = 1000
|
||||
|
||||
# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
|
||||
skip_timeout_commit = false
|
||||
|
||||
# BlockSize
|
||||
max_block_size_txs = 10000
|
||||
max_block_size_bytes = 1
|
||||
|
||||
# EmptyBlocks mode and possible interval between empty blocks in seconds
|
||||
create_empty_blocks = true
|
||||
create_empty_blocks_interval = 0
|
||||
|
||||
# Reactor sleep duration parameters are in milliseconds
|
||||
peer_gossip_sleep_duration = 100
|
||||
peer_query_maj23_sleep_duration = 2000
|
||||
|
||||
##### transactions indexer configuration options #####
|
||||
[tx_index]
|
||||
|
||||
# What indexer to use for transactions
|
||||
#
|
||||
# Options:
|
||||
# 1) "null" (default)
|
||||
# 2) "kv" - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
|
||||
indexer = "kv"
|
||||
|
||||
# Comma-separated list of tags to index (by default the only tag is tx hash)
|
||||
#
|
||||
# It's recommended to index only a subset of tags due to possible memory
|
||||
# bloat. This, of course, depends on the indexer's DB and the volume of
|
||||
# transactions.
|
||||
index_tags = ""
|
||||
|
||||
# When set to true, tells the indexer to index all tags. Note this may not be
# desirable (see the comment above). IndexTags takes precedence over
# IndexAllTags (i.e. when given both, IndexTags will be indexed).
|
||||
index_all_tags = false
|
39
docs/examples/node1/config/genesis.json
Normal file
@@ -0,0 +1,39 @@
|
||||
{
|
||||
"genesis_time": "0001-01-01T00:00:00Z",
|
||||
"chain_id": "test-chain-A2i3OZ",
|
||||
"validators": [
|
||||
{
|
||||
"pub_key": {
|
||||
"type": "AC26791624DE60",
|
||||
"value": "D+k4AdjnYPWbB9wmad137Bdpo/kAulOoTRQrLy/Qc4k="
|
||||
},
|
||||
"power": 10,
|
||||
"name": ""
|
||||
},
|
||||
{
|
||||
"pub_key": {
|
||||
"type": "AC26791624DE60",
|
||||
"value": "b56N5GCR1adcVRuENjfKw/mrm2dkhT7wNZXV/SDsKsU="
|
||||
},
|
||||
"power": 10,
|
||||
"name": ""
|
||||
},
|
||||
{
|
||||
"pub_key": {
|
||||
"type": "AC26791624DE60",
|
||||
"value": "IgZDpJvGA0TAamicA8ircy+RX/BkUlj6DXwM791ywIU="
|
||||
},
|
||||
"power": 10,
|
||||
"name": ""
|
||||
},
|
||||
{
|
||||
"pub_key": {
|
||||
"type": "AC26791624DE60",
|
||||
"value": "KGAZfxZvIZ7abbeIQ85U1ECG6+I62KSdaH8ulc0+OiU="
|
||||
},
|
||||
"power": 10,
|
||||
"name": ""
|
||||
}
|
||||
],
|
||||
"app_hash": ""
|
||||
}
|
1
docs/examples/node1/config/node_key.json
Normal file
@@ -0,0 +1 @@
|
||||
{"priv_key":{"type":"954568A3288910","value":"H71dc/TIG7nTselfa9nG0WRArXLKYnm7P5eFCk2lk8ASKQ3sIHpbdxCSHQD/RcdHe7TiabJeuOssNPvPWiyQEQ=="}}
|
14
docs/examples/node1/config/priv_validator.json
Normal file
@@ -0,0 +1,14 @@
|
||||
{
|
||||
"address": "BEA1B57F5806CF9AC4D54C8CF806DED5C0F102E1",
|
||||
"pub_key": {
|
||||
"type": "AC26791624DE60",
|
||||
"value": "b56N5GCR1adcVRuENjfKw/mrm2dkhT7wNZXV/SDsKsU="
|
||||
},
|
||||
"last_height": 0,
|
||||
"last_round": 0,
|
||||
"last_step": 0,
|
||||
"priv_key": {
|
||||
"type": "954568A3288910",
|
||||
"value": "o0IqrHSPtd5YqGefodWxpJuRzvuVBjgbH785vbMgk7Vvno3kYJHVp1xVG4Q2N8rD+aubZ2SFPvA1ldX9IOwqxQ=="
|
||||
}
|
||||
}
|
@@ -1,42 +0,0 @@
|
||||
{
|
||||
"genesis_time":"0001-01-01T00:00:00Z",
|
||||
"chain_id":"test-chain-wt7apy",
|
||||
"validators":[
|
||||
{
|
||||
"pub_key":{
|
||||
"type":"ed25519",
|
||||
"data":"F08446C80A33E10D620E21450821B58D053778528F2B583D423B3E46EC647D30"
|
||||
},
|
||||
"power":10,
|
||||
"name":"node1"
|
||||
}
|
||||
,
|
||||
{
|
||||
"pub_key":{
|
||||
"type":"ed25519",
|
||||
"data": "A8423F70A9E512643B4B00F7C3701ECAD1F31B0A1FAA45852C41046353B9A07F"
|
||||
},
|
||||
"power":10,
|
||||
"name":"node2"
|
||||
}
|
||||
,
|
||||
{
|
||||
"pub_key":{
|
||||
"type":"ed25519",
|
||||
"data": "E52EFFAEDFE1D618ECDA71DE3B23592B3612CAABA0C10826E4C3120B2198C29A"
|
||||
},
|
||||
"power":10,
|
||||
"name":"node3"
|
||||
}
|
||||
,
|
||||
{
|
||||
"pub_key":{
|
||||
"type":"ed25519",
|
||||
"data": "2B8FC09C07955A02998DFE5AF1AAD1C44115ECA7635FF51A867CF4265D347C07"
|
||||
},
|
||||
"power":10,
|
||||
"name":"node4"
|
||||
}
|
||||
],
|
||||
"app_hash":""
|
||||
}
|
@@ -1,15 +0,0 @@
|
||||
{
|
||||
"address":"4DC2756029CE0D8F8C6C3E4C3CE6EE8C30AF352F",
|
||||
"pub_key":{
|
||||
"type":"ed25519",
|
||||
"data":"F08446C80A33E10D620E21450821B58D053778528F2B583D423B3E46EC647D30"
|
||||
},
|
||||
"last_height":0,
|
||||
"last_round":0,
|
||||
"last_step":0,
|
||||
"last_signature":null,
|
||||
"priv_key":{
|
||||
"type":"ed25519",
|
||||
"data":"4D3648E1D93C8703E436BFF814728B6BD270CFDFD686DF5385E8ACBEB7BE2D7DF08446C80A33E10D620E21450821B58D053778528F2B583D423B3E46EC647D30"
|
||||
}
|
||||
}
|
@@ -1,15 +0,0 @@
|
||||
# This is a TOML config file.
|
||||
# For more information, see https://github.com/toml-lang/toml
|
||||
|
||||
proxy_app = "tcp://127.0.0.1:46658"
|
||||
moniker = "penguin"
|
||||
fast_sync = true
|
||||
db_backend = "leveldb"
|
||||
log_level = "state:info,*:error"
|
||||
|
||||
[rpc]
|
||||
laddr = "tcp://0.0.0.0:46657"
|
||||
|
||||
[p2p]
|
||||
laddr = "tcp://0.0.0.0:46656"
|
||||
seeds = ""
|
169
docs/examples/node2/config/config.toml
Normal file
@@ -0,0 +1,169 @@
|
||||
# This is a TOML config file.
|
||||
# For more information, see https://github.com/toml-lang/toml
|
||||
|
||||
##### main base config options #####
|
||||
|
||||
# TCP or UNIX socket address of the ABCI application,
|
||||
# or the name of an ABCI application compiled in with the Tendermint binary
|
||||
proxy_app = "tcp://127.0.0.1:46658"
|
||||
|
||||
# A custom human readable name for this node
|
||||
moniker = "charlie"
|
||||
|
||||
# If this node is many blocks behind the tip of the chain, FastSync
|
||||
# allows it to catch up quickly by downloading blocks in parallel
|
||||
# and verifying their commits
|
||||
fast_sync = true
|
||||
|
||||
# Database backend: leveldb | memdb
|
||||
db_backend = "leveldb"
|
||||
|
||||
# Database directory
|
||||
db_path = "data"
|
||||
|
||||
# Output level for logging, including package level options
|
||||
log_level = "main:info,state:info,*:error"
|
||||
|
||||
##### additional base config options #####
|
||||
|
||||
# Path to the JSON file containing the initial validator set and other meta data
|
||||
genesis_file = "config/genesis.json"
|
||||
|
||||
# Path to the JSON file containing the private key to use as a validator in the consensus protocol
|
||||
priv_validator_file = "config/priv_validator.json"
|
||||
|
||||
# Path to the JSON file containing the private key to use for node authentication in the p2p protocol
|
||||
node_key_file = "config/node_key.json"
|
||||
|
||||
# Mechanism to connect to the ABCI application: socket | grpc
|
||||
abci = "socket"
|
||||
|
||||
# TCP or UNIX socket address for the profiling server to listen on
|
||||
prof_laddr = ""
|
||||
|
||||
# If true, query the ABCI app on connecting to a new peer
|
||||
# so the app can decide if we should keep the connection or not
|
||||
filter_peers = false
|
||||
|
||||
##### advanced configuration options #####
|
||||
|
||||
##### rpc server configuration options #####
|
||||
[rpc]
|
||||
|
||||
# TCP or UNIX socket address for the RPC server to listen on
|
||||
laddr = "tcp://0.0.0.0:46657"
|
||||
|
||||
# TCP or UNIX socket address for the gRPC server to listen on
|
||||
# NOTE: This server only supports /broadcast_tx_commit
|
||||
grpc_laddr = ""
|
||||
|
||||
# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool
|
||||
unsafe = false
|
||||
|
||||
##### peer to peer configuration options #####
|
||||
[p2p]
|
||||
|
||||
# Address to listen for incoming connections
|
||||
laddr = "tcp://0.0.0.0:46656"
|
||||
|
||||
# Comma separated list of seed nodes to connect to
|
||||
seeds = ""
|
||||
|
||||
# Comma separated list of nodes to keep persistent connections to
|
||||
# Do not add private peers to this list if you don't want them advertised
|
||||
persistent_peers = ""
|
||||
|
||||
# Path to address book
|
||||
addr_book_file = "config/addrbook.json"
|
||||
|
||||
# Set true for strict address routability rules
|
||||
addr_book_strict = true
|
||||
|
||||
# Time to wait before flushing messages out on the connection, in ms
|
||||
flush_throttle_timeout = 100
|
||||
|
||||
# Maximum number of peers to connect to
|
||||
max_num_peers = 50
|
||||
|
||||
# Maximum size of a message packet payload, in bytes
|
||||
max_packet_msg_payload_size = 1024
|
||||
|
||||
# Rate at which packets can be sent, in bytes/second
|
||||
send_rate = 512000
|
||||
|
||||
# Rate at which packets can be received, in bytes/second
|
||||
recv_rate = 512000
|
||||
|
||||
# Set true to enable the peer-exchange reactor
|
||||
pex = true
|
||||
|
||||
# Seed mode, in which node constantly crawls the network and looks for
|
||||
# peers. If another node asks it for addresses, it responds and disconnects.
|
||||
#
|
||||
# Does not work if the peer-exchange reactor is disabled.
|
||||
seed_mode = false
|
||||
|
||||
# Authenticated encryption
|
||||
auth_enc = true
|
||||
|
||||
# Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
|
||||
private_peer_ids = ""
|
||||
|
||||
##### mempool configuration options #####
|
||||
[mempool]
|
||||
|
||||
recheck = true
|
||||
recheck_empty = true
|
||||
broadcast = true
|
||||
wal_dir = "data/mempool.wal"
|
||||
|
||||
##### consensus configuration options #####
|
||||
[consensus]
|
||||
|
||||
wal_file = "data/cs.wal/wal"
|
||||
|
||||
# All timeouts are in milliseconds
|
||||
timeout_propose = 3000
|
||||
timeout_propose_delta = 500
|
||||
timeout_prevote = 1000
|
||||
timeout_prevote_delta = 500
|
||||
timeout_precommit = 1000
|
||||
timeout_precommit_delta = 500
|
||||
timeout_commit = 1000
|
||||
|
||||
# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
|
||||
skip_timeout_commit = false
|
||||
|
||||
# BlockSize
|
||||
max_block_size_txs = 10000
|
||||
max_block_size_bytes = 1
|
||||
|
||||
# EmptyBlocks mode and possible interval between empty blocks in seconds
|
||||
create_empty_blocks = true
|
||||
create_empty_blocks_interval = 0
|
||||
|
||||
# Reactor sleep duration parameters are in milliseconds
|
||||
peer_gossip_sleep_duration = 100
|
||||
peer_query_maj23_sleep_duration = 2000
|
||||
|
||||
##### transactions indexer configuration options #####
|
||||
[tx_index]
|
||||
|
||||
# What indexer to use for transactions
|
||||
#
|
||||
# Options:
|
||||
# 1) "null" (default)
|
||||
# 2) "kv" - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
|
||||
indexer = "kv"
|
||||
|
||||
# Comma-separated list of tags to index (by default the only tag is tx hash)
|
||||
#
|
||||
# It's recommended to index only a subset of tags due to possible memory
|
||||
# bloat. This, of course, depends on the indexer's DB and the volume of
|
||||
# transactions.
|
||||
index_tags = ""
|
||||
|
||||
# When set to true, tells the indexer to index all tags. Note this may not be
# desirable (see the comment above). IndexTags takes precedence over
# IndexAllTags (i.e. when given both, IndexTags will be indexed).
|
||||
index_all_tags = false
|
39
docs/examples/node2/config/genesis.json
Normal file
@@ -0,0 +1,39 @@
|
||||
{
|
||||
"genesis_time": "0001-01-01T00:00:00Z",
|
||||
"chain_id": "test-chain-A2i3OZ",
|
||||
"validators": [
|
||||
{
|
||||
"pub_key": {
|
||||
"type": "AC26791624DE60",
|
||||
"value": "D+k4AdjnYPWbB9wmad137Bdpo/kAulOoTRQrLy/Qc4k="
|
||||
},
|
||||
"power": 10,
|
||||
"name": ""
|
||||
},
|
||||
{
|
||||
"pub_key": {
|
||||
"type": "AC26791624DE60",
|
||||
"value": "b56N5GCR1adcVRuENjfKw/mrm2dkhT7wNZXV/SDsKsU="
|
||||
},
|
||||
"power": 10,
|
||||
"name": ""
|
||||
},
|
||||
{
|
||||
"pub_key": {
|
||||
"type": "AC26791624DE60",
|
||||
"value": "IgZDpJvGA0TAamicA8ircy+RX/BkUlj6DXwM791ywIU="
|
||||
},
|
||||
"power": 10,
|
||||
"name": ""
|
||||
},
|
||||
{
|
||||
"pub_key": {
|
||||
"type": "AC26791624DE60",
|
||||
"value": "KGAZfxZvIZ7abbeIQ85U1ECG6+I62KSdaH8ulc0+OiU="
|
||||
},
|
||||
"power": 10,
|
||||
"name": ""
|
||||
}
|
||||
],
|
||||
"app_hash": ""
|
||||
}
|
1
docs/examples/node2/config/node_key.json
Normal file
@@ -0,0 +1 @@
|
||||
{"priv_key":{"type":"954568A3288910","value":"COHZ/Y2cWGWxJNkRwtpQBt5sYvOnb6Gpz0lO46XERRJFBIdSWD5x1UMGRSTmnvW1ec5G4bMdg6zUZKOZD+vVPg=="}}
|
14
docs/examples/node2/config/priv_validator.json
Normal file
@@ -0,0 +1,14 @@
|
||||
{
|
||||
"address": "F0AA266949FB29ADA0B679C27889ED930BD1BDA1",
|
||||
"pub_key": {
|
||||
"type": "AC26791624DE60",
|
||||
"value": "IgZDpJvGA0TAamicA8ircy+RX/BkUlj6DXwM791ywIU="
|
||||
},
|
||||
"last_height": 0,
|
||||
"last_round": 0,
|
||||
"last_step": 0,
|
||||
"priv_key": {
|
||||
"type": "954568A3288910",
|
||||
"value": "khADeZ5K/8u/L99DFaZNRq8V5g+EHWbwfqFjhCrppaAiBkOkm8YDRMBqaJwDyKtzL5Ff8GRSWPoNfAzv3XLAhQ=="
|
||||
}
|
||||
}
|
@@ -1,42 +0,0 @@
|
||||
{
|
||||
"genesis_time":"0001-01-01T00:00:00Z",
|
||||
"chain_id":"test-chain-wt7apy",
|
||||
"validators":[
|
||||
{
|
||||
"pub_key":{
|
||||
"type":"ed25519",
|
||||
"data":"F08446C80A33E10D620E21450821B58D053778528F2B583D423B3E46EC647D30"
|
||||
},
|
||||
"power":10,
|
||||
"name":"node1"
|
||||
}
|
||||
,
|
||||
{
|
||||
"pub_key":{
|
||||
"type":"ed25519",
|
||||
"data": "A8423F70A9E512643B4B00F7C3701ECAD1F31B0A1FAA45852C41046353B9A07F"
|
||||
},
|
||||
"power":10,
|
||||
"name":"node2"
|
||||
}
|
||||
,
|
||||
{
|
||||
"pub_key":{
|
||||
"type":"ed25519",
|
||||
"data": "E52EFFAEDFE1D618ECDA71DE3B23592B3612CAABA0C10826E4C3120B2198C29A"
|
||||
},
|
||||
"power":10,
|
||||
"name":"node3"
|
||||
}
|
||||
,
|
||||
{
|
||||
"pub_key":{
|
||||
"type":"ed25519",
|
||||
"data": "2B8FC09C07955A02998DFE5AF1AAD1C44115ECA7635FF51A867CF4265D347C07"
|
||||
},
|
||||
"power":10,
|
||||
"name":"node4"
|
||||
}
|
||||
],
|
||||
"app_hash":""
|
||||
}
|
@@ -1,15 +0,0 @@
|
||||
{
|
||||
"address": "DD6C63A762608A9DDD4A845657743777F63121D6",
|
||||
"pub_key": {
|
||||
"type": "ed25519",
|
||||
"data": "A8423F70A9E512643B4B00F7C3701ECAD1F31B0A1FAA45852C41046353B9A07F"
|
||||
},
|
||||
"last_height": 0,
|
||||
"last_round": 0,
|
||||
"last_step": 0,
|
||||
"last_signature": null,
|
||||
"priv_key": {
|
||||
"type": "ed25519",
|
||||
"data": "7B0DE666FF5E9B437D284BCE767F612381890C018B93B0A105D2E829A568DA6FA8423F70A9E512643B4B00F7C3701ECAD1F31B0A1FAA45852C41046353B9A07F"
|
||||
}
|
||||
}
|
Some files were not shown because too many files have changed in this diff.