Mirror of https://github.com/fluencelabs/tendermint, synced 2025-07-17 05:11:57 +00:00
Compare commits
332 Commits
The commit listing captured only abbreviated commit SHAs (d419fffe18 through 8128627f08); the author, date, and message columns are empty in this mirror view.
.circleci/codecov.sh (new file, 1550 lines): file diff suppressed because it is too large.

The hunks that follow are from the CircleCI build configuration, .circleci/config.yml.
@@ -16,7 +16,7 @@ jobs:
- checkout
- restore_cache:
keys:
- v2-pkg-cache
- v3-pkg-cache
- run:
name: tools
command: |
@@ -38,13 +38,13 @@ jobs:
- bin
- profiles
- save_cache:
key: v2-pkg-cache
key: v3-pkg-cache
paths:
- /go/pkg
- save_cache:
key: v2-tree-{{ .Environment.CIRCLE_SHA1 }}
paths:
- /go/src/github.com/tendermint/tendermint
# - save_cache:
# key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
# paths:
# - /go/src/github.com/tendermint/tendermint

build_slate:
<<: *defaults
@@ -52,9 +52,24 @@ jobs:
- attach_workspace:
at: /tmp/workspace
- restore_cache:
key: v2-pkg-cache
- restore_cache:
key: v2-tree-{{ .Environment.CIRCLE_SHA1 }}
key: v3-pkg-cache
# https://discuss.circleci.com/t/saving-cache-stopped-working-warning-skipping-this-step-disabled-in-configuration/24423/2
# - restore_cache:
# key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- checkout
- run:
name: tools
command: |
export PATH="$GOBIN:$PATH"
make get_tools
- run:
name: dependencies
command: |
export PATH="$GOBIN:$PATH"
make get_vendor_deps
- run: mkdir -p $GOPATH/src/github.com/tendermint
- run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint

- run:
name: slate docs
command: |
@@ -68,15 +83,35 @@ jobs:
- attach_workspace:
at: /tmp/workspace
- restore_cache:
key: v2-pkg-cache
- restore_cache:
key: v2-tree-{{ .Environment.CIRCLE_SHA1 }}
key: v3-pkg-cache
# - restore_cache:
# key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- checkout
- run:
name: tools
command: |
export PATH="$GOBIN:$PATH"
make get_tools
- run:
name: dependencies
command: |
export PATH="$GOBIN:$PATH"
make get_vendor_deps
- run: mkdir -p $GOPATH/src/github.com/tendermint
- run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint

- run:
name: metalinter
command: |
set -ex
export PATH="$GOBIN:$PATH"
make metalinter
- run:
name: check_dep
command: |
set -ex
export PATH="$GOBIN:$PATH"
make check_dep

test_abci_apps:
<<: *defaults
@@ -84,9 +119,23 @@ jobs:
- attach_workspace:
at: /tmp/workspace
- restore_cache:
key: v2-pkg-cache
- restore_cache:
key: v2-tree-{{ .Environment.CIRCLE_SHA1 }}
key: v3-pkg-cache
# - restore_cache:
# key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- checkout
- run:
name: tools
command: |
export PATH="$GOBIN:$PATH"
make get_tools
- run:
name: dependencies
command: |
export PATH="$GOBIN:$PATH"
make get_vendor_deps
- run: mkdir -p $GOPATH/src/github.com/tendermint
- run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint

- run:
name: Run abci apps tests
command: |
@@ -101,9 +150,23 @@ jobs:
- attach_workspace:
at: /tmp/workspace
- restore_cache:
key: v2-pkg-cache
- restore_cache:
key: v2-tree-{{ .Environment.CIRCLE_SHA1 }}
key: v3-pkg-cache
# - restore_cache:
# key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- checkout
- run:
name: tools
command: |
export PATH="$GOBIN:$PATH"
make get_tools
- run:
name: dependencies
command: |
export PATH="$GOBIN:$PATH"
make get_vendor_deps
- run: mkdir -p $GOPATH/src/github.com/tendermint
- run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint

- run:
name: Run abci-cli tests
command: |
@@ -116,9 +179,23 @@ jobs:
- attach_workspace:
at: /tmp/workspace
- restore_cache:
key: v2-pkg-cache
- restore_cache:
key: v2-tree-{{ .Environment.CIRCLE_SHA1 }}
key: v3-pkg-cache
# - restore_cache:
# key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- checkout
- run:
name: tools
command: |
export PATH="$GOBIN:$PATH"
make get_tools
- run:
name: dependencies
command: |
export PATH="$GOBIN:$PATH"
make get_vendor_deps
- run: mkdir -p $GOPATH/src/github.com/tendermint
- run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint

- run: sudo apt-get update && sudo apt-get install -y --no-install-recommends bsdmainutils
- run:
name: Run tests
@@ -131,17 +208,31 @@ jobs:
- attach_workspace:
at: /tmp/workspace
- restore_cache:
key: v2-pkg-cache
- restore_cache:
key: v2-tree-{{ .Environment.CIRCLE_SHA1 }}
key: v3-pkg-cache
# - restore_cache:
# key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- checkout
- run:
name: tools
command: |
export PATH="$GOBIN:$PATH"
make get_tools
- run:
name: dependencies
command: |
export PATH="$GOBIN:$PATH"
make get_vendor_deps
- run: mkdir -p $GOPATH/src/github.com/tendermint
- run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint

- run: mkdir -p /tmp/logs
- run:
name: Run tests
command: |
for pkg in $(go list github.com/tendermint/tendermint/... | grep -v /vendor/ | circleci tests split --split-by=timings); do
for pkg in $(go list github.com/tendermint/tendermint/... | circleci tests split --split-by=timings); do
id=$(basename "$pkg")

GOCACHE=off go test -v -timeout 5m -race -coverprofile=/tmp/workspace/profiles/$id.out -covermode=atomic "$pkg" | tee "/tmp/logs/$id-$RANDOM.log"
GOCACHE=off go test -timeout 5m -race -coverprofile=/tmp/workspace/profiles/$id.out -covermode=atomic "$pkg" | tee "/tmp/logs/$id-$RANDOM.log"
done
- persist_to_workspace:
root: /tmp/workspace
@@ -156,13 +247,49 @@ jobs:
- attach_workspace:
at: /tmp/workspace
- restore_cache:
key: v2-pkg-cache
- restore_cache:
key: v2-tree-{{ .Environment.CIRCLE_SHA1 }}
key: v3-pkg-cache
# - restore_cache:
# key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- checkout
- run:
name: tools
command: |
export PATH="$GOBIN:$PATH"
make get_tools
- run:
name: dependencies
command: |
export PATH="$GOBIN:$PATH"
make get_vendor_deps
- run: mkdir -p $GOPATH/src/github.com/tendermint
- run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint

- run:
name: Run tests
command: bash test/persist/test_failure_indices.sh

localnet:
working_directory: /home/circleci/.go_workspace/src/github.com/tendermint/tendermint
machine:
image: circleci/classic:latest
environment:
GOBIN: /home/circleci/.go_workspace/bin
GOPATH: /home/circleci/.go_workspace/
GOOS: linux
GOARCH: amd64
parallelism: 1
steps:
- checkout
- run:
name: run localnet and exit on failure
command: |
set -x
make get_tools
make get_vendor_deps
make build-linux
make localnet-start &
./scripts/localnet-blocks-test.sh 40 5 10 localhost

test_p2p:
environment:
GOBIN: /home/circleci/.go_workspace/bin
@@ -180,8 +307,22 @@ jobs:
steps:
- attach_workspace:
at: /tmp/workspace
- restore_cache:
key: v2-tree-{{ .Environment.CIRCLE_SHA1 }}
# - restore_cache:
# key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- checkout
- run:
name: tools
command: |
export PATH="$GOBIN:$PATH"
make get_tools
- run:
name: dependencies
command: |
export PATH="$GOBIN:$PATH"
make get_vendor_deps
- run: mkdir -p $GOPATH/src/github.com/tendermint
- run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint

- run:
name: gather
command: |
@@ -193,7 +334,7 @@ jobs:
done
- run:
name: upload
command: bash <(curl -s https://codecov.io/bash) -f coverage.txt
command: bash .circleci/codecov.sh -f coverage.txt

workflows:
version: 2
@@ -218,6 +359,9 @@ workflows:
- test_persistence:
requires:
- setup_dependencies
- localnet:
requires:
- setup_dependencies
- test_p2p
- upload_coverage:
requires:
.github/CODEOWNERS (2 lines changed, vendored)

@@ -4,4 +4,4 @@
* @ebuchman @melekes @xla

# Precious documentation
/docs/ @zramsay @jolesbi
/docs/ @zramsay
.github/ISSUE_TEMPLATE (deleted, 42 lines, vendored)

@@ -1,42 +0,0 @@
<!-- Thanks for filing an issue! Before hitting the button, please answer these questions.-->

**Is this a BUG REPORT or FEATURE REQUEST?** (choose one):

<!--
If this is a BUG REPORT, please:
- Fill in as much of the template below as you can.

If this is a FEATURE REQUEST, please:
- Describe *in detail* the feature/behavior/change you'd like to see.

In both cases, be ready for followup questions, and please respond in a timely
manner. We might ask you to provide additional logs and data (tendermint & app)
in a case of bug.
-->

**Tendermint version** (use `tendermint version` or `git rev-parse --verify HEAD` if installed from source):


**ABCI app** (name for built-in, URL for self-written if it's publicly available):

**Environment**:
- **OS** (e.g. from /etc/os-release):
- **Install tools**:
- **Others**:


**What happened**:


**What you expected to happen**:


**How to reproduce it** (as minimally and precisely as possible):

**Logs (you can paste a small part showing an error or link a pastebin, gist, etc. containing more of the log file)**:

**Config (you can paste only the changes you've made)**:

**`/dump_consensus_state` output for consensus bugs**

**Anything else do we need to know**:
.github/ISSUE_TEMPLATE/bug-report.md (new file, 42 lines, vendored)

@@ -0,0 +1,42 @@
---
name: Bug Report
about: Create a report to help us squash bugs!

---
<!--
Please fill in as much of the template below as you can.

Be ready for followup questions, and please respond in a timely
manner. We might ask you to provide additional logs and data (tendermint & app).
-->

**Tendermint version** (use `tendermint version` or `git rev-parse --verify HEAD` if installed from source):


**ABCI app** (name for built-in, URL for self-written if it's publicly available):

**Environment**:
- **OS** (e.g. from /etc/os-release):
- **Install tools**:
- **Others**:


**What happened**:


**What you expected to happen**:


**Have you tried the latest version**: yes/no

**How to reproduce it** (as minimally and precisely as possible):

**Logs (paste a small part showing an error (< 10 lines) or link a pastebin, gist, etc. containing more of the log file)**:

**Config (you can paste only the changes you've made)**:

**node command runtime flags**:

**`/dump_consensus_state` output for consensus bugs**

**Anything else we need to know**:
.github/ISSUE_TEMPLATE/feature-request.md (new file, 13 lines, vendored)

@@ -0,0 +1,13 @@
---
name: Feature Request
about: Create a proposal to request a feature

---
<!--
Please describe *in detail* the feature/behavior/change you'd like to see.

Be ready for followup questions, and please respond in a timely
manner.

Word of caution: poorly thought out proposals may be rejected without deliberation
-->
.github/PULL_REQUEST_TEMPLATE.md (2 lines changed, vendored)

@@ -3,4 +3,4 @@
* [ ] Updated all relevant documentation in docs
* [ ] Updated all code comments where relevant
* [ ] Wrote tests
* [ ] Updated CHANGELOG.md
* [ ] Updated CHANGELOG_PENDING.md
.gitignore (7 lines changed, vendored)

@@ -16,8 +16,8 @@ coverage.txt
docs/_build
*.log
abci-cli
abci/types/types.pb.go
docs/node_modules/
index.html.md

scripts/wal2json/wal2json
scripts/cutWALUntil/cutWALUntil
@@ -28,9 +28,14 @@ scripts/cutWALUntil/cutWALUntil
libs/pubsub/query/fuzz_test/output
shunit2

.tendermint-lite
addrbook.json

*/vendor
*/.glide
.terraform
terraform.tfstate
terraform.tfstate.backup
terraform.tfstate.d

.vscode
CHANGELOG.md (220 lines changed)
@@ -1,5 +1,220 @@
# Changelog

## 0.24.0

*September 6th, 2018*

Special thanks to external contributors with PRs included in this release: ackratos, james-ray, bradyjoestar, peerlink, Ahmah2009, bluele, b00f.

This release includes breaking upgrades in the block header, including the long awaited changes for delaying validator set updates by one block to better support light clients.
It also fixes enforcement on the maximum size of blocks, and includes a BFT timestamp in each block that can be safely used by applications.
There are also some minor breaking changes to the rpc, config, and ABCI.

See the [UPGRADING.md](UPGRADING.md#v0.24.0) for details on upgrading to the new version.

From here on, breaking changes will be broken down to better reflect how users are affected by a change.

A few more breaking changes are in the works - each will come with a clear Architecture Decision Record (ADR) explaining the change. You can review ADRs [here](https://github.com/tendermint/tendermint/tree/develop/docs/architecture) or in the [open Pull Requests](https://github.com/tendermint/tendermint/pulls). You can also check in on the [issues marked as breaking](https://github.com/tendermint/tendermint/issues?q=is%3Aopen+is%3Aissue+label%3Abreaking).

BREAKING CHANGES:

* CLI/RPC/Config
  - [config] [\#2169](https://github.com/tendermint/tendermint/issues/2169) Replace MaxNumPeers with MaxNumInboundPeers and MaxNumOutboundPeers
  - [config] [\#2300](https://github.com/tendermint/tendermint/issues/2300) Reduce default mempool size from 100k to 5k, until ABCI rechecking is implemented.
  - [rpc] [\#1815](https://github.com/tendermint/tendermint/issues/1815) `/commit` returns a `signed_header` field instead of everything being top-level

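As a rough illustration of the `/commit` change above, here is a minimal sketch of reading the new `signed_header` shape through the Go RPC client. The client constructor, address, and field names are assumed from the rpc/client and types packages around this release, not taken from this diff:

```go
package main

import (
	"fmt"

	rpcclient "github.com/tendermint/tendermint/rpc/client"
)

func main() {
	// Talk to a local node's RPC endpoint (default port assumed).
	c := rpcclient.NewHTTP("tcp://localhost:26657", "/websocket")

	height := int64(10)
	res, err := c.Commit(&height)
	if err != nil {
		panic(err)
	}

	// Post-#1815 the header and commit are grouped under signed_header
	// instead of being returned as top-level fields.
	fmt.Println("chain:", res.SignedHeader.Header.ChainID)
	fmt.Println("commit block ID:", res.SignedHeader.Commit.BlockID)
}
```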
* Apps
  - [abci] Added address of the original proposer of the block to Header
  - [abci] Change ABCI Header to match Tendermint exactly
  - [abci] [\#2159](https://github.com/tendermint/tendermint/issues/2159) Update use of `Validator` (see [ADR-018](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-018-ABCI-Validators.md)):
    - Remove PubKey from `Validator` (so it's just Address and Power)
    - Introduce `ValidatorUpdate` (with just PubKey and Power)
    - InitChain and EndBlock use ValidatorUpdate
    - Update field names and types in BeginBlock
  - [state] [\#1815](https://github.com/tendermint/tendermint/issues/1815) Validator set changes are now delayed by one block
    - updates returned in ResponseEndBlock for block H will be included in RequestBeginBlock for block H+2

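A minimal sketch of what the ADR-018 shape looks like from an application's side: EndBlock returns `ValidatorUpdate` entries (PubKey and Power only), which, per \#1815, take effect two blocks later. The package layout and field names are assumed from `abci/types` of this release; the application itself is hypothetical:

```go
package main

import (
	"github.com/tendermint/tendermint/abci/types"
)

// App is a toy ABCI application that queues validator changes.
type App struct {
	types.BaseApplication
	pendingUpdates []types.ValidatorUpdate
}

// EndBlock hands back the queued changes. Updates returned for block H
// are applied starting at block H+2 because of the one-block delay.
func (app *App) EndBlock(req types.RequestEndBlock) types.ResponseEndBlock {
	updates := app.pendingUpdates
	app.pendingUpdates = nil
	return types.ResponseEndBlock{ValidatorUpdates: updates}
}
```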
* Go API
  - [lite] [\#1815](https://github.com/tendermint/tendermint/issues/1815) Complete refactor of the package
  - [node] [\#2212](https://github.com/tendermint/tendermint/issues/2212) NewNode now accepts a `*p2p.NodeKey` (@bradyjoestar)
  - [libs/common] [\#2199](https://github.com/tendermint/tendermint/issues/2199) Remove Fmt, in favor of fmt.Sprintf
  - [libs/common] SplitAndTrim was deleted
  - [libs/common] [\#2274](https://github.com/tendermint/tendermint/issues/2274) Remove unused Math functions like MaxInt, MaxInt64, MinInt, MinInt64 (@Ahmah2009)
  - [libs/clist] Panics if list extends beyond MaxLength
  - [crypto] [\#2205](https://github.com/tendermint/tendermint/issues/2205) Rename AminoRoute variables to no longer be prefixed by signature type.

* Blockchain Protocol
  - [state] [\#1815](https://github.com/tendermint/tendermint/issues/1815) Validator set changes are now delayed by one block (!)
    - Add NextValidatorSet to State, changes on-disk representation of state
  - [state] [\#2184](https://github.com/tendermint/tendermint/issues/2184) Enforce ConsensusParams.BlockSize.MaxBytes (See [ADR-020](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-020-block-size.md)).
    - Remove ConsensusParams.BlockSize.MaxTxs
    - Introduce maximum sizes for all components of a block, including ChainID
  - [types] Updates to the block Header:
    - [\#1815](https://github.com/tendermint/tendermint/issues/1815) NextValidatorsHash - hash of the validator set for the next block, so the current validators actually sign over the hash for the new validators
    - [\#2106](https://github.com/tendermint/tendermint/issues/2106) ProposerAddress - address of the block's original proposer
  - [consensus] [\#2203](https://github.com/tendermint/tendermint/issues/2203) Implement BFT time
    - Timestamp in block must be monotonic and equal the median of timestamps in block's LastCommit
  - [crypto] [\#2239](https://github.com/tendermint/tendermint/issues/2239) Secp256k1 signature changes (See [ADR-014](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-014-secp-malleability.md)):
    - format changed from DER to `r || s`, both little endian encoded as 32 bytes.
    - malleability removed by requiring `s` to be in canonical form.

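The BFT time entry above says the block timestamp must be monotonic and equal the median of the timestamps in the block's LastCommit. A toy illustration of the median rule follows (a plain median; the in-tree computation also weights precommits by validator voting power):

```go
package main

import (
	"fmt"
	"sort"
	"time"
)

// medianTime returns the median of the given precommit timestamps.
// It assumes a non-empty slice; this is only an illustration of the rule.
func medianTime(ts []time.Time) time.Time {
	sorted := append([]time.Time(nil), ts...)
	sort.Slice(sorted, func(i, j int) bool { return sorted[i].Before(sorted[j]) })
	return sorted[len(sorted)/2]
}

func main() {
	now := time.Now()
	precommitTimes := []time.Time{
		now.Add(-2 * time.Second),
		now,
		now.Add(3 * time.Second),
	}
	fmt.Println("BFT timestamp for the block:", medianTime(precommitTimes))
}
```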
* P2P Protocol
  - [p2p] [\#2263](https://github.com/tendermint/tendermint/issues/2263) Update secret connection to use a little endian encoded nonce
  - [blockchain] [\#2213](https://github.com/tendermint/tendermint/issues/2213) Fix Amino routes for blockchain reactor messages (@peerlink)


FEATURES:
- [types] [\#2015](https://github.com/tendermint/tendermint/issues/2015) Allow genesis file to have 0 validators (@b00f)
  - Initial validator set can be determined by the app in ResponseInitChain
- [rpc] [\#2161](https://github.com/tendermint/tendermint/issues/2161) New event `ValidatorSetUpdates` for when the validator set changes
- [crypto/multisig] [\#2164](https://github.com/tendermint/tendermint/issues/2164) Introduce multisig pubkey and signature format
- [libs/db] [\#2293](https://github.com/tendermint/tendermint/issues/2293) Allow passing options through when creating instances of leveldb dbs

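For the \#2015 entry above, a sketch of an application choosing the initial validator set in ResponseInitChain when the genesis file names no validators. The `abci/types` field names are assumed from this release, and the key type string and key bytes are placeholders:

```go
package main

import (
	"github.com/tendermint/tendermint/abci/types"
)

// App is a toy ABCI application that supplies validators at genesis.
type App struct {
	types.BaseApplication
}

// InitChain returns an initial validator set when the genesis file has none.
func (app *App) InitChain(req types.RequestInitChain) types.ResponseInitChain {
	if len(req.Validators) > 0 {
		// The genesis file already named validators; accept them as-is.
		return types.ResponseInitChain{}
	}
	// Otherwise the app decides: a single validator with a placeholder key.
	return types.ResponseInitChain{
		Validators: []types.ValidatorUpdate{
			{PubKey: types.PubKey{Type: "ed25519", Data: make([]byte, 32)}, Power: 10},
		},
	}
}
```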
IMPROVEMENTS:
- [docs] Lint documentation with `write-good` and `stop-words`.
- [docs] [\#2249](https://github.com/tendermint/tendermint/issues/2249) Refactor, deduplicate, and improve the ABCI docs and spec (with thanks to @ttmc).
- [scripts] [\#2196](https://github.com/tendermint/tendermint/issues/2196) Added json2wal tool, which is supposed to help our users restore (@bradyjoestar) corrupted WAL files and compose test WAL files (@bradyjoestar)
- [mempool] [\#2234](https://github.com/tendermint/tendermint/issues/2234) Now stores txs by hash inside of the cache, to mitigate memory leakage
- [mempool] [\#2166](https://github.com/tendermint/tendermint/issues/2166) Set explicit capacity for map when updating txs (@bluele)

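The first mempool entry above moves the cache from storing whole transactions to storing their hashes. A self-contained toy version of that idea (not the actual mempool code):

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

// txCache remembers recently seen transactions by a fixed-size hash,
// so it no longer keeps full transaction bodies alive in memory.
type txCache struct {
	seen map[[sha256.Size]byte]struct{}
}

func newTxCache() *txCache {
	return &txCache{seen: make(map[[sha256.Size]byte]struct{})}
}

// Push records a tx and reports whether it was new.
func (c *txCache) Push(tx []byte) bool {
	key := sha256.Sum256(tx)
	if _, ok := c.seen[key]; ok {
		return false
	}
	c.seen[key] = struct{}{}
	return true
}

func main() {
	c := newTxCache()
	fmt.Println(c.Push([]byte("tx1"))) // true: first time seen
	fmt.Println(c.Push([]byte("tx1"))) // false: duplicate caught by hash
}
```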
BUG FIXES:
- [config] [\#2284](https://github.com/tendermint/tendermint/issues/2284) Replace `db_path` with `db_dir` from automatically generated configuration files.
- [mempool] [\#2188](https://github.com/tendermint/tendermint/issues/2188) Fix OOM issue from cache map and list getting out of sync
- [state] [\#2051](https://github.com/tendermint/tendermint/issues/2051) KV store index supports searching by `tx.height` (@ackratos)
- [rpc] [\#2327](https://github.com/tendermint/tendermint/issues/2327) `/dial_peers` does not try to dial existing peers
- [node] [\#2323](https://github.com/tendermint/tendermint/issues/2323) Filter empty strings from config lists (@james-ray)
- [abci/client] [\#2236](https://github.com/tendermint/tendermint/issues/2236) Fix closing GRPC connection (@bradyjoestar)

## 0.23.1

*August 22nd, 2018*

BUG FIXES:
- [libs/autofile] \#2261 Fix log rotation so it actually happens.
  - Fixes issues with consensus WAL growing unbounded ala \#2259

## 0.23.0

*August 5th, 2018*

This release includes breaking upgrades in our P2P encryption, some ABCI messages, and how we encode time and signatures.

A few more changes are still coming to the Header, ABCI, and validator set handling to better support light clients, BFT time, and upgrades. Most notably, validator set changes will be delayed by one block (see [#1815][i1815]).

We also removed `make ensure_deps` in favour of `make get_vendor_deps`.

BREAKING CHANGES:
- [abci] Changed time format from int64 to google.protobuf.Timestamp
- [abci] Changed Validators to LastCommitInfo in RequestBeginBlock
- [abci] Removed Fee from ResponseDeliverTx and ResponseCheckTx
- [crypto] Switch crypto.Signature from interface to []byte for space efficiency [#2128](https://github.com/tendermint/tendermint/pull/2128)
  - NOTE: this means signatures no longer have the prefix bytes in Amino binary nor the `type` field in Amino JSON. They're just bytes.
- [p2p] Remove salsa and ripemd primitives, in favor of using chacha as a stream cipher, and hkdf [#2054](https://github.com/tendermint/tendermint/pull/2054)
- [tools] Removed `make ensure_deps` in favor of `make get_vendor_deps`
- [types] CanonicalTime uses nanoseconds instead of clipping to ms
  - breaks serialization/signing of all messages with a timestamp

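For the crypto.Signature change above, a sketch of signing and verifying after signatures became plain byte slices; it assumes the `crypto/ed25519` subpackage introduced by the 0.22.5 refactor and the post-\#2128 `Sign`/`VerifyBytes` signatures:

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/crypto/ed25519"
)

func main() {
	priv := ed25519.GenPrivKey()
	msg := []byte("vote")

	// Sign now returns a raw []byte signature: no interface wrapper,
	// no Amino prefix bytes, just the signature itself.
	sig, err := priv.Sign(msg)
	if err != nil {
		panic(err)
	}

	pub := priv.PubKey()
	fmt.Println("signature length:", len(sig))
	fmt.Println("valid:", pub.VerifyBytes(msg, sig))
}
```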
FEATURES:
- [tools] Added `make check_dep`
  - ensures gopkg.lock is synced with gopkg.toml
  - ensures no branches are used in the gopkg.toml

IMPROVEMENTS:
- [blockchain] Improve fast-sync logic [#1805](https://github.com/tendermint/tendermint/pull/1805)
  - tweak params
  - only process one block at a time to avoid starving
- [common] bit array functions which take in another parameter are now thread safe
- [crypto] Switch hkdfchachapoly1305 to xchachapoly1305
- [p2p] begin connecting to peers as soon a seed node provides them to you ([#2093](https://github.com/tendermint/tendermint/issues/2093))

BUG FIXES:
- [common] Safely handle cases where atomic write files already exist [#2109](https://github.com/tendermint/tendermint/issues/2109)
- [privval] fix a deadline for accepting new connections in socket private validator.
- [p2p] Allow startup if a configured seed node's IP can't be resolved ([#1716](https://github.com/tendermint/tendermint/issues/1716))
- [node] Fully exit when CTRL-C is pressed even if consensus state panics [#2072](https://github.com/tendermint/tendermint/issues/2072)

[i1815]: https://github.com/tendermint/tendermint/pull/1815

## 0.22.8

*July 26th, 2018*

BUG FIXES

- [consensus, blockchain] Fix 0.22.7 below.

## 0.22.7

*July 26th, 2018*

BUG FIXES

- [consensus, blockchain] Register the Evidence interface so it can be marshalled/unmarshalled by the blockchain and consensus reactors

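The 0.22.7 fix above is about go-amino interface registration. A rough illustration of the pattern it refers to; the codec wiring and route name here are illustrative, not the reactors' actual registration code:

```go
package main

import (
	amino "github.com/tendermint/go-amino"
	"github.com/tendermint/tendermint/types"
)

// registerEvidence wires the Evidence interface and one concrete type into a
// codec; without this, messages carrying Evidence cannot be (un)marshalled.
func registerEvidence(cdc *amino.Codec) {
	cdc.RegisterInterface((*types.Evidence)(nil), nil)
	cdc.RegisterConcrete(&types.DuplicateVoteEvidence{}, "tendermint/DuplicateVoteEvidence", nil)
}

func main() {
	cdc := amino.NewCodec()
	registerEvidence(cdc)
}
```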
## 0.22.6

*July 24th, 2018*

BUG FIXES

- [rpc] Fix `/blockchain` endpoint
  - (#2049) Fix OOM attack by returning error on negative input
  - Fix result length to have max 20 (instead of 21) block metas
- [rpc] Validate height is non-negative in `/abci_query`
- [consensus] (#2050) Include evidence in proposal block parts (previously evidence was not being included in blocks!)
- [p2p] (#2046) Close rejected inbound connections so file descriptor doesn't leak
- [Gopkg] (#2053) Fix versions in the toml

## 0.22.5

*July 23th, 2018*

BREAKING CHANGES:
- [crypto] Refactor `tendermint/crypto` into many subpackages
- [libs/common] remove exponentially distributed random numbers

IMPROVEMENTS:
- [abci, libs/common] Generated gogoproto static marshaller methods
- [config] Increase default send/recv rates to 5 mB/s
- [p2p] reject addresses coming from private peers
- [p2p] allow persistent peers to be private

BUG FIXES:
- [mempool] fixed a race condition when `create_empty_blocks=false` where a transaction is published at an old height.
- [p2p] dial external IP setup by `persistent_peers`, not internal NAT IP
- [rpc] make `/status` RPC endpoint resistant to consensus halt

## 0.22.4

*July 14th, 2018*

@@ -14,7 +229,8 @@ FEATURES:

BUG FIXES:
- [tools/tm-bench] Various fixes
- [consensus] Wait for WAL to stop on shutdown
- [abci] Fix #1891, pending requests cannot hang when abci server dies. Previously a crash in BeginBlock could leave tendermint in broken state.
- [abci] Fix #1891, pending requests cannot hang when abci server dies.
  Previously a crash in BeginBlock could leave tendermint in broken state.

## 0.22.3

@@ -534,7 +750,7 @@ BREAKING CHANGES:
- use scripts/wal2json to convert to json for debugging

FEATURES:
- new `certifiers` pkg contains the tendermint light-client library (name subject to change)!
- new `Verifiers` pkg contains the tendermint light-client library (name subject to change)!
- rpc: `/genesis` includes the `app_options` .
- rpc: `/abci_query` takes an additional `height` parameter to support historical queries.
- rpc/client: new ABCIQueryWithOptions supports options like `trusted` (set false to get a proof) and `height` to query a historical height.
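The last entry above introduces ABCIQueryWithOptions. A small sketch of how it is used through the Go RPC client as it exists around this release; the option fields mirror the `trusted` and `height` parameters named in the entry:

```go
package main

import (
	"fmt"

	cmn "github.com/tendermint/tendermint/libs/common"
	rpcclient "github.com/tendermint/tendermint/rpc/client"
)

func main() {
	c := rpcclient.NewHTTP("tcp://localhost:26657", "/websocket")

	opts := rpcclient.ABCIQueryOptions{
		Height:  100,   // query application state at a historical height
		Trusted: false, // ask for a proof alongside the value
	}
	res, err := c.ABCIQueryWithOptions("/key", cmn.HexBytes("mykey"), opts)
	if err != nil {
		panic(err)
	}
	fmt.Printf("value: %X\n", res.Response.Value)
}
```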
CHANGELOG_PENDING.md (new file, 22 lines)

@@ -0,0 +1,22 @@
# Pending

Special thanks to external contributors with PRs included in this release:

BREAKING CHANGES:

* CLI/RPC/Config

* Apps

* Go API

* Blockchain Protocol

* P2P Protocol


FEATURES:

IMPROVEMENTS:

BUG FIXES:
Gopkg.lock (generated, 221 lines changed)

The lock file was regenerated by dep with pruning enabled: every [[projects]] block gains a `digest` field and `pruneopts = "UT"`, multi-line `packages` lists gain trailing commas, and the [solve-meta] section now records an `input-imports` list of all imported packages instead of an `inputs-digest`. Dependency updates in this diff include:

- github.com/btcsuite/btcd: pinned revision moved from fdfc19097e7a... to f5e261fc9ec3...
- github.com/gogo/protobuf: v1.0.0 -> v1.1.1
- github.com/golang/protobuf: v1.0.0 -> v1.1.0
- github.com/tendermint/btcd (btcec): added at revision e5840949ff4f...
- github.com/tendermint/go-amino: 0.10.1 -> v0.12.0-rc0
- google.golang.org/grpc: v1.11.3 -> v1.13.0 (vendored package list updated accordingly)
- master revisions bumped for github.com/mitchellh/mapstructure, github.com/prometheus/client_model, github.com/prometheus/common, github.com/prometheus/procfs, golang.org/x/crypto, golang.org/x/sys, and google.golang.org/genproto
Gopkg.toml (31 lines changed)

@@ -10,11 +10,6 @@
# name = "github.com/user/project"
# version = "1.0.0"
#
# [[constraint]]
# name = "github.com/user/project2"
# branch = "dev"
# source = "github.com/myfork/project2"
#
# [[override]]
# name = "github.com/x/y"
# version = "2.4.0"
@@ -35,11 +30,11 @@

[[constraint]]
name = "github.com/gogo/protobuf"
version = "=1.0.0"
version = "=1.1.1"

[[constraint]]
name = "github.com/golang/protobuf"
version = "=1.0.0"
version = "=1.1.0"

[[constraint]]
name = "github.com/gorilla/websocket"
@@ -63,11 +58,11 @@

[[constraint]]
name = "github.com/tendermint/go-amino"
version = "=0.10.1"
version = "v0.12.0-rc0"

[[constraint]]
name = "google.golang.org/grpc"
version = "=1.11.3"
version = "=1.13.0"

[[constraint]]
name = "github.com/fortytw2/leaktest"
@@ -77,19 +72,9 @@
## Some repos dont have releases.
## Pin to revision

## We can remove this one by updating protobuf to v1.1.0
## but then the grpc tests break with
#--- FAIL: TestBroadcastTx (0.01s)
#panic: message/group field common.KVPair:bytes without pointer [recovered]
# panic: message/group field common.KVPair:bytes without pointer
#
# ...
#
# github.com/tendermint/tendermint/rpc/grpc_test.TestBroadcastTx(0xc420a5ab40)
# /go/src/github.com/tendermint/tendermint/rpc/grpc/grpc_test.go:29 +0x141
[[override]]
name = "google.golang.org/genproto"
revision = "7fd901a49ba6a7f87732eb344f6e3c5b19d1b200"
name = "github.com/jmhodges/levigo"
revision = "c42d9e0ca023e2198120196f842701bb4c55d7b9"

[[constraint]]
name = "github.com/ebuchman/fail-test"
@@ -100,6 +85,10 @@
name = "github.com/btcsuite/btcutil"
revision = "d4cc87b860166d00d6b5b9e0d3b3d71d6088d4d4"

[[constraint]]
name = "github.com/tendermint/btcd"
revision = "e5840949ff4fff0c56f9b6a541e22b63581ea9df"

# Haven't made a release since 2016.
[[constraint]]
name = "github.com/prometheus/client_golang"
Makefile (98 lines changed)

@@ -3,41 +3,50 @@ GOTOOLS = \
github.com/golang/dep/cmd/dep \
gopkg.in/alecthomas/gometalinter.v2 \
github.com/gogo/protobuf/protoc-gen-gogo \
github.com/gogo/protobuf/gogoproto \
github.com/square/certstrap
PACKAGES=$(shell go list ./... | grep -v '/vendor/')
PACKAGES=$(shell go list ./...)

INCLUDE = -I=. -I=${GOPATH}/src -I=${GOPATH}/src/github.com/gogo/protobuf/protobuf
BUILD_TAGS?=tendermint
BUILD_TAGS?='tendermint'
BUILD_FLAGS = -ldflags "-X github.com/tendermint/tendermint/version.GitCommit=`git rev-parse --short=8 HEAD`"

LINT_FLAGS = --exclude '.*\.pb\.go' --vendor --deadline=600s

all: check build test install

check: check_tools ensure_deps
check: check_tools get_vendor_deps


########################################
### Build Tendermint

build:
CGO_ENABLED=0 go build $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' -o build/tendermint ./cmd/tendermint/
CGO_ENABLED=0 go build $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o build/tendermint ./cmd/tendermint/

build_race:
CGO_ENABLED=0 go build -race $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' -o build/tendermint ./cmd/tendermint
CGO_ENABLED=0 go build -race $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o build/tendermint ./cmd/tendermint

install:
CGO_ENABLED=0 go install $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' ./cmd/tendermint
CGO_ENABLED=0 go install $(BUILD_FLAGS) -tags $(BUILD_TAGS) ./cmd/tendermint

########################################
### Protobuf

protoc_all: protoc_libs protoc_abci protoc_grpc

%.pb.go: %.proto
## If you get the following error,
## "error while loading shared libraries: libprotobuf.so.14: cannot open shared object file: No such file or directory"
## See https://stackoverflow.com/a/25518702
## Note the $< here is substituted for the %.proto
## Note the $@ here is substituted for the %.pb.go
protoc $(INCLUDE) $< --gogo_out=Mgoogle/protobuf/timestamp.proto=github.com/golang/protobuf/ptypes/timestamp,plugins=grpc:.

########################################
### Build ABCI

protoc_abci:
## If you get the following error,
## "error while loading shared libraries: libprotobuf.so.14: cannot open shared object file: No such file or directory"
## See https://stackoverflow.com/a/25518702
protoc $(INCLUDE) --gogo_out=plugins=grpc:. abci/types/*.proto
@echo "--> adding nolint declarations to protobuf generated files"
@awk '/package abci/types/ { print "//nolint: gas"; print; next }1' abci/types/types.pb.go > abci/types/types.pb.go.new
@mv abci/types/types.pb.go.new abci/types/types.pb.go
# see protobuf section above
protoc_abci: abci/types/types.pb.go

build_abci:
@go build -i ./abci/cmd/...
@@ -51,7 +60,7 @@ install_abci:
# dist builds binaries for all platforms and packages them for distribution
# TODO add abci to these scripts
dist:
@BUILD_TAGS='$(BUILD_TAGS)' sh -c "'$(CURDIR)/scripts/dist.sh'"
@BUILD_TAGS=$(BUILD_TAGS) sh -c "'$(CURDIR)/scripts/dist.sh'"

########################################
### Tools & dependencies
@@ -68,31 +77,25 @@ get_tools:

update_tools:
@echo "--> Updating tools"
@go get -u $(GOTOOLS)
go get -u -v $(GOTOOLS)

#Run this from CI
#Update dependencies
get_vendor_deps:
@rm -rf vendor/
@echo "--> Running dep"
@dep ensure -vendor-only


#Run this locally.
ensure_deps:
@rm -rf vendor/
@echo "--> Running dep"
@dep ensure

#For ABCI and libs
get_protoc:
@# https://github.com/google/protobuf/releases
curl -L https://github.com/google/protobuf/releases/download/v3.4.1/protobuf-cpp-3.4.1.tar.gz | tar xvz && \
cd protobuf-3.4.1 && \
curl -L https://github.com/google/protobuf/releases/download/v3.6.1/protobuf-cpp-3.6.1.tar.gz | tar xvz && \
cd protobuf-3.6.1 && \
DIST_LANG=cpp ./configure && \
make && \
make install && \
make check && \
sudo make install && \
sudo ldconfig && \
cd .. && \
|
||||
rm -rf protobuf-3.4.1
|
||||
rm -rf protobuf-3.6.1
|
||||
|
||||
draw_deps:
|
||||
@# requires brew install graphviz or apt-get install graphviz
|
||||
@@ -101,21 +104,14 @@ draw_deps:
|
||||
|
||||
get_deps_bin_size:
|
||||
@# Copy of build recipe with additional flags to perform binary size analysis
|
||||
$(eval $(shell go build -work -a $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' -o build/tendermint ./cmd/tendermint/ 2>&1))
|
||||
$(eval $(shell go build -work -a $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o build/tendermint ./cmd/tendermint/ 2>&1))
|
||||
@find $(WORK) -type f -name "*.a" | xargs -I{} du -hxs "{}" | sort -rh | sed -e s:${WORK}/::g > deps_bin_size.log
|
||||
@echo "Results can be found here: $(CURDIR)/deps_bin_size.log"
|
||||
|
||||
########################################
|
||||
### Libs
|
||||
|
||||
protoc_libs:
|
||||
## If you get the following error,
|
||||
## "error while loading shared libraries: libprotobuf.so.14: cannot open shared object file: No such file or directory"
|
||||
## See https://stackoverflow.com/a/25518702
|
||||
protoc $(INCLUDE) --go_out=plugins=grpc:. libs/common/*.proto
|
||||
@echo "--> adding nolint declarations to protobuf generated files"
|
||||
@awk '/package libs/common/ { print "//nolint: gas"; print; next }1' libs/common/types.pb.go > libs/common/types.pb.go.new
|
||||
@mv libs/common/types.pb.go.new libs/common/types.pb.go
|
||||
protoc_libs: libs/common/types.pb.go
|
||||
|
||||
gen_certs: clean_certs
|
||||
## Generating certificates for TLS testing...
|
||||
@@ -130,12 +126,14 @@ clean_certs:
|
||||
rm -f db/remotedb/::.crt db/remotedb/::.key
|
||||
|
||||
test_libs: gen_certs
|
||||
GOCACHE=off go test -tags gcc $(shell go list ./... | grep -v vendor)
|
||||
GOCACHE=off go test -tags gcc $(PACKAGES)
|
||||
make clean_certs
|
||||
|
||||
grpc_dbserver:
|
||||
protoc -I db/remotedb/proto/ db/remotedb/proto/defs.proto --go_out=plugins=grpc:db/remotedb/proto
|
||||
|
||||
protoc_grpc: rpc/grpc/types.pb.go
|
||||
|
||||
########################################
|
||||
### Testing
|
||||
|
||||
@@ -206,11 +204,11 @@ vagrant_test:
|
||||
### go tests
|
||||
test:
|
||||
@echo "--> Running go test"
|
||||
@go test $(PACKAGES)
|
||||
@GOCACHE=off go test -p 1 $(PACKAGES)
|
||||
|
||||
test_race:
|
||||
@echo "--> Running go test --race"
|
||||
@go test -v -race $(PACKAGES)
|
||||
@GOCACHE=off go test -p 1 -v -race $(PACKAGES)
|
||||
|
||||
|
||||
########################################
|
||||
@@ -221,7 +219,7 @@ fmt:
|
||||
|
||||
metalinter:
|
||||
@echo "--> Running linter"
|
||||
@gometalinter.v2 --vendor --deadline=600s --disable-all \
|
||||
@gometalinter.v2 $(LINT_FLAGS) --disable-all \
|
||||
--enable=deadcode \
|
||||
--enable=gosimple \
|
||||
--enable=misspell \
|
||||
@@ -250,7 +248,17 @@ metalinter:
|
||||
|
||||
metalinter_all:
|
||||
@echo "--> Running linter (all)"
|
||||
gometalinter.v2 --vendor --deadline=600s --enable-all --disable=lll ./...
|
||||
gometalinter.v2 $(LINT_FLAGS) --enable-all --disable=lll ./...
|
||||
|
||||
DESTINATION = ./index.html.md
|
||||
|
||||
rpc-docs:
|
||||
cat rpc/core/slate_header.txt > $(DESTINATION)
|
||||
godoc2md -template rpc/core/doc_template.txt github.com/tendermint/tendermint/rpc/core | grep -v -e "pipe.go" -e "routes.go" -e "dev.go" | sed 's,/src/target,https://github.com/tendermint/tendermint/tree/master/rpc/core,' >> $(DESTINATION)
|
||||
|
||||
check_dep:
|
||||
dep status >> /dev/null
|
||||
!(grep -n branch Gopkg.toml)
|
||||
|
||||
###########################################################
|
||||
### Docker image
|
||||
@@ -307,4 +315,4 @@ build-slate:
|
||||
# To avoid unintended conflicts with file names, always add to .PHONY
|
||||
# unless there is a reason not to.
|
||||
# https://www.gnu.org/software/make/manual/html_node/Phony-Targets.html
|
||||
.PHONY: check build build_race build_abci dist install install_abci check_tools get_tools update_tools get_vendor_deps draw_deps get_protoc protoc_abci protoc_libs gen_certs clean_certs grpc_dbserver test_cover test_apps test_persistence test_p2p test test_race test_integrations test_release test100 vagrant_test fmt build-linux localnet-start localnet-stop build-docker build-docker-localnode sentry-start sentry-config sentry-stop build-slate
|
||||
.PHONY: check build build_race build_abci dist install install_abci check_dep check_tools get_tools update_tools get_vendor_deps draw_deps get_protoc protoc_abci protoc_libs gen_certs clean_certs grpc_dbserver test_cover test_apps test_persistence test_p2p test test_race test_integrations test_release test100 vagrant_test fmt rpc-docs build-linux localnet-start localnet-stop build-docker build-docker-localnode sentry-start sentry-config sentry-stop build-slate protoc_grpc protoc_all
|
||||
|
@@ -22,7 +22,7 @@ develop | [ middleware that takes a state transition machine - written in any programming language -
|
||||
and securely replicates it on many machines.
|
||||
|
||||
For protocol details, see [the specification](/docs/spec).
|
||||
For protocol details, see [the specification](/docs/spec). For a consensus proof and detailed protocol analysis, check out our recent paper, "[The latest gossip on BFT consensus](https://arxiv.org/abs/1807.04938)".
|
||||
|
||||
## A Note on Production Readiness
|
||||
|
||||
@@ -50,13 +50,13 @@ Go version | Go1.9 or higher
|
||||
|
||||
## Install
|
||||
|
||||
See the [install instructions](/docs/install.md)
|
||||
See the [install instructions](/docs/introduction/install.md)
|
||||
|
||||
## Quick Start
|
||||
|
||||
- [Single node](/docs/using-tendermint.md)
|
||||
- [Local cluster using docker-compose](/networks/local)
|
||||
- [Remote cluster using terraform and ansible](/docs/terraform-and-ansible.md)
|
||||
- [Remote cluster using terraform and ansible](/docs/networks/terraform-and-ansible.md)
|
||||
- [Join the public testnet](https://cosmos.network/testnet)
|
||||
|
||||
## Resources
|
||||
|
66
UPGRADING.md
Normal file
@@ -0,0 +1,66 @@
|
||||
# Upgrading Tendermint Core
|
||||
|
||||
This guide provides steps to be followed when you upgrade your applications to
|
||||
a newer version of Tendermint Core.
|
||||
|
||||
## v0.24.0
|
||||
|
||||
The 0.24.0 release contains a lot of changes to the state and types. It's not
|
||||
compatible with the old versions, and there is no straightforward way to update
|
||||
old data to be compatible with the new version.
|
||||
|
||||
To reset the state do:
|
||||
|
||||
```
|
||||
$ tendermint unsafe_reset_all
|
||||
```
|
||||
|
||||
Here we summarize some other notable changes to be mindful of.
|
||||
|
||||
### Config changes
|
||||
|
||||
`p2p.max_num_peers` was removed in favor of `p2p.max_num_inbound_peers` and
|
||||
`p2p.max_num_outbound_peers`.
|
||||
|
||||
```
|
||||
# Maximum number of inbound peers
|
||||
max_num_inbound_peers = 40
|
||||
|
||||
# Maximum number of outbound peers to connect to, excluding persistent peers
|
||||
max_num_outbound_peers = 10
|
||||
```
|
||||
|
||||
As you can see, the default ratio of inbound/outbound peers is 4/1. The reason
|
||||
is that we want to make it easier for new nodes to connect to the network. You can
|
||||
tweak these parameters to alter the network topology.
|
||||
|
||||
### RPC Changes
|
||||
|
||||
The result of `/commit` used to contain `header` and `commit` fields at the top level. These are now contained under the `signed_header` field.
|
||||
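For example, code that reads the commit over RPC now goes through `signed_header`. This is a minimal sketch, assuming the Go HTTP RPC client (`rpc/client`) and its `Commit` method; it is illustrative only, not the only way to consume the new response shape:

```go
package main

import (
	"fmt"

	rpcclient "github.com/tendermint/tendermint/rpc/client"
)

func main() {
	// Assumes a local node with RPC listening on the default port.
	c := rpcclient.NewHTTP("tcp://localhost:26657", "/websocket")

	height := int64(1)
	res, err := c.Commit(&height)
	if err != nil {
		panic(err)
	}

	// The header and commit are now grouped under the signed_header field.
	fmt.Println(res.SignedHeader.Header.Height)
	fmt.Println(len(res.SignedHeader.Commit.Precommits))
}
```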
|
||||
### ABCI Changes
|
||||
|
||||
The header has been upgraded and contains new fields, but none of the existing
|
||||
fields were changed, except their order.
|
||||
|
||||
The `Validator` type was split into two, one containing an `Address` and one
|
||||
containing a `PubKey`. When processing `RequestBeginBlock`, use the `Validator`
|
||||
type, which contains just the `Address`. When returning `ResponseEndBlock`, use
|
||||
the `ValidatorUpdate` type, which contains just the `PubKey`.
|
||||
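The sketch below shows how an application might consume the two types after the split. The `app` struct and its fields are hypothetical, but the type and field names match the proto changes shown later in this diff (`RequestBeginBlock.LastCommitInfo`, `VoteInfo`, `Validator`, `ValidatorUpdate`):

```go
package example

import (
	"github.com/tendermint/tendermint/abci/types"
)

type app struct {
	types.BaseApplication
	// Validator changes queued during the block, returned from EndBlock.
	pendingUpdates []types.ValidatorUpdate
}

// BeginBlock sees only validator addresses (and whether they signed),
// via LastCommitInfo.
func (a *app) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock {
	for _, vote := range req.LastCommitInfo.Votes {
		_ = vote.Validator.Address // address-only Validator
		_ = vote.SignedLastBlock
	}
	return types.ResponseBeginBlock{}
}

// EndBlock returns ValidatorUpdate values, which carry a PubKey and power.
func (a *app) EndBlock(req types.RequestEndBlock) types.ResponseEndBlock {
	return types.ResponseEndBlock{ValidatorUpdates: a.pendingUpdates}
}
```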
|
||||
### Validator Set Updates
|
||||
|
||||
Validator set updates returned in ResponseEndBlock for height `H` used to take
|
||||
effect immediately at height `H+1`. Now they will be delayed one block, to take
|
||||
effect at height `H+2`. Note this means that the change will be seen by the ABCI
|
||||
app in the `RequestBeginBlock.LastCommitInfo` at block `H+3`. Apps were already
|
||||
required to maintain a map from validator addresses to pubkeys since v0.23 (when
|
||||
pubkeys were removed from RequestBeginBlock), but now they may need to track
|
||||
multiple validator sets at once to accommodate this delay.
|
||||
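As a rough sketch of the extra bookkeeping this implies, an app could record each batch of updates against the height at which it becomes effective. The `valSetTracker` helper below is hypothetical, not part of Tendermint:

```go
package example

import (
	"github.com/tendermint/tendermint/abci/types"
)

// valSetTracker records updates returned from EndBlock at height h against
// h+2, the height at which they become the active set (they first appear in
// RequestBeginBlock.LastCommitInfo at h+3).
type valSetTracker struct {
	effectiveAt map[int64][]types.ValidatorUpdate
}

func newValSetTracker() *valSetTracker {
	return &valSetTracker{effectiveAt: make(map[int64][]types.ValidatorUpdate)}
}

func (t *valSetTracker) recordEndBlock(h int64, updates []types.ValidatorUpdate) {
	if len(updates) > 0 {
		t.effectiveAt[h+2] = updates
	}
}

func (t *valSetTracker) updatesEffectiveAt(h int64) []types.ValidatorUpdate {
	return t.effectiveAt[h]
}
```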
|
||||
|
||||
### Block Size
|
||||
|
||||
The `ConsensusParams.BlockSize.MaxTxs` was removed in favour of
|
||||
`ConsensusParams.BlockSize.MaxBytes`, which is now enforced. This means blocks
|
||||
are limited only by byte size, not by number of transactions.
|
@@ -17,19 +17,19 @@ The community has provided a number of additional implementations, see the [Tende
|
||||
A detailed description of the ABCI methods and message types is contained in:
|
||||
|
||||
- [A prose specification](specification.md)
|
||||
- [A protobuf file](https://github.com/tendermint/abci/blob/master/types/types.proto)
|
||||
- [A Go interface](https://github.com/tendermint/abci/blob/master/types/application.go).
|
||||
- [A protobuf file](https://github.com/tendermint/tendermint/blob/master/abci/types/types.proto)
|
||||
- [A Go interface](https://github.com/tendermint/tendermint/blob/master/abci/types/application.go).
|
||||
|
||||
For more background information on ABCI, motivations, and tendermint, please visit [the documentation](http://tendermint.readthedocs.io/en/master/).
|
||||
For more background information on ABCI, motivations, and tendermint, please visit [the documentation](https://tendermint.com/docs/).
|
||||
The two guides to focus on are the `Application Development Guide` and `Using ABCI-CLI`.
|
||||
|
||||
|
||||
## Protocl Buffers
|
||||
## Protocol Buffers
|
||||
|
||||
To compile the protobuf file, run:
|
||||
|
||||
```
|
||||
make protoc
|
||||
cd $GOPATH/src/github.com/tendermint/tendermint/; make protoc_abci
|
||||
```
|
||||
|
||||
See `protoc --help` and [the Protocol Buffers site](https://developers.google.com/protocol-buffers)
|
||||
@@ -42,10 +42,13 @@ The `abci-cli` is a simple tool for debugging ABCI servers and running some
|
||||
example apps. To install it:
|
||||
|
||||
```
|
||||
go get github.com/tendermint/abci
|
||||
cd $GOPATH/src/github.com/tendermint/abci
|
||||
mkdir -p $GOPATH/src/github.com/tendermint
|
||||
cd $GOPATH/src/github.com/tendermint
|
||||
git clone https://github.com/tendermint/tendermint.git
|
||||
cd tendermint
|
||||
make get_tools
|
||||
make get_vendor_deps
|
||||
make install
|
||||
make install_abci
|
||||
```
|
||||
|
||||
## Implementation
|
||||
@@ -91,7 +94,7 @@ Note the length-prefixing used in the socket implementation does not apply for G
|
||||
The `abci-cli` tool wraps an ABCI client and can be used for probing/testing an ABCI server.
|
||||
For instance, `abci-cli test` will run a test sequence against a listening server running the Counter application (see below).
|
||||
It can also be used to run some example applications.
|
||||
See [the documentation](http://tendermint.readthedocs.io/en/master/) for more details.
|
||||
See [the documentation](https://tendermint.com/docs/) for more details.
|
||||
|
||||
### Examples
|
||||
|
||||
|
@@ -22,6 +22,7 @@ type grpcClient struct {
|
||||
mustConnect bool
|
||||
|
||||
client types.ABCIApplicationClient
|
||||
conn *grpc.ClientConn
|
||||
|
||||
mtx sync.Mutex
|
||||
addr string
|
||||
@@ -60,10 +61,11 @@ RETRY_LOOP:
|
||||
|
||||
cli.Logger.Info("Dialed server. Waiting for echo.", "addr", cli.addr)
|
||||
client := types.NewABCIApplicationClient(conn)
|
||||
cli.conn = conn
|
||||
|
||||
ENSURE_CONNECTED:
|
||||
for {
|
||||
_, err := client.Echo(context.Background(), &types.RequestEcho{"hello"}, grpc.FailFast(true))
|
||||
_, err := client.Echo(context.Background(), &types.RequestEcho{Message: "hello"}, grpc.FailFast(true))
|
||||
if err == nil {
|
||||
break ENSURE_CONNECTED
|
||||
}
|
||||
@@ -78,12 +80,10 @@ RETRY_LOOP:
|
||||
|
||||
func (cli *grpcClient) OnStop() {
|
||||
cli.BaseService.OnStop()
|
||||
cli.mtx.Lock()
|
||||
defer cli.mtx.Unlock()
|
||||
// TODO: how to close conn? its not a net.Conn and grpc doesn't expose a Close()
|
||||
/*if cli.client.conn != nil {
|
||||
cli.client.conn.Close()
|
||||
}*/
|
||||
|
||||
if cli.conn != nil {
|
||||
cli.conn.Close()
|
||||
}
|
||||
}
|
||||
|
||||
func (cli *grpcClient) StopForError(err error) {
|
||||
@@ -129,7 +129,7 @@ func (cli *grpcClient) EchoAsync(msg string) *ReqRes {
|
||||
if err != nil {
|
||||
cli.StopForError(err)
|
||||
}
|
||||
return cli.finishAsyncCall(req, &types.Response{&types.Response_Echo{res}})
|
||||
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Echo{res}})
|
||||
}
|
||||
|
||||
func (cli *grpcClient) FlushAsync() *ReqRes {
|
||||
@@ -138,7 +138,7 @@ func (cli *grpcClient) FlushAsync() *ReqRes {
|
||||
if err != nil {
|
||||
cli.StopForError(err)
|
||||
}
|
||||
return cli.finishAsyncCall(req, &types.Response{&types.Response_Flush{res}})
|
||||
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Flush{res}})
|
||||
}
|
||||
|
||||
func (cli *grpcClient) InfoAsync(params types.RequestInfo) *ReqRes {
|
||||
@@ -147,7 +147,7 @@ func (cli *grpcClient) InfoAsync(params types.RequestInfo) *ReqRes {
|
||||
if err != nil {
|
||||
cli.StopForError(err)
|
||||
}
|
||||
return cli.finishAsyncCall(req, &types.Response{&types.Response_Info{res}})
|
||||
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Info{res}})
|
||||
}
|
||||
|
||||
func (cli *grpcClient) SetOptionAsync(params types.RequestSetOption) *ReqRes {
|
||||
@@ -156,7 +156,7 @@ func (cli *grpcClient) SetOptionAsync(params types.RequestSetOption) *ReqRes {
|
||||
if err != nil {
|
||||
cli.StopForError(err)
|
||||
}
|
||||
return cli.finishAsyncCall(req, &types.Response{&types.Response_SetOption{res}})
|
||||
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_SetOption{res}})
|
||||
}
|
||||
|
||||
func (cli *grpcClient) DeliverTxAsync(tx []byte) *ReqRes {
|
||||
@@ -165,7 +165,7 @@ func (cli *grpcClient) DeliverTxAsync(tx []byte) *ReqRes {
|
||||
if err != nil {
|
||||
cli.StopForError(err)
|
||||
}
|
||||
return cli.finishAsyncCall(req, &types.Response{&types.Response_DeliverTx{res}})
|
||||
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_DeliverTx{res}})
|
||||
}
|
||||
|
||||
func (cli *grpcClient) CheckTxAsync(tx []byte) *ReqRes {
|
||||
@@ -174,7 +174,7 @@ func (cli *grpcClient) CheckTxAsync(tx []byte) *ReqRes {
|
||||
if err != nil {
|
||||
cli.StopForError(err)
|
||||
}
|
||||
return cli.finishAsyncCall(req, &types.Response{&types.Response_CheckTx{res}})
|
||||
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_CheckTx{res}})
|
||||
}
|
||||
|
||||
func (cli *grpcClient) QueryAsync(params types.RequestQuery) *ReqRes {
|
||||
@@ -183,7 +183,7 @@ func (cli *grpcClient) QueryAsync(params types.RequestQuery) *ReqRes {
|
||||
if err != nil {
|
||||
cli.StopForError(err)
|
||||
}
|
||||
return cli.finishAsyncCall(req, &types.Response{&types.Response_Query{res}})
|
||||
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Query{res}})
|
||||
}
|
||||
|
||||
func (cli *grpcClient) CommitAsync() *ReqRes {
|
||||
@@ -192,7 +192,7 @@ func (cli *grpcClient) CommitAsync() *ReqRes {
|
||||
if err != nil {
|
||||
cli.StopForError(err)
|
||||
}
|
||||
return cli.finishAsyncCall(req, &types.Response{&types.Response_Commit{res}})
|
||||
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Commit{res}})
|
||||
}
|
||||
|
||||
func (cli *grpcClient) InitChainAsync(params types.RequestInitChain) *ReqRes {
|
||||
@@ -201,7 +201,7 @@ func (cli *grpcClient) InitChainAsync(params types.RequestInitChain) *ReqRes {
|
||||
if err != nil {
|
||||
cli.StopForError(err)
|
||||
}
|
||||
return cli.finishAsyncCall(req, &types.Response{&types.Response_InitChain{res}})
|
||||
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_InitChain{res}})
|
||||
}
|
||||
|
||||
func (cli *grpcClient) BeginBlockAsync(params types.RequestBeginBlock) *ReqRes {
|
||||
@@ -210,7 +210,7 @@ func (cli *grpcClient) BeginBlockAsync(params types.RequestBeginBlock) *ReqRes {
|
||||
if err != nil {
|
||||
cli.StopForError(err)
|
||||
}
|
||||
return cli.finishAsyncCall(req, &types.Response{&types.Response_BeginBlock{res}})
|
||||
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_BeginBlock{res}})
|
||||
}
|
||||
|
||||
func (cli *grpcClient) EndBlockAsync(params types.RequestEndBlock) *ReqRes {
|
||||
@@ -219,7 +219,7 @@ func (cli *grpcClient) EndBlockAsync(params types.RequestEndBlock) *ReqRes {
|
||||
if err != nil {
|
||||
cli.StopForError(err)
|
||||
}
|
||||
return cli.finishAsyncCall(req, &types.Response{&types.Response_EndBlock{res}})
|
||||
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_EndBlock{res}})
|
||||
}
|
||||
|
||||
func (cli *grpcClient) finishAsyncCall(req *types.Request, res *types.Response) *ReqRes {
|
||||
|
@@ -149,7 +149,7 @@ func (app *localClient) FlushSync() error {
|
||||
}
|
||||
|
||||
func (app *localClient) EchoSync(msg string) (*types.ResponseEcho, error) {
|
||||
return &types.ResponseEcho{msg}, nil
|
||||
return &types.ResponseEcho{Message: msg}, nil
|
||||
}
|
||||
|
||||
func (app *localClient) InfoSync(req types.RequestInfo) (*types.ResponseInfo, error) {
|
||||
|
@@ -477,11 +477,8 @@ func muxOnCommands(cmd *cobra.Command, pArgs []string) error {
|
||||
}
|
||||
|
||||
func cmdUnimplemented(cmd *cobra.Command, args []string) error {
|
||||
// TODO: Print out all the sub-commands available
|
||||
msg := "unimplemented command"
|
||||
if err := cmd.Help(); err != nil {
|
||||
msg = err.Error()
|
||||
}
|
||||
|
||||
if len(args) > 0 {
|
||||
msg += fmt.Sprintf(" args: [%s]", strings.Join(args, " "))
|
||||
}
|
||||
@@ -489,6 +486,17 @@ func cmdUnimplemented(cmd *cobra.Command, args []string) error {
|
||||
Code: codeBad,
|
||||
Log: msg,
|
||||
})
|
||||
|
||||
fmt.Println("Available commands:")
|
||||
fmt.Printf("%s: %s\n", echoCmd.Use, echoCmd.Short)
|
||||
fmt.Printf("%s: %s\n", infoCmd.Use, infoCmd.Short)
|
||||
fmt.Printf("%s: %s\n", checkTxCmd.Use, checkTxCmd.Short)
|
||||
fmt.Printf("%s: %s\n", deliverTxCmd.Use, deliverTxCmd.Short)
|
||||
fmt.Printf("%s: %s\n", queryCmd.Use, queryCmd.Short)
|
||||
fmt.Printf("%s: %s\n", commitCmd.Use, commitCmd.Short)
|
||||
fmt.Printf("%s: %s\n", setOptionCmd.Use, setOptionCmd.Short)
|
||||
fmt.Println("Use \"[command] --help\" for more information about a command.")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -514,7 +522,7 @@ func cmdInfo(cmd *cobra.Command, args []string) error {
|
||||
if len(args) == 1 {
|
||||
version = args[0]
|
||||
}
|
||||
res, err := client.InfoSync(types.RequestInfo{version})
|
||||
res, err := client.InfoSync(types.RequestInfo{Version: version})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -537,7 +545,7 @@ func cmdSetOption(cmd *cobra.Command, args []string) error {
|
||||
}
|
||||
|
||||
key, val := args[0], args[1]
|
||||
_, err := client.SetOptionSync(types.RequestSetOption{key, val})
|
||||
_, err := client.SetOptionSync(types.RequestSetOption{Key: key, Value: val})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@@ -6,7 +6,6 @@ import (
|
||||
|
||||
"github.com/tendermint/tendermint/abci/example/code"
|
||||
"github.com/tendermint/tendermint/abci/types"
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
)
|
||||
|
||||
type CounterApplication struct {
|
||||
@@ -22,7 +21,7 @@ func NewCounterApplication(serial bool) *CounterApplication {
|
||||
}
|
||||
|
||||
func (app *CounterApplication) Info(req types.RequestInfo) types.ResponseInfo {
|
||||
return types.ResponseInfo{Data: cmn.Fmt("{\"hashes\":%v,\"txs\":%v}", app.hashCount, app.txCount)}
|
||||
return types.ResponseInfo{Data: fmt.Sprintf("{\"hashes\":%v,\"txs\":%v}", app.hashCount, app.txCount)}
|
||||
}
|
||||
|
||||
func (app *CounterApplication) SetOption(req types.RequestSetOption) types.ResponseSetOption {
|
||||
@@ -34,7 +33,7 @@ func (app *CounterApplication) SetOption(req types.RequestSetOption) types.Respo
|
||||
TODO Panic and have the ABCI server pass an exception.
|
||||
The client can call SetOptionSync() and get an `error`.
|
||||
return types.ResponseSetOption{
|
||||
Error: cmn.Fmt("Unknown key (%s) or value (%s)", key, value),
|
||||
Error: fmt.Sprintf("Unknown key (%s) or value (%s)", key, value),
|
||||
}
|
||||
*/
|
||||
return types.ResponseSetOption{}
|
||||
@@ -95,10 +94,10 @@ func (app *CounterApplication) Commit() (resp types.ResponseCommit) {
|
||||
func (app *CounterApplication) Query(reqQuery types.RequestQuery) types.ResponseQuery {
|
||||
switch reqQuery.Path {
|
||||
case "hash":
|
||||
return types.ResponseQuery{Value: []byte(cmn.Fmt("%v", app.hashCount))}
|
||||
return types.ResponseQuery{Value: []byte(fmt.Sprintf("%v", app.hashCount))}
|
||||
case "tx":
|
||||
return types.ResponseQuery{Value: []byte(cmn.Fmt("%v", app.txCount))}
|
||||
return types.ResponseQuery{Value: []byte(fmt.Sprintf("%v", app.txCount))}
|
||||
default:
|
||||
return types.ResponseQuery{Log: cmn.Fmt("Invalid query path. Expected hash or tx, got %v", reqQuery.Path)}
|
||||
return types.ResponseQuery{Log: fmt.Sprintf("Invalid query path. Expected hash or tx, got %v", reqQuery.Path)}
|
||||
}
|
||||
}
|
||||
|
@@ -7,6 +7,8 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"google.golang.org/grpc"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
@@ -37,13 +39,13 @@ func TestGRPC(t *testing.T) {
|
||||
}
|
||||
|
||||
func testStream(t *testing.T, app types.Application) {
|
||||
numDeliverTxs := 200000
|
||||
numDeliverTxs := 20000
|
||||
|
||||
// Start the listener
|
||||
server := abciserver.NewSocketServer("unix://test.sock", app)
|
||||
server.SetLogger(log.TestingLogger().With("module", "abci-server"))
|
||||
if err := server.Start(); err != nil {
|
||||
t.Fatalf("Error starting socket server: %v", err.Error())
|
||||
require.NoError(t, err, "Error starting socket server")
|
||||
}
|
||||
defer server.Stop()
|
||||
|
||||
@@ -70,7 +72,7 @@ func testStream(t *testing.T, app types.Application) {
|
||||
}
|
||||
if counter == numDeliverTxs {
|
||||
go func() {
|
||||
time.Sleep(time.Second * 2) // Wait for a bit to allow counter overflow
|
||||
time.Sleep(time.Second * 1) // Wait for a bit to allow counter overflow
|
||||
close(done)
|
||||
}()
|
||||
return
|
||||
@@ -132,7 +134,7 @@ func testGRPCSync(t *testing.T, app *types.GRPCApplication) {
|
||||
// Write requests
|
||||
for counter := 0; counter < numDeliverTxs; counter++ {
|
||||
// Send request
|
||||
response, err := client.DeliverTx(context.Background(), &types.RequestDeliverTx{[]byte("test")})
|
||||
response, err := client.DeliverTx(context.Background(), &types.RequestDeliverTx{Tx: []byte("test")})
|
||||
if err != nil {
|
||||
t.Fatalf("Error in GRPC DeliverTx: %v", err.Error())
|
||||
}
|
||||
@@ -146,7 +148,7 @@ func testGRPCSync(t *testing.T, app *types.GRPCApplication) {
|
||||
t.Log("response", counter)
|
||||
if counter == numDeliverTxs {
|
||||
go func() {
|
||||
time.Sleep(time.Second * 2) // Wait for a bit to allow counter overflow
|
||||
time.Sleep(time.Second * 1) // Wait for a bit to allow counter overflow
|
||||
}()
|
||||
}
|
||||
|
||||
|
@@ -7,12 +7,10 @@ import (
|
||||
|
||||
// RandVal creates one random validator, with a key derived
|
||||
// from the input value
|
||||
func RandVal(i int) types.Validator {
|
||||
addr := cmn.RandBytes(20)
|
||||
func RandVal(i int) types.ValidatorUpdate {
|
||||
pubkey := cmn.RandBytes(32)
|
||||
power := cmn.RandUint16() + 1
|
||||
v := types.Ed25519Validator(pubkey, int64(power))
|
||||
v.Address = addr
|
||||
v := types.Ed25519ValidatorUpdate(pubkey, int64(power))
|
||||
return v
|
||||
}
|
||||
|
||||
@@ -20,8 +18,8 @@ func RandVal(i int) types.Validator {
|
||||
// the application. Note that the keys are deterministically
|
||||
// derived from the index in the array, while the power is
|
||||
// random (Change this if not desired)
|
||||
func RandVals(cnt int) []types.Validator {
|
||||
res := make([]types.Validator, cnt)
|
||||
func RandVals(cnt int) []types.ValidatorUpdate {
|
||||
res := make([]types.ValidatorUpdate, cnt)
|
||||
for i := 0; i < cnt; i++ {
|
||||
res[i] = RandVal(i)
|
||||
}
|
||||
|
@@ -81,8 +81,8 @@ func (app *KVStoreApplication) DeliverTx(tx []byte) types.ResponseDeliverTx {
|
||||
app.state.Size += 1
|
||||
|
||||
tags := []cmn.KVPair{
|
||||
{[]byte("app.creator"), []byte("jae")},
|
||||
{[]byte("app.key"), key},
|
||||
{Key: []byte("app.creator"), Value: []byte("jae")},
|
||||
{Key: []byte("app.key"), Value: key},
|
||||
}
|
||||
return types.ResponseDeliverTx{Code: code.CodeTypeOK, Tags: tags}
|
||||
}
|
||||
|
@@ -2,6 +2,7 @@ package kvstore
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"sort"
|
||||
"testing"
|
||||
@@ -90,8 +91,8 @@ func TestPersistentKVStoreInfo(t *testing.T) {
|
||||
header := types.Header{
|
||||
Height: int64(height),
|
||||
}
|
||||
kvstore.BeginBlock(types.RequestBeginBlock{hash, header, nil, nil})
|
||||
kvstore.EndBlock(types.RequestEndBlock{header.Height})
|
||||
kvstore.BeginBlock(types.RequestBeginBlock{Hash: hash, Header: header})
|
||||
kvstore.EndBlock(types.RequestEndBlock{Height: header.Height})
|
||||
kvstore.Commit()
|
||||
|
||||
resInfo = kvstore.Info(types.RequestInfo{})
|
||||
@@ -121,11 +122,11 @@ func TestValUpdates(t *testing.T) {
|
||||
vals1, vals2 := vals[:nInit], kvstore.Validators()
|
||||
valsEqual(t, vals1, vals2)
|
||||
|
||||
var v1, v2, v3 types.Validator
|
||||
var v1, v2, v3 types.ValidatorUpdate
|
||||
|
||||
// add some validators
|
||||
v1, v2 = vals[nInit], vals[nInit+1]
|
||||
diff := []types.Validator{v1, v2}
|
||||
diff := []types.ValidatorUpdate{v1, v2}
|
||||
tx1 := MakeValSetChangeTx(v1.PubKey, v1.Power)
|
||||
tx2 := MakeValSetChangeTx(v2.PubKey, v2.Power)
|
||||
|
||||
@@ -139,7 +140,7 @@ func TestValUpdates(t *testing.T) {
|
||||
v1.Power = 0
|
||||
v2.Power = 0
|
||||
v3.Power = 0
|
||||
diff = []types.Validator{v1, v2, v3}
|
||||
diff = []types.ValidatorUpdate{v1, v2, v3}
|
||||
tx1 = MakeValSetChangeTx(v1.PubKey, v1.Power)
|
||||
tx2 = MakeValSetChangeTx(v2.PubKey, v2.Power)
|
||||
tx3 := MakeValSetChangeTx(v3.PubKey, v3.Power)
|
||||
@@ -157,18 +158,18 @@ func TestValUpdates(t *testing.T) {
|
||||
} else {
|
||||
v1.Power = 5
|
||||
}
|
||||
diff = []types.Validator{v1}
|
||||
diff = []types.ValidatorUpdate{v1}
|
||||
tx1 = MakeValSetChangeTx(v1.PubKey, v1.Power)
|
||||
|
||||
makeApplyBlock(t, kvstore, 3, diff, tx1)
|
||||
|
||||
vals1 = append([]types.Validator{v1}, vals1[1:]...)
|
||||
vals1 = append([]types.ValidatorUpdate{v1}, vals1[1:]...)
|
||||
vals2 = kvstore.Validators()
|
||||
valsEqual(t, vals1, vals2)
|
||||
|
||||
}
|
||||
|
||||
func makeApplyBlock(t *testing.T, kvstore types.Application, heightInt int, diff []types.Validator, txs ...[]byte) {
|
||||
func makeApplyBlock(t *testing.T, kvstore types.Application, heightInt int, diff []types.ValidatorUpdate, txs ...[]byte) {
|
||||
// make and apply block
|
||||
height := int64(heightInt)
|
||||
hash := []byte("foo")
|
||||
@@ -176,13 +177,13 @@ func makeApplyBlock(t *testing.T, kvstore types.Application, heightInt int, diff
|
||||
Height: height,
|
||||
}
|
||||
|
||||
kvstore.BeginBlock(types.RequestBeginBlock{hash, header, nil, nil})
|
||||
kvstore.BeginBlock(types.RequestBeginBlock{Hash: hash, Header: header})
|
||||
for _, tx := range txs {
|
||||
if r := kvstore.DeliverTx(tx); r.IsErr() {
|
||||
t.Fatal(r)
|
||||
}
|
||||
}
|
||||
resEndBlock := kvstore.EndBlock(types.RequestEndBlock{header.Height})
|
||||
resEndBlock := kvstore.EndBlock(types.RequestEndBlock{Height: header.Height})
|
||||
kvstore.Commit()
|
||||
|
||||
valsEqual(t, diff, resEndBlock.ValidatorUpdates)
|
||||
@@ -190,12 +191,12 @@ func makeApplyBlock(t *testing.T, kvstore types.Application, heightInt int, diff
|
||||
}
|
||||
|
||||
// order doesn't matter
|
||||
func valsEqual(t *testing.T, vals1, vals2 []types.Validator) {
|
||||
func valsEqual(t *testing.T, vals1, vals2 []types.ValidatorUpdate) {
|
||||
if len(vals1) != len(vals2) {
|
||||
t.Fatalf("vals dont match in len. got %d, expected %d", len(vals2), len(vals1))
|
||||
}
|
||||
sort.Sort(types.Validators(vals1))
|
||||
sort.Sort(types.Validators(vals2))
|
||||
sort.Sort(types.ValidatorUpdates(vals1))
|
||||
sort.Sort(types.ValidatorUpdates(vals2))
|
||||
for i, v1 := range vals1 {
|
||||
v2 := vals2[i]
|
||||
if !bytes.Equal(v1.PubKey.Data, v2.PubKey.Data) ||
|
||||
@@ -207,7 +208,7 @@ func valsEqual(t *testing.T, vals1, vals2 []types.Validator) {
|
||||
|
||||
func makeSocketClientServer(app types.Application, name string) (abcicli.Client, cmn.Service, error) {
|
||||
// Start the listener
|
||||
socket := cmn.Fmt("unix://%s.sock", name)
|
||||
socket := fmt.Sprintf("unix://%s.sock", name)
|
||||
logger := log.TestingLogger()
|
||||
|
||||
server := abciserver.NewSocketServer(socket, app)
|
||||
@@ -229,7 +230,7 @@ func makeSocketClientServer(app types.Application, name string) (abcicli.Client,
|
||||
|
||||
func makeGRPCClientServer(app types.Application, name string) (abcicli.Client, cmn.Service, error) {
|
||||
// Start the listener
|
||||
socket := cmn.Fmt("unix://%s.sock", name)
|
||||
socket := fmt.Sprintf("unix://%s.sock", name)
|
||||
logger := log.TestingLogger()
|
||||
|
||||
gapp := types.NewGRPCApplication(app)
|
||||
|
@@ -9,7 +9,6 @@ import (
|
||||
|
||||
"github.com/tendermint/tendermint/abci/example/code"
|
||||
"github.com/tendermint/tendermint/abci/types"
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
dbm "github.com/tendermint/tendermint/libs/db"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
)
|
||||
@@ -26,7 +25,7 @@ type PersistentKVStoreApplication struct {
|
||||
app *KVStoreApplication
|
||||
|
||||
// validator set
|
||||
ValUpdates []types.Validator
|
||||
ValUpdates []types.ValidatorUpdate
|
||||
|
||||
logger log.Logger
|
||||
}
|
||||
@@ -102,7 +101,7 @@ func (app *PersistentKVStoreApplication) InitChain(req types.RequestInitChain) t
|
||||
// Track the block hash and header information
|
||||
func (app *PersistentKVStoreApplication) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock {
|
||||
// reset valset changes
|
||||
app.ValUpdates = make([]types.Validator, 0)
|
||||
app.ValUpdates = make([]types.ValidatorUpdate, 0)
|
||||
return types.ResponseBeginBlock{}
|
||||
}
|
||||
|
||||
@@ -114,11 +113,11 @@ func (app *PersistentKVStoreApplication) EndBlock(req types.RequestEndBlock) typ
|
||||
//---------------------------------------------
|
||||
// update validators
|
||||
|
||||
func (app *PersistentKVStoreApplication) Validators() (validators []types.Validator) {
|
||||
func (app *PersistentKVStoreApplication) Validators() (validators []types.ValidatorUpdate) {
|
||||
itr := app.app.state.db.Iterator(nil, nil)
|
||||
for ; itr.Valid(); itr.Next() {
|
||||
if isValidatorTx(itr.Key()) {
|
||||
validator := new(types.Validator)
|
||||
validator := new(types.ValidatorUpdate)
|
||||
err := types.ReadMessage(bytes.NewBuffer(itr.Value()), validator)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
@@ -130,7 +129,7 @@ func (app *PersistentKVStoreApplication) Validators() (validators []types.Valida
|
||||
}
|
||||
|
||||
func MakeValSetChangeTx(pubkey types.PubKey, power int64) []byte {
|
||||
return []byte(cmn.Fmt("val:%X/%d", pubkey.Data, power))
|
||||
return []byte(fmt.Sprintf("val:%X/%d", pubkey.Data, power))
|
||||
}
|
||||
|
||||
func isValidatorTx(tx []byte) bool {
|
||||
@@ -168,11 +167,11 @@ func (app *PersistentKVStoreApplication) execValidatorTx(tx []byte) types.Respon
|
||||
}
|
||||
|
||||
// update
|
||||
return app.updateValidator(types.Ed25519Validator(pubkey, int64(power)))
|
||||
return app.updateValidator(types.Ed25519ValidatorUpdate(pubkey, int64(power)))
|
||||
}
|
||||
|
||||
// add, update, or remove a validator
|
||||
func (app *PersistentKVStoreApplication) updateValidator(v types.Validator) types.ResponseDeliverTx {
|
||||
func (app *PersistentKVStoreApplication) updateValidator(v types.ValidatorUpdate) types.ResponseDeliverTx {
|
||||
key := []byte("val:" + string(v.PubKey.Data))
|
||||
if v.Power == 0 {
|
||||
// remove validator
|
||||
|
@@ -12,11 +12,11 @@ import (
|
||||
|
||||
func InitChain(client abcicli.Client) error {
|
||||
total := 10
|
||||
vals := make([]types.Validator, total)
|
||||
vals := make([]types.ValidatorUpdate, total)
|
||||
for i := 0; i < total; i++ {
|
||||
pubkey := cmn.RandBytes(33)
|
||||
power := cmn.RandInt()
|
||||
vals[i] = types.Ed25519Validator(pubkey, int64(power))
|
||||
vals[i] = types.Ed25519ValidatorUpdate(pubkey, int64(power))
|
||||
}
|
||||
_, err := client.InitChainSync(types.RequestInitChain{
|
||||
Validators: vals,
|
||||
|
@@ -26,7 +26,7 @@ func startClient(abciType string) abcicli.Client {
|
||||
}
|
||||
|
||||
func setOption(client abcicli.Client, key, value string) {
|
||||
_, err := client.SetOptionSync(types.RequestSetOption{key, value})
|
||||
_, err := client.SetOptionSync(types.RequestSetOption{Key: key, Value: value})
|
||||
if err != nil {
|
||||
panicf("setting %v=%v: \nerr: %v", key, value, err)
|
||||
}
|
||||
|
@@ -85,7 +85,7 @@ func NewGRPCApplication(app Application) *GRPCApplication {
|
||||
}
|
||||
|
||||
func (app *GRPCApplication) Echo(ctx context.Context, req *RequestEcho) (*ResponseEcho, error) {
|
||||
return &ResponseEcho{req.Message}, nil
|
||||
return &ResponseEcho{Message: req.Message}, nil
|
||||
}
|
||||
|
||||
func (app *GRPCApplication) Flush(ctx context.Context, req *RequestFlush) (*ResponseFlush, error) {
|
||||
|
@@ -71,7 +71,7 @@ func encodeVarint(w io.Writer, i int64) (err error) {
|
||||
|
||||
func ToRequestEcho(message string) *Request {
|
||||
return &Request{
|
||||
Value: &Request_Echo{&RequestEcho{message}},
|
||||
Value: &Request_Echo{&RequestEcho{Message: message}},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -95,13 +95,13 @@ func ToRequestSetOption(req RequestSetOption) *Request {
|
||||
|
||||
func ToRequestDeliverTx(tx []byte) *Request {
|
||||
return &Request{
|
||||
Value: &Request_DeliverTx{&RequestDeliverTx{tx}},
|
||||
Value: &Request_DeliverTx{&RequestDeliverTx{Tx: tx}},
|
||||
}
|
||||
}
|
||||
|
||||
func ToRequestCheckTx(tx []byte) *Request {
|
||||
return &Request{
|
||||
Value: &Request_CheckTx{&RequestCheckTx{tx}},
|
||||
Value: &Request_CheckTx{&RequestCheckTx{Tx: tx}},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -139,13 +139,13 @@ func ToRequestEndBlock(req RequestEndBlock) *Request {
|
||||
|
||||
func ToResponseException(errStr string) *Response {
|
||||
return &Response{
|
||||
Value: &Response_Exception{&ResponseException{errStr}},
|
||||
Value: &Response_Exception{&ResponseException{Error: errStr}},
|
||||
}
|
||||
}
|
||||
|
||||
func ToResponseEcho(message string) *Response {
|
||||
return &Response{
|
||||
Value: &Response_Echo{&ResponseEcho{message}},
|
||||
Value: &Response_Echo{&ResponseEcho{Message: message}},
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -22,7 +22,7 @@ func TestMarshalJSON(t *testing.T) {
|
||||
Data: []byte("hello"),
|
||||
GasWanted: 43,
|
||||
Tags: []cmn.KVPair{
|
||||
{[]byte("pho"), []byte("bo")},
|
||||
{Key: []byte("pho"), Value: []byte("bo")},
|
||||
},
|
||||
}
|
||||
b, err = json.Marshal(&r1)
|
||||
@@ -83,9 +83,8 @@ func TestWriteReadMessage2(t *testing.T) {
|
||||
Log: phrase,
|
||||
GasWanted: 10,
|
||||
Tags: []cmn.KVPair{
|
||||
cmn.KVPair{[]byte("abc"), []byte("def")},
|
||||
cmn.KVPair{Key: []byte("abc"), Value: []byte("def")},
|
||||
},
|
||||
// Fee: cmn.KI64Pair{
|
||||
},
|
||||
// TODO: add the rest
|
||||
}
|
||||
|
@@ -4,8 +4,8 @@ const (
|
||||
PubKeyEd25519 = "ed25519"
|
||||
)
|
||||
|
||||
func Ed25519Validator(pubkey []byte, power int64) Validator {
|
||||
return Validator{
|
||||
func Ed25519ValidatorUpdate(pubkey []byte, power int64) ValidatorUpdate {
|
||||
return ValidatorUpdate{
|
||||
// Address:
|
||||
PubKey: PubKey{
|
||||
Type: PubKeyEd25519,
|
||||
|
13284
abci/types/types.pb.go
File diff suppressed because it is too large
@@ -4,12 +4,22 @@ package types;
|
||||
// For more information on gogo.proto, see:
|
||||
// https://github.com/gogo/protobuf/blob/master/extensions.md
|
||||
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
|
||||
import "github.com/tendermint/tmlibs/common/types.proto";
|
||||
import "google/protobuf/timestamp.proto";
|
||||
import "github.com/tendermint/tendermint/libs/common/types.proto";
|
||||
|
||||
// This file is copied from http://github.com/tendermint/abci
|
||||
// NOTE: When using custom types, mind the warnings.
|
||||
// https://github.com/gogo/protobuf/blob/master/custom_types.md#warnings-and-issues
|
||||
|
||||
option (gogoproto.marshaler_all) = true;
|
||||
option (gogoproto.unmarshaler_all) = true;
|
||||
option (gogoproto.sizer_all) = true;
|
||||
option (gogoproto.goproto_registration) = true;
|
||||
// Generate tests
|
||||
option (gogoproto.populate_all) = true;
|
||||
option (gogoproto.equal_all) = true;
|
||||
option (gogoproto.testgen_all) = true;
|
||||
|
||||
//----------------------------------------
|
||||
// Request types
|
||||
|
||||
@@ -47,10 +57,10 @@ message RequestSetOption {
|
||||
}
|
||||
|
||||
message RequestInitChain {
|
||||
int64 time = 1;
|
||||
google.protobuf.Timestamp time = 1 [(gogoproto.nullable)=false, (gogoproto.stdtime)=true];
|
||||
string chain_id = 2;
|
||||
ConsensusParams consensus_params = 3;
|
||||
repeated Validator validators = 4 [(gogoproto.nullable)=false];
|
||||
repeated ValidatorUpdate validators = 4 [(gogoproto.nullable)=false];
|
||||
bytes app_state_bytes = 5;
|
||||
}
|
||||
|
||||
@@ -61,10 +71,11 @@ message RequestQuery {
|
||||
bool prove = 4;
|
||||
}
|
||||
|
||||
// NOTE: validators here have empty pubkeys.
|
||||
message RequestBeginBlock {
|
||||
bytes hash = 1;
|
||||
Header header = 2 [(gogoproto.nullable)=false];
|
||||
repeated SigningValidator validators = 3 [(gogoproto.nullable)=false];
|
||||
LastCommitInfo last_commit_info = 3 [(gogoproto.nullable)=false];
|
||||
repeated Evidence byzantine_validators = 4 [(gogoproto.nullable)=false];
|
||||
}
|
||||
|
||||
@@ -132,7 +143,7 @@ message ResponseSetOption {
|
||||
|
||||
message ResponseInitChain {
|
||||
ConsensusParams consensus_params = 1;
|
||||
repeated Validator validators = 2 [(gogoproto.nullable)=false];
|
||||
repeated ValidatorUpdate validators = 2 [(gogoproto.nullable)=false];
|
||||
}
|
||||
|
||||
message ResponseQuery {
|
||||
@@ -159,7 +170,6 @@ message ResponseCheckTx {
|
||||
int64 gas_wanted = 5;
|
||||
int64 gas_used = 6;
|
||||
repeated common.KVPair tags = 7 [(gogoproto.nullable)=false, (gogoproto.jsontag)="tags,omitempty"];
|
||||
common.KI64Pair fee = 8 [(gogoproto.nullable)=false];
|
||||
}
|
||||
|
||||
message ResponseDeliverTx {
|
||||
@@ -170,11 +180,10 @@ message ResponseDeliverTx {
|
||||
int64 gas_wanted = 5;
|
||||
int64 gas_used = 6;
|
||||
repeated common.KVPair tags = 7 [(gogoproto.nullable)=false, (gogoproto.jsontag)="tags,omitempty"];
|
||||
common.KI64Pair fee = 8 [(gogoproto.nullable)=false];
|
||||
}
|
||||
|
||||
message ResponseEndBlock {
|
||||
repeated Validator validator_updates = 1 [(gogoproto.nullable)=false];
|
||||
repeated ValidatorUpdate validator_updates = 1 [(gogoproto.nullable)=false];
|
||||
ConsensusParams consensus_param_updates = 2;
|
||||
repeated common.KVPair tags = 3 [(gogoproto.nullable)=false, (gogoproto.jsontag)="tags,omitempty"];
|
||||
}
|
||||
@@ -195,14 +204,13 @@ message ConsensusParams {
|
||||
BlockGossip block_gossip = 3;
|
||||
}
|
||||
|
||||
// BlockSize contain limits on the block size.
|
||||
// BlockSize contains limits on the block size.
|
||||
message BlockSize {
|
||||
int32 max_bytes = 1;
|
||||
int32 max_txs = 2;
|
||||
int64 max_gas = 3;
|
||||
int64 max_gas = 2;
|
||||
}
|
||||
|
||||
// TxSize contain limits on the tx size.
|
||||
// TxSize contains limits on the tx size.
|
||||
message TxSize {
|
||||
int32 max_bytes = 1;
|
||||
int64 max_gas = 2;
|
||||
@@ -215,38 +223,66 @@ message BlockGossip {
|
||||
int32 block_part_size_bytes = 1;
|
||||
}
|
||||
|
||||
message LastCommitInfo {
|
||||
int32 round = 1;
|
||||
repeated VoteInfo votes = 2 [(gogoproto.nullable)=false];
|
||||
}
|
||||
|
||||
//----------------------------------------
|
||||
// Blockchain Types
|
||||
|
||||
// just the minimum the app might need
|
||||
message Header {
|
||||
// basics
|
||||
// basic block info
|
||||
string chain_id = 1 [(gogoproto.customname)="ChainID"];
|
||||
int64 height = 2;
|
||||
int64 time = 3;
|
||||
|
||||
// txs
|
||||
int32 num_txs = 4;
|
||||
google.protobuf.Timestamp time = 3 [(gogoproto.nullable)=false, (gogoproto.stdtime)=true];
|
||||
int64 num_txs = 4;
|
||||
int64 total_txs = 5;
|
||||
|
||||
// hashes
|
||||
bytes last_block_hash = 6;
|
||||
bytes validators_hash = 7;
|
||||
bytes app_hash = 8;
|
||||
// prev block info
|
||||
BlockID last_block_id = 6 [(gogoproto.nullable)=false];
|
||||
|
||||
// consensus
|
||||
Validator proposer = 9 [(gogoproto.nullable)=false];
|
||||
// hashes of block data
|
||||
bytes last_commit_hash = 7; // commit from validators from the last block
|
||||
bytes data_hash = 8; // transactions
|
||||
|
||||
// hashes from the app output from the prev block
|
||||
bytes validators_hash = 9; // validators for the current block
|
||||
bytes next_validators_hash = 10; // validators for the next block
|
||||
bytes consensus_hash = 11; // consensus params for current block
|
||||
bytes app_hash = 12; // state after txs from the previous block
|
||||
bytes last_results_hash = 13;// root hash of all results from the txs from the previous block
|
||||
|
||||
// consensus info
|
||||
bytes evidence_hash = 14; // evidence included in the block
|
||||
bytes proposer_address = 15; // original proposer of the block
|
||||
}
|
||||
|
||||
message BlockID {
|
||||
bytes hash = 1;
|
||||
PartSetHeader parts_header = 2 [(gogoproto.nullable)=false];
|
||||
}
|
||||
|
||||
message PartSetHeader {
|
||||
int32 total = 1;
|
||||
bytes hash = 2;
|
||||
}
|
||||
|
||||
// Validator
|
||||
message Validator {
|
||||
bytes address = 1;
|
||||
PubKey pub_key = 2 [(gogoproto.nullable)=false];
|
||||
//PubKey pub_key = 2 [(gogoproto.nullable)=false];
|
||||
int64 power = 3;
|
||||
}
|
||||
|
||||
// Validator with an extra bool
|
||||
message SigningValidator {
|
||||
// ValidatorUpdate
|
||||
message ValidatorUpdate {
|
||||
PubKey pub_key = 1 [(gogoproto.nullable)=false];
|
||||
int64 power = 2;
|
||||
}
|
||||
|
||||
// VoteInfo
|
||||
message VoteInfo {
|
||||
Validator validator = 1 [(gogoproto.nullable)=false];
|
||||
bool signed_last_block = 2;
|
||||
}
|
||||
@@ -260,7 +296,7 @@ message Evidence {
|
||||
string type = 1;
|
||||
Validator validator = 2 [(gogoproto.nullable)=false];
|
||||
int64 height = 3;
|
||||
int64 time = 4;
|
||||
google.protobuf.Timestamp time = 4 [(gogoproto.nullable)=false, (gogoproto.stdtime)=true];
|
||||
int64 total_voting_power = 5;
|
||||
}
|
||||
|
||||
|
@@ -1,31 +0,0 @@
|
||||
package types
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
asrt "github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestConsensusParams(t *testing.T) {
|
||||
assert := asrt.New(t)
|
||||
|
||||
params := &ConsensusParams{
|
||||
BlockSize: &BlockSize{MaxGas: 12345},
|
||||
BlockGossip: &BlockGossip{BlockPartSizeBytes: 54321},
|
||||
}
|
||||
var noParams *ConsensusParams // nil
|
||||
|
||||
// no error with nil fields
|
||||
assert.Nil(noParams.GetBlockSize())
|
||||
assert.EqualValues(noParams.GetBlockSize().GetMaxGas(), 0)
|
||||
|
||||
// get values with real fields
|
||||
assert.NotNil(params.GetBlockSize())
|
||||
assert.EqualValues(params.GetBlockSize().GetMaxTxs(), 0)
|
||||
assert.EqualValues(params.GetBlockSize().GetMaxGas(), 12345)
|
||||
assert.NotNil(params.GetBlockGossip())
|
||||
assert.EqualValues(params.GetBlockGossip().GetBlockPartSizeBytes(), 54321)
|
||||
assert.Nil(params.GetTxSize())
|
||||
assert.EqualValues(params.GetTxSize().GetMaxBytes(), 0)
|
||||
|
||||
}
|
4737
abci/types/typespb_test.go
Normal file
File diff suppressed because it is too large
@@ -2,58 +2,33 @@ package types
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"sort"
|
||||
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
)
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
// Validators is a list of validators that implements the Sort interface
|
||||
type Validators []Validator
|
||||
// ValidatorUpdates is a list of validators that implements the Sort interface
|
||||
type ValidatorUpdates []ValidatorUpdate
|
||||
|
||||
var _ sort.Interface = (Validators)(nil)
|
||||
var _ sort.Interface = (ValidatorUpdates)(nil)
|
||||
|
||||
// All these methods for Validators:
|
||||
// All these methods for ValidatorUpdates:
|
||||
// Len, Less and Swap
|
||||
// are for Validators to implement sort.Interface
|
||||
// are for ValidatorUpdates to implement sort.Interface
|
||||
// which will be used by the sort package.
|
||||
// See Issue https://github.com/tendermint/abci/issues/212
|
||||
|
||||
func (v Validators) Len() int {
|
||||
func (v ValidatorUpdates) Len() int {
|
||||
return len(v)
|
||||
}
|
||||
|
||||
// XXX: doesn't distinguish same validator with different power
|
||||
func (v Validators) Less(i, j int) bool {
|
||||
func (v ValidatorUpdates) Less(i, j int) bool {
|
||||
return bytes.Compare(v[i].PubKey.Data, v[j].PubKey.Data) <= 0
|
||||
}
|
||||
|
||||
func (v Validators) Swap(i, j int) {
|
||||
func (v ValidatorUpdates) Swap(i, j int) {
|
||||
v1 := v[i]
|
||||
v[i] = v[j]
|
||||
v[j] = v1
|
||||
}
|
||||
|
||||
func ValidatorsString(vs Validators) string {
|
||||
s := make([]validatorPretty, len(vs))
|
||||
for i, v := range vs {
|
||||
s[i] = validatorPretty{
|
||||
Address: v.Address,
|
||||
PubKey: v.PubKey.Data,
|
||||
Power: v.Power,
|
||||
}
|
||||
}
|
||||
b, err := json.Marshal(s)
|
||||
if err != nil {
|
||||
panic(err.Error())
|
||||
}
|
||||
return string(b)
|
||||
}
|
||||
|
||||
type validatorPretty struct {
|
||||
Address cmn.HexBytes `json:"address"`
|
||||
PubKey []byte `json:"pub_key"`
|
||||
Power int64 `json:"power"`
|
||||
}
|
||||
|
@@ -1,9 +1,9 @@
|
||||
package version
|
||||
|
||||
// NOTE: we should probably be versioning the ABCI and the abci-cli separately
|
||||
import (
|
||||
"github.com/tendermint/tendermint/version"
|
||||
)
|
||||
|
||||
const Maj = "0"
|
||||
const Min = "12"
|
||||
const Fix = "0"
|
||||
// TODO: eliminate this after some version refactor
|
||||
|
||||
const Version = "0.12.0"
|
||||
const Version = version.ABCIVersion
|
||||
|
@@ -7,7 +7,7 @@ import (
|
||||
"github.com/tendermint/go-amino"
|
||||
|
||||
proto "github.com/tendermint/tendermint/benchmarks/proto"
|
||||
"github.com/tendermint/tendermint/crypto"
|
||||
"github.com/tendermint/tendermint/crypto/ed25519"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
ctypes "github.com/tendermint/tendermint/rpc/core/types"
|
||||
)
|
||||
@@ -16,7 +16,7 @@ func BenchmarkEncodeStatusWire(b *testing.B) {
|
||||
b.StopTimer()
|
||||
cdc := amino.NewCodec()
|
||||
ctypes.RegisterAmino(cdc)
|
||||
nodeKey := p2p.NodeKey{PrivKey: crypto.GenPrivKeyEd25519()}
|
||||
nodeKey := p2p.NodeKey{PrivKey: ed25519.GenPrivKey()}
|
||||
status := &ctypes.ResultStatus{
|
||||
NodeInfo: p2p.NodeInfo{
|
||||
ID: nodeKey.ID(),
|
||||
@@ -52,7 +52,7 @@ func BenchmarkEncodeNodeInfoWire(b *testing.B) {
|
||||
b.StopTimer()
|
||||
cdc := amino.NewCodec()
|
||||
ctypes.RegisterAmino(cdc)
|
||||
nodeKey := p2p.NodeKey{PrivKey: crypto.GenPrivKeyEd25519()}
|
||||
nodeKey := p2p.NodeKey{PrivKey: ed25519.GenPrivKey()}
|
||||
nodeInfo := p2p.NodeInfo{
|
||||
ID: nodeKey.ID(),
|
||||
Moniker: "SOMENAME",
|
||||
@@ -77,7 +77,7 @@ func BenchmarkEncodeNodeInfoBinary(b *testing.B) {
|
||||
b.StopTimer()
|
||||
cdc := amino.NewCodec()
|
||||
ctypes.RegisterAmino(cdc)
|
||||
nodeKey := p2p.NodeKey{PrivKey: crypto.GenPrivKeyEd25519()}
|
||||
nodeKey := p2p.NodeKey{PrivKey: ed25519.GenPrivKey()}
|
||||
nodeInfo := p2p.NodeInfo{
|
||||
ID: nodeKey.ID(),
|
||||
Moniker: "SOMENAME",
|
||||
@@ -98,7 +98,7 @@ func BenchmarkEncodeNodeInfoBinary(b *testing.B) {
|
||||
|
||||
func BenchmarkEncodeNodeInfoProto(b *testing.B) {
|
||||
b.StopTimer()
|
||||
nodeKey := p2p.NodeKey{PrivKey: crypto.GenPrivKeyEd25519()}
|
||||
nodeKey := p2p.NodeKey{PrivKey: ed25519.GenPrivKey()}
|
||||
nodeID := string(nodeKey.ID())
|
||||
someName := "SOMENAME"
|
||||
someAddr := "SOMEADDR"
|
||||
|
@@ -6,8 +6,8 @@ import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
rpcclient "github.com/tendermint/tendermint/rpc/lib/client"
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
rpcclient "github.com/tendermint/tendermint/rpc/lib/client"
|
||||
)
|
||||
|
||||
func main() {
|
||||
|
@@ -29,10 +29,10 @@ eg, L = latency = 0.1s
|
||||
*/
|
||||
|
||||
const (
|
||||
requestIntervalMS = 100
|
||||
maxTotalRequesters = 1000
|
||||
requestIntervalMS = 2
|
||||
maxTotalRequesters = 600
|
||||
maxPendingRequests = maxTotalRequesters
|
||||
maxPendingRequestsPerPeer = 50
|
||||
maxPendingRequestsPerPeer = 20
|
||||
|
||||
// Minimum recv rate to ensure we're receiving blocks from a peer fast
|
||||
// enough. If a peer is not sending us data at at least that rate, we
|
||||
@@ -219,14 +219,12 @@ func (pool *BlockPool) RedoRequest(height int64) p2p.ID {
|
||||
defer pool.mtx.Unlock()
|
||||
|
||||
request := pool.requesters[height]
|
||||
|
||||
if request.block == nil {
|
||||
panic("Expected block to be non-nil")
|
||||
peerID := request.getPeerID()
|
||||
if peerID != p2p.ID("") {
|
||||
// RemovePeer will redo all requesters associated with this peer.
|
||||
pool.removePeer(peerID)
|
||||
}
|
||||
|
||||
// RemovePeer will redo all requesters associated with this peer.
|
||||
pool.removePeer(request.peerID)
|
||||
return request.peerID
|
||||
return peerID
|
||||
}
|
||||
|
||||
// TODO: ensure that blocks come in order for each peer.
|
||||
@@ -367,10 +365,10 @@ func (pool *BlockPool) debug() string {
|
||||
nextHeight := pool.height + pool.requestersLen()
|
||||
for h := pool.height; h < nextHeight; h++ {
|
||||
if pool.requesters[h] == nil {
|
||||
str += cmn.Fmt("H(%v):X ", h)
|
||||
str += fmt.Sprintf("H(%v):X ", h)
|
||||
} else {
|
||||
str += cmn.Fmt("H(%v):", h)
|
||||
str += cmn.Fmt("B?(%v) ", pool.requesters[h].block != nil)
|
||||
str += fmt.Sprintf("H(%v):", h)
|
||||
str += fmt.Sprintf("B?(%v) ", pool.requesters[h].block != nil)
|
||||
}
|
||||
}
|
||||
return str
|
||||
|
@@ -1,7 +1,6 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -25,7 +24,7 @@ func makePeers(numPeers int, minHeight, maxHeight int64) map[p2p.ID]testPeer {
|
||||
peers := make(map[p2p.ID]testPeer, numPeers)
|
||||
for i := 0; i < numPeers; i++ {
|
||||
peerID := p2p.ID(cmn.RandStr(12))
|
||||
height := minHeight + rand.Int63n(maxHeight-minHeight)
|
||||
height := minHeight + cmn.RandInt63n(maxHeight-minHeight)
|
||||
peers[peerID] = testPeer{peerID, height}
|
||||
}
|
||||
return peers
|
||||
@@ -80,7 +79,7 @@ func TestBasic(t *testing.T) {
|
||||
}
|
||||
// Request desired, pretend like we got the block immediately.
|
||||
go func() {
|
||||
block := &types.Block{Header: &types.Header{Height: request.Height}}
|
||||
block := &types.Block{Header: types.Header{Height: request.Height}}
|
||||
pool.AddBlock(request.PeerID, block, 123)
|
||||
t.Logf("Added block from peer %v (height: %v)", request.PeerID, request.Height)
|
||||
}()
|
||||
|
@@ -18,7 +18,8 @@ const (
|
||||
// BlockchainChannel is a channel for blocks and status updates (`BlockStore` height)
|
||||
BlockchainChannel = byte(0x40)
|
||||
|
||||
trySyncIntervalMS = 50
|
||||
trySyncIntervalMS = 10
|
||||
|
||||
// stop syncing when last block's time is
|
||||
// within this much of the system time.
|
||||
// stopSyncingDurationMinutes = 10
|
||||
@@ -76,8 +77,9 @@ func NewBlockchainReactor(state sm.State, blockExec *sm.BlockExecutor, store *Bl
|
||||
store.Height()))
|
||||
}
|
||||
|
||||
const capacity = 1000 // must be bigger than peers count
|
||||
requestsCh := make(chan BlockRequest, capacity)
|
||||
requestsCh := make(chan BlockRequest, maxTotalRequesters)
|
||||
|
||||
const capacity = 1000 // must be bigger than peers count
|
||||
errorsCh := make(chan peerError, capacity) // so we don't block in #Receive#pool.AddBlock
|
||||
|
||||
pool := NewBlockPool(
|
||||
@@ -107,9 +109,6 @@ func (bcR *BlockchainReactor) SetLogger(l log.Logger) {
|
||||
|
||||
// OnStart implements cmn.Service.
|
||||
func (bcR *BlockchainReactor) OnStart() error {
|
||||
if err := bcR.BaseReactor.OnStart(); err != nil {
|
||||
return err
|
||||
}
|
||||
if bcR.fastSync {
|
||||
err := bcR.pool.Start()
|
||||
if err != nil {
|
||||
@@ -122,7 +121,6 @@ func (bcR *BlockchainReactor) OnStart() error {
|
||||
|
||||
// OnStop implements cmn.Service.
|
||||
func (bcR *BlockchainReactor) OnStop() {
|
||||
bcR.BaseReactor.OnStop()
|
||||
bcR.pool.Stop()
|
||||
}
|
||||
|
||||
@@ -203,13 +201,12 @@ func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
|
||||
// Got a peer status. Unverified.
|
||||
bcR.pool.SetPeerHeight(src.ID(), msg.Height)
|
||||
default:
|
||||
bcR.Logger.Error(cmn.Fmt("Unknown message type %v", reflect.TypeOf(msg)))
|
||||
bcR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
|
||||
}
|
||||
}
|
||||
|
||||
// Handle messages from the poolReactor telling the reactor what to do.
|
||||
// NOTE: Don't sleep in the FOR_LOOP or otherwise slow it down!
|
||||
// (Except for the SYNC_LOOP, which is the primary purpose and must be synchronous.)
|
||||
func (bcR *BlockchainReactor) poolRoutine() {
|
||||
|
||||
trySyncTicker := time.NewTicker(trySyncIntervalMS * time.Millisecond)
|
||||
@@ -224,6 +221,8 @@ func (bcR *BlockchainReactor) poolRoutine() {
|
||||
lastHundred := time.Now()
|
||||
lastRate := 0.0
|
||||
|
||||
didProcessCh := make(chan struct{}, 1)
|
||||
|
||||
FOR_LOOP:
|
||||
for {
|
||||
select {
|
||||
@@ -239,14 +238,17 @@ FOR_LOOP:
|
||||
// The pool handles timeouts, just let it go.
|
||||
continue FOR_LOOP
|
||||
}
|
||||
|
||||
case err := <-bcR.errorsCh:
|
||||
peer := bcR.Switch.Peers().Get(err.peerID)
|
||||
if peer != nil {
|
||||
bcR.Switch.StopPeerForError(peer, err)
|
||||
}
|
||||
|
||||
case <-statusUpdateTicker.C:
|
||||
// ask for status updates
|
||||
go bcR.BroadcastStatusRequest() // nolint: errcheck
|
||||
|
||||
case <-switchToConsensusTicker.C:
|
||||
height, numPending, lenRequesters := bcR.pool.GetStatus()
|
||||
outbound, inbound, _ := bcR.Switch.NumPeers()
|
||||
@@ -261,60 +263,78 @@ FOR_LOOP:
|
||||
|
||||
break FOR_LOOP
|
||||
}
|
||||
|
||||
case <-trySyncTicker.C: // chan time
|
||||
// This loop can be slow as long as it's doing syncing work.
|
||||
SYNC_LOOP:
|
||||
for i := 0; i < 10; i++ {
|
||||
// See if there are any blocks to sync.
|
||||
first, second := bcR.pool.PeekTwoBlocks()
|
||||
//bcR.Logger.Info("TrySync peeked", "first", first, "second", second)
|
||||
if first == nil || second == nil {
|
||||
// We need both to sync the first block.
|
||||
break SYNC_LOOP
|
||||
select {
|
||||
case didProcessCh <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
|
||||
case <-didProcessCh:
// NOTE: It is a subtle mistake to process more than a single block
// at a time (e.g. 10) here, because we only TrySend 1 request per
// loop. The ratio mismatch can result in starving of blocks, a
// sudden burst of requests and responses, and repeat.
// Consequently, it is better to split these routines rather than
// coupling them as it's written here. TODO uncouple from request
// routine.

// See if there are any blocks to sync.
|
||||
first, second := bcR.pool.PeekTwoBlocks()
|
||||
//bcR.Logger.Info("TrySync peeked", "first", first, "second", second)
|
||||
if first == nil || second == nil {
|
||||
// We need both to sync the first block.
|
||||
continue FOR_LOOP
|
||||
} else {
|
||||
// Try again quickly next loop.
|
||||
didProcessCh <- struct{}{}
|
||||
}
|
||||
|
||||
firstParts := first.MakePartSet(state.ConsensusParams.BlockPartSizeBytes)
|
||||
firstPartsHeader := firstParts.Header()
|
||||
firstID := types.BlockID{first.Hash(), firstPartsHeader}
|
||||
// Finally, verify the first block using the second's commit
|
||||
// NOTE: we can probably make this more efficient, but note that calling
|
||||
// first.Hash() doesn't verify the tx contents, so MakePartSet() is
|
||||
// currently necessary.
|
||||
err := state.Validators.VerifyCommit(
|
||||
chainID, firstID, first.Height, second.LastCommit)
|
||||
if err != nil {
|
||||
bcR.Logger.Error("Error in validation", "err", err)
|
||||
peerID := bcR.pool.RedoRequest(first.Height)
|
||||
peer := bcR.Switch.Peers().Get(peerID)
|
||||
if peer != nil {
|
||||
// NOTE: we've already removed the peer's request, but we
|
||||
// still need to clean up the rest.
|
||||
bcR.Switch.StopPeerForError(peer, fmt.Errorf("BlockchainReactor validation error: %v", err))
|
||||
}
|
||||
firstParts := first.MakePartSet(state.ConsensusParams.BlockPartSizeBytes)
|
||||
firstPartsHeader := firstParts.Header()
|
||||
firstID := types.BlockID{first.Hash(), firstPartsHeader}
|
||||
// Finally, verify the first block using the second's commit
|
||||
// NOTE: we can probably make this more efficient, but note that calling
|
||||
// first.Hash() doesn't verify the tx contents, so MakePartSet() is
|
||||
// currently necessary.
|
||||
err := state.Validators.VerifyCommit(
|
||||
chainID, firstID, first.Height, second.LastCommit)
|
||||
continue FOR_LOOP
|
||||
} else {
|
||||
bcR.pool.PopRequest()
|
||||
|
||||
// TODO: batch saves so we dont persist to disk every block
|
||||
bcR.store.SaveBlock(first, firstParts, second.LastCommit)
|
||||
|
||||
// TODO: same thing for app - but we would need a way to
|
||||
// get the hash without persisting the state
|
||||
var err error
|
||||
state, err = bcR.blockExec.ApplyBlock(state, firstID, first)
|
||||
if err != nil {
|
||||
bcR.Logger.Error("Error in validation", "err", err)
|
||||
peerID := bcR.pool.RedoRequest(first.Height)
|
||||
peer := bcR.Switch.Peers().Get(peerID)
|
||||
if peer != nil {
|
||||
bcR.Switch.StopPeerForError(peer, fmt.Errorf("BlockchainReactor validation error: %v", err))
|
||||
}
|
||||
break SYNC_LOOP
|
||||
} else {
|
||||
bcR.pool.PopRequest()
|
||||
// TODO This is bad, are we zombie?
|
||||
cmn.PanicQ(fmt.Sprintf("Failed to process committed block (%d:%X): %v",
|
||||
first.Height, first.Hash(), err))
|
||||
}
|
||||
blocksSynced++
|
||||
|
||||
// TODO: batch saves so we dont persist to disk every block
|
||||
bcR.store.SaveBlock(first, firstParts, second.LastCommit)
|
||||
|
||||
// TODO: same thing for app - but we would need a way to
|
||||
// get the hash without persisting the state
|
||||
var err error
|
||||
state, err = bcR.blockExec.ApplyBlock(state, firstID, first)
|
||||
if err != nil {
|
||||
// TODO This is bad, are we zombie?
|
||||
cmn.PanicQ(cmn.Fmt("Failed to process committed block (%d:%X): %v",
|
||||
first.Height, first.Hash(), err))
|
||||
}
|
||||
blocksSynced++
|
||||
|
||||
if blocksSynced%100 == 0 {
|
||||
lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds())
|
||||
bcR.Logger.Info("Fast Sync Rate", "height", bcR.pool.height,
|
||||
"max_peer_height", bcR.pool.MaxPeerHeight(), "blocks/s", lastRate)
|
||||
lastHundred = time.Now()
|
||||
}
|
||||
if blocksSynced%100 == 0 {
|
||||
lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds())
|
||||
bcR.Logger.Info("Fast Sync Rate", "height", bcR.pool.height,
|
||||
"max_peer_height", bcR.pool.MaxPeerHeight(), "blocks/s", lastRate)
|
||||
lastHundred = time.Now()
|
||||
}
|
||||
}
|
||||
continue FOR_LOOP
|
||||
|
||||
case <-bcR.Quit():
|
||||
break FOR_LOOP
|
||||
}
|
||||
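The rewritten poolRoutine above replaces the inner SYNC_LOOP with a didProcessCh self-signal: the ticker only wakes the loop, each wake-up processes at most one block, and a successful step re-arms the channel so the next block is handled on the next iteration instead of in a tight inner loop. A small, generic sketch of that pattern, with the block-processing work simulated by a counter:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Capacity 1, as in poolRoutine: a duplicate self-signal is dropped
	// instead of blocking the loop.
	didProcessCh := make(chan struct{}, 1)
	trySyncTicker := time.NewTicker(10 * time.Millisecond)
	defer trySyncTicker.Stop()

	blocksLeft := 3
	for blocksLeft > 0 {
		select {
		case <-trySyncTicker.C:
			// Non-blocking send: wake the processing case without piling
			// up signals.
			select {
			case didProcessCh <- struct{}{}:
			default:
			}
		case <-didProcessCh:
			// Process exactly one block per wake-up, then re-arm so the
			// next block is handled on the next loop iteration.
			blocksLeft--
			fmt.Println("processed one block; remaining:", blocksLeft)
			if blocksLeft > 0 {
				didProcessCh <- struct{}{}
			}
		}
	}
}
```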
@@ -336,11 +356,11 @@ type BlockchainMessage interface{}
|
||||
|
||||
func RegisterBlockchainMessages(cdc *amino.Codec) {
|
||||
cdc.RegisterInterface((*BlockchainMessage)(nil), nil)
|
||||
cdc.RegisterConcrete(&bcBlockRequestMessage{}, "tendermint/mempool/BlockRequest", nil)
|
||||
cdc.RegisterConcrete(&bcBlockResponseMessage{}, "tendermint/mempool/BlockResponse", nil)
|
||||
cdc.RegisterConcrete(&bcNoBlockResponseMessage{}, "tendermint/mempool/NoBlockResponse", nil)
|
||||
cdc.RegisterConcrete(&bcStatusResponseMessage{}, "tendermint/mempool/StatusResponse", nil)
|
||||
cdc.RegisterConcrete(&bcStatusRequestMessage{}, "tendermint/mempool/StatusRequest", nil)
|
||||
cdc.RegisterConcrete(&bcBlockRequestMessage{}, "tendermint/blockchain/BlockRequest", nil)
|
||||
cdc.RegisterConcrete(&bcBlockResponseMessage{}, "tendermint/blockchain/BlockResponse", nil)
|
||||
cdc.RegisterConcrete(&bcNoBlockResponseMessage{}, "tendermint/blockchain/NoBlockResponse", nil)
|
||||
cdc.RegisterConcrete(&bcStatusResponseMessage{}, "tendermint/blockchain/StatusResponse", nil)
|
||||
cdc.RegisterConcrete(&bcStatusRequestMessage{}, "tendermint/blockchain/StatusRequest", nil)
|
||||
}
|
||||
|
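RegisterBlockchainMessages now namespaces the amino routes under tendermint/blockchain/ instead of the old, misleading tendermint/mempool/ prefix. A trimmed-down sketch of that registration, with stand-in message structs in place of the reactor's real ones (only two of the five routes are shown):

```go
package main

import (
	"github.com/tendermint/go-amino"
)

// Stand-ins for the reactor's message types; the real ones live in the
// blockchain package.
type BlockchainMessage interface{}
type bcBlockRequestMessage struct{ Height int64 }
type bcStatusRequestMessage struct{ Height int64 }

func main() {
	cdc := amino.NewCodec()
	cdc.RegisterInterface((*BlockchainMessage)(nil), nil)
	// Routes are namespaced under "tendermint/blockchain/..." after this
	// change, instead of the old "tendermint/mempool/..." prefix.
	cdc.RegisterConcrete(&bcBlockRequestMessage{}, "tendermint/blockchain/BlockRequest", nil)
	cdc.RegisterConcrete(&bcStatusRequestMessage{}, "tendermint/blockchain/StatusRequest", nil)

	bz := cdc.MustMarshalBinaryBare(&bcBlockRequestMessage{Height: 1})
	_ = bz
}
```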
||||
func decodeMsg(bz []byte) (msg BlockchainMessage, err error) {
|
||||
@@ -358,7 +378,7 @@ type bcBlockRequestMessage struct {
|
||||
}
|
||||
|
||||
func (m *bcBlockRequestMessage) String() string {
|
||||
return cmn.Fmt("[bcBlockRequestMessage %v]", m.Height)
|
||||
return fmt.Sprintf("[bcBlockRequestMessage %v]", m.Height)
|
||||
}
|
||||
|
||||
type bcNoBlockResponseMessage struct {
|
||||
@@ -366,7 +386,7 @@ type bcNoBlockResponseMessage struct {
|
||||
}
|
||||
|
||||
func (brm *bcNoBlockResponseMessage) String() string {
|
||||
return cmn.Fmt("[bcNoBlockResponseMessage %d]", brm.Height)
|
||||
return fmt.Sprintf("[bcNoBlockResponseMessage %d]", brm.Height)
|
||||
}
|
||||
|
||||
//-------------------------------------
|
||||
@@ -376,7 +396,7 @@ type bcBlockResponseMessage struct {
|
||||
}
|
||||
|
||||
func (m *bcBlockResponseMessage) String() string {
|
||||
return cmn.Fmt("[bcBlockResponseMessage %v]", m.Block.Height)
|
||||
return fmt.Sprintf("[bcBlockResponseMessage %v]", m.Block.Height)
|
||||
}
|
||||
|
||||
//-------------------------------------
|
||||
@@ -386,7 +406,7 @@ type bcStatusRequestMessage struct {
|
||||
}
|
||||
|
||||
func (m *bcStatusRequestMessage) String() string {
|
||||
return cmn.Fmt("[bcStatusRequestMessage %v]", m.Height)
|
||||
return fmt.Sprintf("[bcStatusRequestMessage %v]", m.Height)
|
||||
}
|
||||
|
||||
//-------------------------------------
|
||||
@@ -396,5 +416,5 @@ type bcStatusResponseMessage struct {
|
||||
}
|
||||
|
||||
func (m *bcStatusResponseMessage) String() string {
|
||||
return cmn.Fmt("[bcStatusResponseMessage %v]", m.Height)
|
||||
return fmt.Sprintf("[bcStatusResponseMessage %v]", m.Height)
|
||||
}
|
||||
|
@@ -156,7 +156,7 @@ func makeTxs(height int64) (txs []types.Tx) {
|
||||
}
|
||||
|
||||
func makeBlock(height int64, state sm.State) *types.Block {
|
||||
block, _ := state.MakeBlock(height, makeTxs(height), new(types.Commit))
|
||||
block, _ := state.MakeBlock(height, makeTxs(height), new(types.Commit), nil, state.Validators.GetProposer().Address)
|
||||
return block
|
||||
}
|
||||
|
||||
@@ -206,3 +206,4 @@ func (tp *bcrTestPeer) IsPersistent() bool { return true }
|
||||
func (tp *bcrTestPeer) Get(s string) interface{} { return s }
|
||||
func (tp *bcrTestPeer) Set(string, interface{}) {}
|
||||
func (tp *bcrTestPeer) RemoteIP() net.IP { return []byte{127, 0, 0, 1} }
|
||||
func (tp *bcrTestPeer) OriginalAddr() *p2p.NetAddress { return nil }
|
||||
|
@@ -148,10 +148,10 @@ func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, s
|
||||
}
|
||||
height := block.Height
|
||||
if g, w := height, bs.Height()+1; g != w {
|
||||
cmn.PanicSanity(cmn.Fmt("BlockStore can only save contiguous blocks. Wanted %v, got %v", w, g))
|
||||
cmn.PanicSanity(fmt.Sprintf("BlockStore can only save contiguous blocks. Wanted %v, got %v", w, g))
|
||||
}
|
||||
if !blockParts.IsComplete() {
|
||||
cmn.PanicSanity(cmn.Fmt("BlockStore can only save complete block part sets"))
|
||||
cmn.PanicSanity(fmt.Sprintf("BlockStore can only save complete block part sets"))
|
||||
}
|
||||
|
||||
// Save block meta
|
||||
@@ -188,7 +188,7 @@ func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, s
|
||||
|
||||
func (bs *BlockStore) saveBlockPart(height int64, index int, part *types.Part) {
|
||||
if height != bs.Height()+1 {
|
||||
cmn.PanicSanity(cmn.Fmt("BlockStore can only save contiguous blocks. Wanted %v, got %v", bs.Height()+1, height))
|
||||
cmn.PanicSanity(fmt.Sprintf("BlockStore can only save contiguous blocks. Wanted %v, got %v", bs.Height()+1, height))
|
||||
}
|
||||
partBytes := cdc.MustMarshalBinaryBare(part)
|
||||
bs.db.Set(calcBlockPartKey(height, index), partBytes)
|
||||
@@ -224,7 +224,7 @@ type BlockStoreStateJSON struct {
|
||||
func (bsj BlockStoreStateJSON) Save(db dbm.DB) {
|
||||
bytes, err := cdc.MarshalJSON(bsj)
|
||||
if err != nil {
|
||||
cmn.PanicSanity(cmn.Fmt("Could not marshal state bytes: %v", err))
|
||||
cmn.PanicSanity(fmt.Sprintf("Could not marshal state bytes: %v", err))
|
||||
}
|
||||
db.SetSync(blockStoreKey, bytes)
|
||||
}
|
||||
|
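The Save method above keeps the same shape after the cmn.Fmt removal: marshal the state, panic with a formatted message on failure, then write synchronously under a fixed key. A minimal stand-alone sketch of that pattern, using encoding/json and an in-memory map in place of the amino codec and the dbm.DB interface (both stand-ins are assumptions made for illustration only):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// In-memory stand-in for the dbm.DB used by the real BlockStore.
type memDB map[string][]byte

func (db memDB) SetSync(key, value []byte) { db[string(key)] = value }

// Mirrors BlockStoreStateJSON: only the latest height is persisted.
type blockStoreStateJSON struct {
	Height int64 `json:"height"`
}

var blockStoreKey = []byte("blockStore")

func (bsj blockStoreStateJSON) Save(db memDB) {
	bytes, err := json.Marshal(bsj) // the real code marshals via the amino codec
	if err != nil {
		panic(fmt.Sprintf("Could not marshal state bytes: %v", err))
	}
	db.SetSync(blockStoreKey, bytes)
}

func main() {
	db := memDB{}
	blockStoreStateJSON{Height: 42}.Save(db)
	fmt.Println(string(db[string(blockStoreKey)]))
}
```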
@@ -6,7 +6,6 @@ import (
|
||||
"runtime/debug"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
@@ -14,6 +13,7 @@ import (
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
|
||||
"github.com/tendermint/tendermint/types"
|
||||
tmtime "github.com/tendermint/tendermint/types/time"
|
||||
)
|
||||
|
||||
func TestLoadBlockStoreStateJSON(t *testing.T) {
|
||||
@@ -49,7 +49,7 @@ func TestNewBlockStore(t *testing.T) {
|
||||
return nil, nil
|
||||
})
|
||||
require.NotNil(t, panicErr, "#%d panicCauser: %q expected a panic", i, tt.data)
|
||||
assert.Contains(t, panicErr.Error(), tt.wantErr, "#%d data: %q", i, tt.data)
|
||||
assert.Contains(t, fmt.Sprintf("%#v", panicErr), tt.wantErr, "#%d data: %q", i, tt.data)
|
||||
}
|
||||
|
||||
db.Set(blockStoreKey, nil)
|
||||
@@ -70,7 +70,7 @@ var (
|
||||
part1 = partSet.GetPart(0)
|
||||
part2 = partSet.GetPart(1)
|
||||
seenCommit1 = &types.Commit{Precommits: []*types.Vote{{Height: 10,
|
||||
Timestamp: time.Now().UTC()}}}
|
||||
Timestamp: tmtime.Now()}}}
|
||||
)
|
||||
|
||||
// TODO: This test should be simplified ...
|
||||
@@ -91,7 +91,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
|
||||
block := makeBlock(bs.Height()+1, state)
|
||||
validPartSet := block.MakePartSet(2)
|
||||
seenCommit := &types.Commit{Precommits: []*types.Vote{{Height: 10,
|
||||
Timestamp: time.Now().UTC()}}}
|
||||
Timestamp: tmtime.Now()}}}
|
||||
bs.SaveBlock(block, partSet, seenCommit)
|
||||
require.Equal(t, bs.Height(), block.Header.Height, "expecting the new height to be changed")
|
||||
|
||||
@@ -103,7 +103,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
|
||||
Height: 1,
|
||||
NumTxs: 100,
|
||||
ChainID: "block_test",
|
||||
Time: time.Now(),
|
||||
Time: tmtime.Now(),
|
||||
}
|
||||
header2 := header1
|
||||
header2.Height = 4
|
||||
@@ -111,7 +111,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
|
||||
// End of setup, test data
|
||||
|
||||
commitAtH10 := &types.Commit{Precommits: []*types.Vote{{Height: 10,
|
||||
Timestamp: time.Now().UTC()}}}
|
||||
Timestamp: tmtime.Now()}}}
|
||||
tuples := []struct {
|
||||
block *types.Block
|
||||
parts *types.PartSet
|
||||
@@ -126,7 +126,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
|
||||
eraseSeenCommitInDB bool
|
||||
}{
|
||||
{
|
||||
block: newBlock(&header1, commitAtH10),
|
||||
block: newBlock(header1, commitAtH10),
|
||||
parts: validPartSet,
|
||||
seenCommit: seenCommit1,
|
||||
},
|
||||
@@ -137,19 +137,19 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
|
||||
},
|
||||
|
||||
{
|
||||
block: newBlock(&header2, commitAtH10),
|
||||
block: newBlock(header2, commitAtH10),
|
||||
parts: uncontiguousPartSet,
|
||||
wantPanic: "only save contiguous blocks", // and incomplete and uncontiguous parts
|
||||
},
|
||||
|
||||
{
|
||||
block: newBlock(&header1, commitAtH10),
|
||||
block: newBlock(header1, commitAtH10),
|
||||
parts: incompletePartSet,
|
||||
wantPanic: "only save complete block", // incomplete parts
|
||||
},
|
||||
|
||||
{
|
||||
block: newBlock(&header1, commitAtH10),
|
||||
block: newBlock(header1, commitAtH10),
|
||||
parts: validPartSet,
|
||||
seenCommit: seenCommit1,
|
||||
corruptCommitInDB: true, // Corrupt the DB's commit entry
|
||||
@@ -157,7 +157,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
|
||||
},
|
||||
|
||||
{
|
||||
block: newBlock(&header1, commitAtH10),
|
||||
block: newBlock(header1, commitAtH10),
|
||||
parts: validPartSet,
|
||||
seenCommit: seenCommit1,
|
||||
wantPanic: "unmarshal to types.BlockMeta failed",
|
||||
@@ -165,7 +165,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
|
||||
},
|
||||
|
||||
{
|
||||
block: newBlock(&header1, commitAtH10),
|
||||
block: newBlock(header1, commitAtH10),
|
||||
parts: validPartSet,
|
||||
seenCommit: seenCommit1,
|
||||
|
||||
@@ -174,7 +174,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
|
||||
},
|
||||
|
||||
{
|
||||
block: newBlock(&header1, commitAtH10),
|
||||
block: newBlock(header1, commitAtH10),
|
||||
parts: validPartSet,
|
||||
seenCommit: seenCommit1,
|
||||
|
||||
@@ -183,7 +183,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
|
||||
},
|
||||
|
||||
{
|
||||
block: newBlock(&header1, commitAtH10),
|
||||
block: newBlock(header1, commitAtH10),
|
||||
parts: validPartSet,
|
||||
seenCommit: seenCommit1,
|
||||
|
||||
@@ -238,7 +238,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
|
||||
if subStr := tuple.wantPanic; subStr != "" {
|
||||
if panicErr == nil {
|
||||
t.Errorf("#%d: want a non-nil panic", i)
|
||||
} else if got := panicErr.Error(); !strings.Contains(got, subStr) {
|
||||
} else if got := fmt.Sprintf("%#v", panicErr); !strings.Contains(got, subStr) {
|
||||
t.Errorf("#%d:\n\tgotErr: %q\nwant substring: %q", i, got, subStr)
|
||||
}
|
||||
continue
|
||||
@@ -335,7 +335,7 @@ func TestBlockFetchAtHeight(t *testing.T) {
|
||||
|
||||
partSet := block.MakePartSet(2)
|
||||
seenCommit := &types.Commit{Precommits: []*types.Vote{{Height: 10,
|
||||
Timestamp: time.Now().UTC()}}}
|
||||
Timestamp: tmtime.Now()}}}
|
||||
|
||||
bs.SaveBlock(block, partSet, seenCommit)
|
||||
require.Equal(t, bs.Height(), block.Header.Height, "expecting the new height to be changed")
|
||||
@@ -375,7 +375,7 @@ func doFn(fn func() (interface{}, error)) (res interface{}, err error, panicErr
|
||||
return res, err, panicErr
|
||||
}
|
||||
|
||||
func newBlock(hdr *types.Header, lastCommit *types.Commit) *types.Block {
|
||||
func newBlock(hdr types.Header, lastCommit *types.Commit) *types.Block {
|
||||
return &types.Block{
|
||||
Header: hdr,
|
||||
LastCommit: lastCommit,
|
||||
|
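Several hunks above drop the pointer: newBlock now takes a types.Header value and the test builds blocks with header1 rather than &header1, so the later header2 := header1; header2.Height = 4 copy can no longer alias the original. A generic sketch of that value-copy behaviour, with a stand-in header type in place of types.Header:

```go
package main

import "fmt"

// Stand-in for types.Header; the point is only that copying a struct value
// yields an independent header, unlike copying a pointer.
type header struct {
	Height  int64
	ChainID string
}

func main() {
	header1 := header{Height: 1, ChainID: "block_test"}
	header2 := header1 // full copy, not an alias
	header2.Height = 4

	fmt.Println(header1.Height, header2.Height) // 1 4: header1 is untouched
}
```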
@@ -2,12 +2,12 @@ package blockchain
|
||||
|
||||
import (
|
||||
"github.com/tendermint/go-amino"
|
||||
"github.com/tendermint/tendermint/crypto"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
var cdc = amino.NewCodec()
|
||||
|
||||
func init() {
|
||||
RegisterBlockchainMessages(cdc)
|
||||
crypto.RegisterAmino(cdc)
|
||||
types.RegisterBlockAmino(cdc)
|
||||
}
|
||||
|
@@ -4,7 +4,7 @@ import (
|
||||
"flag"
|
||||
"os"
|
||||
|
||||
crypto "github.com/tendermint/tendermint/crypto"
|
||||
"github.com/tendermint/tendermint/crypto/ed25519"
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
|
||||
@@ -37,7 +37,7 @@ func main() {
|
||||
*chainID,
|
||||
*addr,
|
||||
pv,
|
||||
crypto.GenPrivKeyEd25519(),
|
||||
ed25519.GenPrivKey(),
|
||||
)
|
||||
err := rs.Start()
|
||||
if err != nil {
|
||||
|
@@ -5,8 +5,8 @@ import (
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
)
|
||||
|
||||
// GenNodeKeyCmd allows the generation of a node key. It prints node's ID to
|
||||
|
@@ -1,15 +1,16 @@
|
||||
package commands
|
||||
|
||||
import (
|
||||
"time"
|
||||
"fmt"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
"github.com/tendermint/tendermint/privval"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
tmtime "github.com/tendermint/tendermint/types/time"
|
||||
)
|
||||
|
||||
// InitFilesCmd initialises a fresh Tendermint Core instance.
|
||||
@@ -52,8 +53,8 @@ func initFilesWithConfig(config *cfg.Config) error {
|
||||
logger.Info("Found genesis file", "path", genFile)
|
||||
} else {
|
||||
genDoc := types.GenesisDoc{
|
||||
ChainID: cmn.Fmt("test-chain-%v", cmn.RandStr(6)),
|
||||
GenesisTime: time.Now(),
|
||||
ChainID: fmt.Sprintf("test-chain-%v", cmn.RandStr(6)),
|
||||
GenesisTime: tmtime.Now(),
|
||||
ConsensusParams: types.DefaultConsensusParams(),
|
||||
}
|
||||
genDoc.Validators = []types.GenesisValidator{{
|
||||
|
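The init command hunk switches genesis creation to the tmtime helper and plain fmt.Sprintf. The sketch below is assembled only from the calls that appear in that hunk; it is illustrative rather than the actual initFilesWithConfig body, and the printed values are placeholders:

```go
package main

import (
	"fmt"

	cmn "github.com/tendermint/tendermint/libs/common"
	"github.com/tendermint/tendermint/types"
	tmtime "github.com/tendermint/tendermint/types/time"
)

func main() {
	// The genesis time now comes from the tmtime helper rather than a raw
	// time.Now(), and the chain ID is formatted with fmt.Sprintf instead of
	// the removed cmn.Fmt.
	genDoc := types.GenesisDoc{
		ChainID:         fmt.Sprintf("test-chain-%v", cmn.RandStr(6)),
		GenesisTime:     tmtime.Now(),
		ConsensusParams: types.DefaultConsensusParams(),
	}
	fmt.Println(genDoc.ChainID, genDoc.GenesisTime)
}
```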
@@ -7,7 +7,6 @@ import (
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
|
||||
"github.com/tendermint/tendermint/lite/proxy"
|
||||
rpcclient "github.com/tendermint/tendermint/rpc/client"
|
||||
)
|
||||
@@ -66,17 +65,21 @@ func runProxy(cmd *cobra.Command, args []string) error {
|
||||
}
|
||||
|
||||
// First, connect a client
|
||||
logger.Info("Connecting to source HTTP client...")
|
||||
node := rpcclient.NewHTTP(nodeAddr, "/websocket")
|
||||
|
||||
cert, err := proxy.GetCertifier(chainID, home, nodeAddr)
|
||||
logger.Info("Constructing Verifier...")
|
||||
cert, err := proxy.NewVerifier(chainID, home, node, logger)
|
||||
if err != nil {
|
||||
return err
|
||||
return cmn.ErrorWrap(err, "constructing Verifier")
|
||||
}
|
||||
cert.SetLogger(logger)
|
||||
sc := proxy.SecureClient(node, cert)
|
||||
|
||||
logger.Info("Starting proxy...")
|
||||
err = proxy.StartProxy(sc, listenAddr, logger)
|
||||
if err != nil {
|
||||
return err
|
||||
return cmn.ErrorWrap(err, "starting proxy")
|
||||
}
|
||||
|
||||
cmn.TrapSignal(func() {
|
||||
|
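The lite proxy hunk replaces GetCertifier with a Verifier built from the live node connection and wraps failures with cmn.ErrorWrap. The sketch below is assembled only from the calls introduced in that hunk; the addresses are placeholders, and error handling is reduced to returning the wrapped error:

```go
package main

import (
	cmn "github.com/tendermint/tendermint/libs/common"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/lite/proxy"
	rpcclient "github.com/tendermint/tendermint/rpc/client"
)

func runProxySketch(chainID, home, nodeAddr, listenAddr string) error {
	logger := log.TestingLogger()

	// First, connect a client to the full node.
	node := rpcclient.NewHTTP(nodeAddr, "/websocket")

	// The Verifier is now constructed from the node connection itself.
	cert, err := proxy.NewVerifier(chainID, home, node, logger)
	if err != nil {
		return cmn.ErrorWrap(err, "constructing Verifier")
	}
	cert.SetLogger(logger)
	sc := proxy.SecureClient(node, cert)

	// Serve verified RPC on listenAddr.
	if err := proxy.StartProxy(sc, listenAddr, logger); err != nil {
		return cmn.ErrorWrap(err, "starting proxy")
	}
	return nil
}

func main() { /* wiring sketch only */ }
```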
@@ -5,8 +5,8 @@ import (
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"github.com/tendermint/tendermint/privval"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/privval"
|
||||
)
|
||||
|
||||
// ResetAllCmd removes the database of this Tendermint core
|
||||
|
@@ -6,15 +6,15 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
"github.com/tendermint/tendermint/privval"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
tmtime "github.com/tendermint/tendermint/types/time"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -76,7 +76,7 @@ func testnetFiles(cmd *cobra.Command, args []string) error {
|
||||
genVals := make([]types.GenesisValidator, nValidators)
|
||||
|
||||
for i := 0; i < nValidators; i++ {
|
||||
nodeDirName := cmn.Fmt("%s%d", nodeDirPrefix, i)
|
||||
nodeDirName := fmt.Sprintf("%s%d", nodeDirPrefix, i)
|
||||
nodeDir := filepath.Join(outputDir, nodeDirName)
|
||||
config.SetRoot(nodeDir)
|
||||
|
||||
@@ -98,7 +98,7 @@ func testnetFiles(cmd *cobra.Command, args []string) error {
|
||||
}
|
||||
|
||||
for i := 0; i < nNonValidators; i++ {
|
||||
nodeDir := filepath.Join(outputDir, cmn.Fmt("%s%d", nodeDirPrefix, i+nValidators))
|
||||
nodeDir := filepath.Join(outputDir, fmt.Sprintf("%s%d", nodeDirPrefix, i+nValidators))
|
||||
config.SetRoot(nodeDir)
|
||||
|
||||
err := os.MkdirAll(filepath.Join(nodeDir, "config"), nodeDirPerm)
|
||||
@@ -112,14 +112,14 @@ func testnetFiles(cmd *cobra.Command, args []string) error {
|
||||
|
||||
// Generate genesis doc from generated validators
|
||||
genDoc := &types.GenesisDoc{
|
||||
GenesisTime: time.Now(),
|
||||
GenesisTime: tmtime.Now(),
|
||||
ChainID: "chain-" + cmn.RandStr(6),
|
||||
Validators: genVals,
|
||||
}
|
||||
|
||||
// Write genesis file.
|
||||
for i := 0; i < nValidators+nNonValidators; i++ {
|
||||
nodeDir := filepath.Join(outputDir, cmn.Fmt("%s%d", nodeDirPrefix, i))
|
||||
nodeDir := filepath.Join(outputDir, fmt.Sprintf("%s%d", nodeDirPrefix, i))
|
||||
if err := genDoc.SaveAs(filepath.Join(nodeDir, config.BaseConfig.Genesis)); err != nil {
|
||||
_ = os.RemoveAll(outputDir)
|
||||
return err
|
||||
@@ -159,7 +159,7 @@ func hostnameOrIP(i int) string {
|
||||
func populatePersistentPeersInConfigAndWriteIt(config *cfg.Config) error {
|
||||
persistentPeers := make([]string, nValidators+nNonValidators)
|
||||
for i := 0; i < nValidators+nNonValidators; i++ {
|
||||
nodeDir := filepath.Join(outputDir, cmn.Fmt("%s%d", nodeDirPrefix, i))
|
||||
nodeDir := filepath.Join(outputDir, fmt.Sprintf("%s%d", nodeDirPrefix, i))
|
||||
config.SetRoot(nodeDir)
|
||||
nodeKey, err := p2p.LoadNodeKey(config.NodeKeyFile())
|
||||
if err != nil {
|
||||
@@ -170,7 +170,7 @@ func populatePersistentPeersInConfigAndWriteIt(config *cfg.Config) error {
|
||||
persistentPeersList := strings.Join(persistentPeers, ",")
|
||||
|
||||
for i := 0; i < nValidators+nNonValidators; i++ {
|
||||
nodeDir := filepath.Join(outputDir, cmn.Fmt("%s%d", nodeDirPrefix, i))
|
||||
nodeDir := filepath.Join(outputDir, fmt.Sprintf("%s%d", nodeDirPrefix, i))
|
||||
config.SetRoot(nodeDir)
|
||||
config.P2P.PersistentPeers = persistentPeersList
|
||||
config.P2P.AddrBookStrict = false
|
||||
|
@@ -2,11 +2,11 @@ package commands
|
||||
|
||||
import (
|
||||
"github.com/tendermint/go-amino"
|
||||
"github.com/tendermint/tendermint/crypto"
|
||||
cryptoAmino "github.com/tendermint/tendermint/crypto/encoding/amino"
|
||||
)
|
||||
|
||||
var cdc = amino.NewCodec()
|
||||
|
||||
func init() {
|
||||
crypto.RegisterAmino(cdc)
|
||||
cryptoAmino.RegisterAmino(cdc)
|
||||
}
|
||||
|
@@ -21,3 +21,4 @@ ignore:
|
||||
- "docs"
|
||||
- "DOCKER"
|
||||
- "scripts"
|
||||
- "**/*.pb.go"
|
||||
|
config/config.go (118 changed lines)
@@ -94,7 +94,6 @@ func (cfg *Config) SetRoot(root string) *Config {
|
||||
|
||||
// BaseConfig defines the base configuration for a Tendermint node
|
||||
type BaseConfig struct {
|
||||
|
||||
// chainID is unexposed and immutable but here for convenience
|
||||
chainID string
|
||||
|
||||
@@ -102,49 +101,49 @@ type BaseConfig struct {
|
||||
// This should be set in viper so it can unmarshal into this struct
|
||||
RootDir string `mapstructure:"home"`
|
||||
|
||||
// Path to the JSON file containing the initial validator set and other meta data
|
||||
Genesis string `mapstructure:"genesis_file"`
|
||||
|
||||
// Path to the JSON file containing the private key to use as a validator in the consensus protocol
|
||||
PrivValidator string `mapstructure:"priv_validator_file"`
|
||||
|
||||
// A JSON file containing the private key to use for p2p authenticated encryption
|
||||
NodeKey string `mapstructure:"node_key_file"`
|
||||
|
||||
// A custom human readable name for this node
|
||||
Moniker string `mapstructure:"moniker"`
|
||||
|
||||
// TCP or UNIX socket address for Tendermint to listen on for
|
||||
// connections from an external PrivValidator process
|
||||
PrivValidatorListenAddr string `mapstructure:"priv_validator_laddr"`
|
||||
|
||||
// TCP or UNIX socket address of the ABCI application,
|
||||
// or the name of an ABCI application compiled in with the Tendermint binary
|
||||
ProxyApp string `mapstructure:"proxy_app"`
|
||||
|
||||
// Mechanism to connect to the ABCI application: socket | grpc
|
||||
ABCI string `mapstructure:"abci"`
|
||||
|
||||
// Output level for logging
|
||||
LogLevel string `mapstructure:"log_level"`
|
||||
|
||||
// TCP or UNIX socket address for the profiling server to listen on
|
||||
ProfListenAddress string `mapstructure:"prof_laddr"`
|
||||
// A custom human readable name for this node
|
||||
Moniker string `mapstructure:"moniker"`
|
||||
|
||||
// If this node is many blocks behind the tip of the chain, FastSync
|
||||
// allows them to catchup quickly by downloading blocks in parallel
|
||||
// and verifying their commits
|
||||
FastSync bool `mapstructure:"fast_sync"`
|
||||
|
||||
// If true, query the ABCI app on connecting to a new peer
|
||||
// so the app can decide if we should keep the connection or not
|
||||
FilterPeers bool `mapstructure:"filter_peers"` // false
|
||||
|
||||
// Database backend: leveldb | memdb
|
||||
DBBackend string `mapstructure:"db_backend"`
|
||||
|
||||
// Database directory
|
||||
DBPath string `mapstructure:"db_dir"`
|
||||
|
||||
// Output level for logging
|
||||
LogLevel string `mapstructure:"log_level"`
|
||||
|
||||
// Path to the JSON file containing the initial validator set and other meta data
|
||||
Genesis string `mapstructure:"genesis_file"`
|
||||
|
||||
// Path to the JSON file containing the private key to use as a validator in the consensus protocol
|
||||
PrivValidator string `mapstructure:"priv_validator_file"`
|
||||
|
||||
// TCP or UNIX socket address for Tendermint to listen on for
|
||||
// connections from an external PrivValidator process
|
||||
PrivValidatorListenAddr string `mapstructure:"priv_validator_laddr"`
|
||||
|
||||
// A JSON file containing the private key to use for p2p authenticated encryption
|
||||
NodeKey string `mapstructure:"node_key_file"`
|
||||
|
||||
// Mechanism to connect to the ABCI application: socket | grpc
|
||||
ABCI string `mapstructure:"abci"`
|
||||
|
||||
// TCP or UNIX socket address for the profiling server to listen on
|
||||
ProfListenAddress string `mapstructure:"prof_laddr"`
|
||||
|
||||
// If true, query the ABCI app on connecting to a new peer
|
||||
// so the app can decide if we should keep the connection or not
|
||||
FilterPeers bool `mapstructure:"filter_peers"` // false
|
||||
}
|
||||
|
||||
// DefaultBaseConfig returns a default base configuration for a Tendermint node
|
||||
@@ -239,6 +238,8 @@ type RPCConfig struct {
|
||||
// If you want to accept more significant number than the default, make sure
|
||||
// you increase your OS limits.
|
||||
// 0 - unlimited.
|
||||
// Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
|
||||
// 1024 - 40 - 10 - 50 = 924 = ~900
|
||||
MaxOpenConnections int `mapstructure:"max_open_connections"`
|
||||
}
|
||||
|
||||
@@ -248,11 +249,9 @@ func DefaultRPCConfig() *RPCConfig {
|
||||
ListenAddress: "tcp://0.0.0.0:26657",
|
||||
|
||||
GRPCListenAddress: "",
|
||||
GRPCMaxOpenConnections: 900, // no ipv4
|
||||
GRPCMaxOpenConnections: 900,
|
||||
|
||||
Unsafe: false,
|
||||
// should be < {ulimit -Sn} - {MaxNumPeers} - {N of wal, db and other open files}
|
||||
// 1024 - 50 - 50 = 924 = ~900
|
||||
Unsafe: false,
|
||||
MaxOpenConnections: 900,
|
||||
}
|
||||
}
|
||||
@@ -284,7 +283,6 @@ type P2PConfig struct {
|
||||
Seeds string `mapstructure:"seeds"`
|
||||
|
||||
// Comma separated list of nodes to keep persistent connections to
|
||||
// Do not add private peers to this list if you don't want them advertised
|
||||
PersistentPeers string `mapstructure:"persistent_peers"`
|
||||
|
||||
// UPNP port forwarding
|
||||
@@ -294,10 +292,14 @@ type P2PConfig struct {
|
||||
AddrBook string `mapstructure:"addr_book_file"`
|
||||
|
||||
// Set true for strict address routability rules
|
||||
// Set false for private or local networks
|
||||
AddrBookStrict bool `mapstructure:"addr_book_strict"`
|
||||
|
||||
// Maximum number of peers to connect to
|
||||
MaxNumPeers int `mapstructure:"max_num_peers"`
|
||||
// Maximum number of inbound peers
|
||||
MaxNumInboundPeers int `mapstructure:"max_num_inbound_peers"`
|
||||
|
||||
// Maximum number of outbound peers to connect to, excluding persistent peers
|
||||
MaxNumOutboundPeers int `mapstructure:"max_num_outbound_peers"`
|
||||
|
||||
// Time to wait before flushing messages out on the connection, in ms
|
||||
FlushThrottleTimeout int `mapstructure:"flush_throttle_timeout"`
|
||||
@@ -347,11 +349,12 @@ func DefaultP2PConfig() *P2PConfig {
|
||||
UPNP: false,
|
||||
AddrBook: defaultAddrBookPath,
|
||||
AddrBookStrict: true,
|
||||
MaxNumPeers: 50,
|
||||
MaxNumInboundPeers: 40,
|
||||
MaxNumOutboundPeers: 10,
|
||||
FlushThrottleTimeout: 100,
|
||||
MaxPacketMsgPayloadSize: 1024, // 1 kB
|
||||
SendRate: 512000, // 500 kB/s
|
||||
RecvRate: 512000, // 500 kB/s
|
||||
MaxPacketMsgPayloadSize: 1024, // 1 kB
|
||||
SendRate: 5120000, // 5 mB/s
|
||||
RecvRate: 5120000, // 5 mB/s
|
||||
PexReactor: true,
|
||||
SeedMode: false,
|
||||
AllowDuplicateIP: true, // so non-breaking yet
|
||||
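DefaultP2PConfig now splits the single peer cap into separate inbound and outbound limits and raises the default send/receive rates tenfold. A tiny sketch reading those new fields off the default config; the field names and default values are taken from the hunk above:

```go
package main

import (
	"fmt"

	cfg "github.com/tendermint/tendermint/config"
)

func main() {
	p2p := cfg.DefaultP2PConfig()

	// MaxNumPeers is replaced by inbound/outbound limits, and the default
	// send/recv rates move from 500 kB/s to 5 MB/s.
	fmt.Println("inbound peers: ", p2p.MaxNumInboundPeers)  // 40 by default
	fmt.Println("outbound peers:", p2p.MaxNumOutboundPeers) // 10 by default
	fmt.Println("send rate:     ", p2p.SendRate)            // 5120000 bytes/s
}
```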
@@ -418,8 +421,10 @@ func DefaultMempoolConfig() *MempoolConfig {
|
||||
RecheckEmpty: true,
|
||||
Broadcast: true,
|
||||
WalPath: filepath.Join(defaultDataDir, "mempool.wal"),
|
||||
Size: 100000,
|
||||
CacheSize: 100000,
|
||||
// Each signature verification takes .5ms, size reduced until we implement
|
||||
// ABCI Recheck
|
||||
Size: 5000,
|
||||
CacheSize: 10000,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -464,6 +469,9 @@ type ConsensusConfig struct {
|
||||
// Reactor sleep duration parameters are in milliseconds
|
||||
PeerGossipSleepDuration int `mapstructure:"peer_gossip_sleep_duration"`
|
||||
PeerQueryMaj23SleepDuration int `mapstructure:"peer_query_maj23_sleep_duration"`
|
||||
|
||||
// Block time parameters in milliseconds. Corresponds to the minimum time increment between consecutive blocks.
|
||||
BlockTimeIota int `mapstructure:"blocktime_iota"`
|
||||
}
|
||||
|
||||
// DefaultConsensusConfig returns a default configuration for the consensus service
|
||||
@@ -482,6 +490,7 @@ func DefaultConsensusConfig() *ConsensusConfig {
|
||||
CreateEmptyBlocksInterval: 0,
|
||||
PeerGossipSleepDuration: 100,
|
||||
PeerQueryMaj23SleepDuration: 2000,
|
||||
BlockTimeIota: 1000,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -498,9 +507,17 @@ func TestConsensusConfig() *ConsensusConfig {
|
||||
cfg.SkipTimeoutCommit = true
|
||||
cfg.PeerGossipSleepDuration = 5
|
||||
cfg.PeerQueryMaj23SleepDuration = 250
|
||||
cfg.BlockTimeIota = 10
|
||||
return cfg
|
||||
}
|
||||
|
||||
// MinValidVoteTime returns the minimum acceptable block time.
|
||||
// See the [BFT time spec](https://godoc.org/github.com/tendermint/tendermint/docs/spec/consensus/bft-time.md).
|
||||
func (cfg *ConsensusConfig) MinValidVoteTime(lastBlockTime time.Time) time.Time {
|
||||
return lastBlockTime.
|
||||
Add(time.Duration(cfg.BlockTimeIota) * time.Millisecond)
|
||||
}
|
||||
|
||||
// WaitForTxs returns true if the consensus should wait for transactions before entering the propose step
|
||||
func (cfg *ConsensusConfig) WaitForTxs() bool {
|
||||
return !cfg.CreateEmptyBlocks || cfg.CreateEmptyBlocksInterval > 0
|
||||
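MinValidVoteTime, added above, is simply the previous block time plus BlockTimeIota milliseconds. A self-contained sketch of that computation, with a local helper standing in for the config method:

```go
package main

import (
	"fmt"
	"time"
)

// Mirrors ConsensusConfig.MinValidVoteTime: the earliest acceptable block
// time is the previous block time plus BlockTimeIota milliseconds.
func minValidVoteTime(lastBlockTime time.Time, blockTimeIotaMS int) time.Time {
	return lastBlockTime.Add(time.Duration(blockTimeIotaMS) * time.Millisecond)
}

func main() {
	last := time.Date(2018, 8, 1, 12, 0, 0, 0, time.UTC)
	// The default BlockTimeIota is 1000 ms; the test config uses 10 ms.
	fmt.Println(minValidVoteTime(last, 1000)) // 2018-08-01 12:00:01 +0000 UTC
}
```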
@@ -557,8 +574,8 @@ func (cfg *ConsensusConfig) SetWalFile(walFile string) {
|
||||
//-----------------------------------------------------------------------------
|
||||
// TxIndexConfig
|
||||
|
||||
// TxIndexConfig defines the configuration for the transaction
|
||||
// indexer, including tags to index.
|
||||
// TxIndexConfig defines the configuration for the transaction indexer,
|
||||
// including tags to index.
|
||||
type TxIndexConfig struct {
|
||||
// What indexer to use for transactions
|
||||
//
|
||||
@@ -567,16 +584,21 @@ type TxIndexConfig struct {
|
||||
// 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
|
||||
Indexer string `mapstructure:"indexer"`
|
||||
|
||||
// Comma-separated list of tags to index (by default the only tag is tx hash)
|
||||
// Comma-separated list of tags to index (by default the only tag is "tx.hash")
|
||||
//
|
||||
// You can also index transactions by height by adding "tx.height" tag here.
|
||||
//
|
||||
// It's recommended to index only a subset of tags due to possible memory
|
||||
// bloat. This is, of course, depends on the indexer's DB and the volume of
|
||||
// transactions.
|
||||
IndexTags string `mapstructure:"index_tags"`
|
||||
|
||||
// When set to true, tells indexer to index all tags. Note this may be not
|
||||
// desirable (see the comment above). IndexTags has a precedence over
|
||||
// IndexAllTags (i.e. when given both, IndexTags will be indexed).
|
||||
// When set to true, tells indexer to index all tags (predefined tags:
|
||||
// "tx.hash", "tx.height" and all tags from DeliverTx responses).
|
||||
//
|
||||
// Note this may be not desirable (see the comment above). IndexTags has a
|
||||
// precedence over IndexAllTags (i.e. when given both, IndexTags will be
|
||||
// indexed).
|
||||
IndexAllTags bool `mapstructure:"index_all_tags"`
|
||||
}
|
||||
|
||||
|
@@ -81,7 +81,7 @@ fast_sync = {{ .BaseConfig.FastSync }}
|
||||
db_backend = "{{ .BaseConfig.DBBackend }}"
|
||||
|
||||
# Database directory
|
||||
db_path = "{{ js .BaseConfig.DBPath }}"
|
||||
db_dir = "{{ js .BaseConfig.DBPath }}"
|
||||
|
||||
# Output level for logging, including package level options
|
||||
log_level = "{{ .BaseConfig.LogLevel }}"
|
||||
@@ -94,6 +94,10 @@ genesis_file = "{{ js .BaseConfig.Genesis }}"
|
||||
# Path to the JSON file containing the private key to use as a validator in the consensus protocol
|
||||
priv_validator_file = "{{ js .BaseConfig.PrivValidator }}"
|
||||
|
||||
# TCP or UNIX socket address for Tendermint to listen on for
|
||||
# connections from an external PrivValidator process
|
||||
priv_validator_laddr = "{{ .BaseConfig.PrivValidatorListenAddr }}"
|
||||
|
||||
# Path to the JSON file containing the private key to use for node authentication in the p2p protocol
|
||||
node_key_file = "{{ js .BaseConfig.NodeKey}}"
|
||||
|
||||
@@ -124,6 +128,8 @@ grpc_laddr = "{{ .RPC.GRPCListenAddress }}"
|
||||
# If you want to accept more significant number than the default, make sure
|
||||
# you increase your OS limits.
|
||||
# 0 - unlimited.
|
||||
# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
|
||||
# 1024 - 40 - 10 - 50 = 924 = ~900
|
||||
grpc_max_open_connections = {{ .RPC.GRPCMaxOpenConnections }}
|
||||
|
||||
# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool
|
||||
@@ -134,6 +140,8 @@ unsafe = {{ .RPC.Unsafe }}
|
||||
# If you want to accept more significant number than the default, make sure
|
||||
# you increase your OS limits.
|
||||
# 0 - unlimited.
|
||||
# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
|
||||
# 1024 - 40 - 10 - 50 = 924 = ~900
|
||||
max_open_connections = {{ .RPC.MaxOpenConnections }}
|
||||
|
||||
##### peer to peer configuration options #####
|
||||
@@ -152,7 +160,6 @@ external_address = "{{ .P2P.ExternalAddress }}"
|
||||
seeds = "{{ .P2P.Seeds }}"
|
||||
|
||||
# Comma separated list of nodes to keep persistent connections to
|
||||
# Do not add private peers to this list if you don't want them advertised
|
||||
persistent_peers = "{{ .P2P.PersistentPeers }}"
|
||||
|
||||
# UPNP port forwarding
|
||||
@@ -162,13 +169,17 @@ upnp = {{ .P2P.UPNP }}
|
||||
addr_book_file = "{{ js .P2P.AddrBook }}"
|
||||
|
||||
# Set true for strict address routability rules
|
||||
# Set false for private or local networks
|
||||
addr_book_strict = {{ .P2P.AddrBookStrict }}
|
||||
|
||||
# Time to wait before flushing messages out on the connection, in ms
|
||||
flush_throttle_timeout = {{ .P2P.FlushThrottleTimeout }}
|
||||
|
||||
# Maximum number of peers to connect to
|
||||
max_num_peers = {{ .P2P.MaxNumPeers }}
|
||||
# Maximum number of inbound peers
|
||||
max_num_inbound_peers = {{ .P2P.MaxNumInboundPeers }}
|
||||
|
||||
# Maximum number of outbound peers to connect to, excluding persistent peers
|
||||
max_num_outbound_peers = {{ .P2P.MaxNumOutboundPeers }}
|
||||
|
||||
# Maximum size of a message packet payload, in bytes
|
||||
max_packet_msg_payload_size = {{ .P2P.MaxPacketMsgPayloadSize }}
|
||||
@@ -240,16 +251,21 @@ peer_query_maj23_sleep_duration = {{ .Consensus.PeerQueryMaj23SleepDuration }}
|
||||
# 2) "kv" - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
|
||||
indexer = "{{ .TxIndex.Indexer }}"
|
||||
|
||||
# Comma-separated list of tags to index (by default the only tag is tx hash)
|
||||
# Comma-separated list of tags to index (by default the only tag is "tx.hash")
|
||||
#
|
||||
# You can also index transactions by height by adding "tx.height" tag here.
|
||||
#
|
||||
# It's recommended to index only a subset of tags due to possible memory
|
||||
# bloat. This is, of course, depends on the indexer's DB and the volume of
|
||||
# transactions.
|
||||
index_tags = "{{ .TxIndex.IndexTags }}"
|
||||
|
||||
# When set to true, tells indexer to index all tags. Note this may be not
|
||||
# desirable (see the comment above). IndexTags has a precedence over
|
||||
# IndexAllTags (i.e. when given both, IndexTags will be indexed).
|
||||
# When set to true, tells indexer to index all tags (predefined tags:
|
||||
# "tx.hash", "tx.height" and all tags from DeliverTx responses).
|
||||
#
|
||||
# Note this may be not desirable (see the comment above). IndexTags has a
|
||||
# precedence over IndexAllTags (i.e. when given both, IndexTags will be
|
||||
# indexed).
|
||||
index_all_tags = {{ .TxIndex.IndexAllTags }}
|
||||
|
||||
##### instrumentation configuration options #####
|
||||
|
@@ -2,14 +2,15 @@ package consensus
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
)
|
||||
|
||||
func init() {
|
||||
@@ -156,8 +157,8 @@ func TestByzantine(t *testing.T) {
|
||||
case <-done:
|
||||
case <-tick.C:
|
||||
for i, reactor := range reactors {
|
||||
t.Log(cmn.Fmt("Consensus Reactor %v", i))
|
||||
t.Log(cmn.Fmt("%v", reactor))
|
||||
t.Log(fmt.Sprintf("Consensus Reactor %v", i))
|
||||
t.Log(fmt.Sprintf("%v", reactor))
|
||||
}
|
||||
t.Fatalf("Timed out waiting for all validators to commit first block")
|
||||
}
|
||||
|
@@ -17,14 +17,15 @@ import (
|
||||
bc "github.com/tendermint/tendermint/blockchain"
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
cstypes "github.com/tendermint/tendermint/consensus/types"
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
dbm "github.com/tendermint/tendermint/libs/db"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
mempl "github.com/tendermint/tendermint/mempool"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
"github.com/tendermint/tendermint/privval"
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
dbm "github.com/tendermint/tendermint/libs/db"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
tmtime "github.com/tendermint/tendermint/types/time"
|
||||
|
||||
"github.com/tendermint/tendermint/abci/example/counter"
|
||||
"github.com/tendermint/tendermint/abci/example/kvstore"
|
||||
@@ -75,7 +76,7 @@ func (vs *validatorStub) signVote(voteType byte, hash []byte, header types.PartS
|
||||
ValidatorAddress: vs.PrivValidator.GetAddress(),
|
||||
Height: vs.Height,
|
||||
Round: vs.Round,
|
||||
Timestamp: time.Now().UTC(),
|
||||
Timestamp: tmtime.Now(),
|
||||
Type: voteType,
|
||||
BlockID: types.BlockID{hash, header},
|
||||
}
|
||||
@@ -348,13 +349,13 @@ func randConsensusNet(nValidators int, testName string, tickerFunc func() Timeou
|
||||
for i := 0; i < nValidators; i++ {
|
||||
stateDB := dbm.NewMemDB() // each state needs its own db
|
||||
state, _ := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc)
|
||||
thisConfig := ResetConfig(cmn.Fmt("%s_%d", testName, i))
|
||||
thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
|
||||
for _, opt := range configOpts {
|
||||
opt(thisConfig)
|
||||
}
|
||||
ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
|
||||
app := appFunc()
|
||||
vals := types.TM2PB.Validators(state.Validators)
|
||||
vals := types.TM2PB.ValidatorUpdates(state.Validators)
|
||||
app.InitChain(abci.RequestInitChain{Validators: vals})
|
||||
|
||||
css[i] = newConsensusStateWithConfig(thisConfig, state, privVals[i], app)
|
||||
@@ -372,18 +373,21 @@ func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerF
|
||||
for i := 0; i < nPeers; i++ {
|
||||
stateDB := dbm.NewMemDB() // each state needs its own db
|
||||
state, _ := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc)
|
||||
thisConfig := ResetConfig(cmn.Fmt("%s_%d", testName, i))
|
||||
thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
|
||||
ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
|
||||
var privVal types.PrivValidator
|
||||
if i < nValidators {
|
||||
privVal = privVals[i]
|
||||
} else {
|
||||
_, tempFilePath := cmn.Tempfile("priv_validator_")
|
||||
privVal = privval.GenFilePV(tempFilePath)
|
||||
tempFile, err := ioutil.TempFile("", "priv_validator_")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
privVal = privval.GenFilePV(tempFile.Name())
|
||||
}
|
||||
|
||||
app := appFunc()
|
||||
vals := types.TM2PB.Validators(state.Validators)
|
||||
vals := types.TM2PB.ValidatorUpdates(state.Validators)
|
||||
app.InitChain(abci.RequestInitChain{Validators: vals})
|
||||
|
||||
css[i] = newConsensusStateWithConfig(thisConfig, state, privVal, app)
|
||||
@@ -420,7 +424,7 @@ func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.G
|
||||
sort.Sort(types.PrivValidatorsByAddress(privValidators))
|
||||
|
||||
return &types.GenesisDoc{
|
||||
GenesisTime: time.Now(),
|
||||
GenesisTime: tmtime.Now(),
|
||||
ChainID: config.ChainID(),
|
||||
Validators: validators,
|
||||
}, privValidators
|
||||
@@ -429,7 +433,7 @@ func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.G
|
||||
func randGenesisState(numValidators int, randPower bool, minPower int64) (sm.State, []types.PrivValidator) {
|
||||
genDoc, privValidators := randGenesisDoc(numValidators, randPower, minPower)
|
||||
s0, _ := sm.MakeGenesisState(genDoc)
|
||||
db := dbm.NewMemDB()
|
||||
db := dbm.NewMemDB() // remove this ?
|
||||
sm.SaveState(db, s0)
|
||||
return s0, privValidators
|
||||
}
|
||||
|
@@ -10,7 +10,6 @@ import (
|
||||
|
||||
"github.com/tendermint/tendermint/abci/example/code"
|
||||
abci "github.com/tendermint/tendermint/abci/types"
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
@@ -89,7 +88,7 @@ func deliverTxsRange(cs *ConsensusState, start, end int) {
|
||||
binary.BigEndian.PutUint64(txBytes, uint64(i))
|
||||
err := cs.mempool.CheckTx(txBytes, nil)
|
||||
if err != nil {
|
||||
panic(cmn.Fmt("Error after CheckTx: %v", err))
|
||||
panic(fmt.Sprintf("Error after CheckTx: %v", err))
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -100,7 +99,7 @@ func TestMempoolTxConcurrentWithCommit(t *testing.T) {
|
||||
height, round := cs.Height, cs.Round
|
||||
newBlockCh := subscribe(cs.eventBus, types.EventQueryNewBlock)
|
||||
|
||||
NTxs := 10000
|
||||
NTxs := 3000
|
||||
go deliverTxsRange(cs, 0, NTxs)
|
||||
|
||||
startTestRound(cs, height, round)
|
||||
@@ -126,7 +125,7 @@ func TestMempoolRmBadTx(t *testing.T) {
|
||||
binary.BigEndian.PutUint64(txBytes, uint64(0))
|
||||
|
||||
resDeliver := app.DeliverTx(txBytes)
|
||||
assert.False(t, resDeliver.IsErr(), cmn.Fmt("expected no error. got %v", resDeliver))
|
||||
assert.False(t, resDeliver.IsErr(), fmt.Sprintf("expected no error. got %v", resDeliver))
|
||||
|
||||
resCommit := app.Commit()
|
||||
assert.True(t, len(resCommit.Data) > 0)
|
||||
@@ -149,7 +148,7 @@ func TestMempoolRmBadTx(t *testing.T) {
|
||||
|
||||
// check for the tx
|
||||
for {
|
||||
txs := cs.mempool.Reap(1)
|
||||
txs := cs.mempool.ReapMaxBytes(len(txBytes))
|
||||
if len(txs) == 0 {
|
||||
emptyMempoolCh <- struct{}{}
|
||||
return
|
||||
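The mempool test above now reaps by byte budget instead of by transaction count. A generic stand-in mempool showing the difference in selection semantics; the real ReapMaxBytes lives in the mempool package, so this is only an illustration of byte-bounded reaping:

```go
package main

import "fmt"

// Stand-in mempool: Reap(n) used to select a fixed number of txs, while
// ReapMaxBytes(max) selects as many txs as fit into a byte budget.
type mempool struct{ txs [][]byte }

func (m *mempool) ReapMaxBytes(max int) [][]byte {
	var out [][]byte
	total := 0
	for _, tx := range m.txs {
		if total+len(tx) > max {
			break
		}
		out = append(out, tx)
		total += len(tx)
	}
	return out
}

func main() {
	m := &mempool{txs: [][]byte{[]byte("aaaaaaaa"), []byte("bbbbbbbb")}}
	fmt.Println(len(m.ReapMaxBytes(8))) // 1: only one 8-byte tx fits the budget
}
```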
@@ -190,7 +189,7 @@ func NewCounterApplication() *CounterApplication {
|
||||
}
|
||||
|
||||
func (app *CounterApplication) Info(req abci.RequestInfo) abci.ResponseInfo {
|
||||
return abci.ResponseInfo{Data: cmn.Fmt("txs:%v", app.txCount)}
|
||||
return abci.ResponseInfo{Data: fmt.Sprintf("txs:%v", app.txCount)}
|
||||
}
|
||||
|
||||
func (app *CounterApplication) DeliverTx(tx []byte) abci.ResponseDeliverTx {
|
||||
|
@@ -17,6 +17,7 @@ import (
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
tmtime "github.com/tendermint/tendermint/types/time"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -58,9 +59,6 @@ func NewConsensusReactor(consensusState *ConsensusState, fastSync bool) *Consens
|
||||
// broadcasted to other peers and starting state if we're not in fast sync.
|
||||
func (conR *ConsensusReactor) OnStart() error {
|
||||
conR.Logger.Info("ConsensusReactor ", "fastSync", conR.FastSync())
|
||||
if err := conR.BaseReactor.OnStart(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
conR.subscribeToBroadcastEvents()
|
||||
|
||||
@@ -77,7 +75,6 @@ func (conR *ConsensusReactor) OnStart() error {
|
||||
// OnStop implements BaseService by unsubscribing from events and stopping
|
||||
// state.
|
||||
func (conR *ConsensusReactor) OnStop() {
|
||||
conR.BaseReactor.OnStop()
|
||||
conR.unsubscribeFromBroadcastEvents()
|
||||
conR.conS.Stop()
|
||||
if !conR.FastSync() {
|
||||
@@ -245,7 +242,7 @@ func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
|
||||
"height", hb.Height, "round", hb.Round, "sequence", hb.Sequence,
|
||||
"valIdx", hb.ValidatorIndex, "valAddr", hb.ValidatorAddress)
|
||||
default:
|
||||
conR.Logger.Error(cmn.Fmt("Unknown message type %v", reflect.TypeOf(msg)))
|
||||
conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
|
||||
}
|
||||
|
||||
case DataChannel:
|
||||
@@ -266,7 +263,7 @@ func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
|
||||
}
|
||||
conR.conS.peerMsgQueue <- msgInfo{msg, src.ID()}
|
||||
default:
|
||||
conR.Logger.Error(cmn.Fmt("Unknown message type %v", reflect.TypeOf(msg)))
|
||||
conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
|
||||
}
|
||||
|
||||
case VoteChannel:
|
||||
@@ -291,7 +288,7 @@ func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
|
||||
|
||||
default:
|
||||
// don't punish (leave room for soft upgrades)
|
||||
conR.Logger.Error(cmn.Fmt("Unknown message type %v", reflect.TypeOf(msg)))
|
||||
conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
|
||||
}
|
||||
|
||||
case VoteSetBitsChannel:
|
||||
@@ -323,11 +320,11 @@ func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
|
||||
}
|
||||
default:
|
||||
// don't punish (leave room for soft upgrades)
|
||||
conR.Logger.Error(cmn.Fmt("Unknown message type %v", reflect.TypeOf(msg)))
|
||||
conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
|
||||
}
|
||||
|
||||
default:
|
||||
conR.Logger.Error(cmn.Fmt("Unknown chId %X", chID))
|
||||
conR.Logger.Error(fmt.Sprintf("Unknown chId %X", chID))
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
@@ -486,7 +483,7 @@ OUTER_LOOP:
|
||||
if prs.ProposalBlockParts == nil {
|
||||
blockMeta := conR.conS.blockStore.LoadBlockMeta(prs.Height)
|
||||
if blockMeta == nil {
|
||||
cmn.PanicCrisis(cmn.Fmt("Failed to load block %d when blockStore is at %d",
|
||||
cmn.PanicCrisis(fmt.Sprintf("Failed to load block %d when blockStore is at %d",
|
||||
prs.Height, conR.conS.blockStore.Height()))
|
||||
}
|
||||
ps.InitProposalBlockParts(blockMeta.BlockID.PartsHeader)
|
||||
@@ -1038,7 +1035,7 @@ func (ps *PeerState) ensureCatchupCommitRound(height int64, round int, numValida
|
||||
NOTE: This is wrong, 'round' could change.
|
||||
e.g. if orig round is not the same as block LastCommit round.
|
||||
if ps.CatchupCommitRound != -1 && ps.CatchupCommitRound != round {
|
||||
cmn.PanicSanity(cmn.Fmt("Conflicting CatchupCommitRound. Height: %v, Orig: %v, New: %v", height, ps.CatchupCommitRound, round))
|
||||
cmn.PanicSanity(fmt.Sprintf("Conflicting CatchupCommitRound. Height: %v, Orig: %v, New: %v", height, ps.CatchupCommitRound, round))
|
||||
}
|
||||
*/
|
||||
if ps.PRS.CatchupCommitRound == round {
|
||||
@@ -1142,7 +1139,7 @@ func (ps *PeerState) SetHasVote(vote *types.Vote) {
|
||||
}
|
||||
|
||||
func (ps *PeerState) setHasVote(height int64, round int, type_ byte, index int) {
|
||||
logger := ps.logger.With("peerH/R", cmn.Fmt("%d/%d", ps.PRS.Height, ps.PRS.Round), "H/R", cmn.Fmt("%d/%d", height, round))
|
||||
logger := ps.logger.With("peerH/R", fmt.Sprintf("%d/%d", ps.PRS.Height, ps.PRS.Round), "H/R", fmt.Sprintf("%d/%d", height, round))
|
||||
logger.Debug("setHasVote", "type", type_, "index", index)
|
||||
|
||||
// NOTE: some may be nil BitArrays -> no side effects.
|
||||
@@ -1169,7 +1166,7 @@ func (ps *PeerState) ApplyNewRoundStepMessage(msg *NewRoundStepMessage) {
|
||||
psCatchupCommitRound := ps.PRS.CatchupCommitRound
|
||||
psCatchupCommit := ps.PRS.CatchupCommit
|
||||
|
||||
startTime := time.Now().Add(-1 * time.Duration(msg.SecondsSinceStartTime) * time.Second)
|
||||
startTime := tmtime.Now().Add(-1 * time.Duration(msg.SecondsSinceStartTime) * time.Second)
|
||||
ps.PRS.Height = msg.Height
|
||||
ps.PRS.Round = msg.Round
|
||||
ps.PRS.Step = msg.Step
|
||||
|
@@ -4,15 +4,23 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"runtime"
|
||||
"runtime/pprof"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
abcicli "github.com/tendermint/tendermint/abci/client"
|
||||
"github.com/tendermint/tendermint/abci/example/kvstore"
|
||||
abci "github.com/tendermint/tendermint/abci/types"
|
||||
bc "github.com/tendermint/tendermint/blockchain"
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
dbm "github.com/tendermint/tendermint/libs/db"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
mempl "github.com/tendermint/tendermint/mempool"
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
tmtime "github.com/tendermint/tendermint/types/time"
|
||||
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
@@ -91,6 +99,121 @@ func TestReactorBasic(t *testing.T) {
|
||||
}, css)
|
||||
}
|
||||
|
||||
// Ensure we can process blocks with evidence
|
||||
func TestReactorWithEvidence(t *testing.T) {
|
||||
nValidators := 4
|
||||
testName := "consensus_reactor_test"
|
||||
tickerFunc := newMockTickerFunc(true)
|
||||
appFunc := newCounter
|
||||
|
||||
// heed the advice from https://www.sandimetz.com/blog/2016/1/20/the-wrong-abstraction
|
||||
// to unroll unwieldy abstractions. Here we duplicate the code from:
|
||||
// css := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
|
||||
|
||||
genDoc, privVals := randGenesisDoc(nValidators, false, 30)
|
||||
css := make([]*ConsensusState, nValidators)
|
||||
logger := consensusLogger()
|
||||
for i := 0; i < nValidators; i++ {
|
||||
stateDB := dbm.NewMemDB() // each state needs its own db
|
||||
state, _ := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc)
|
||||
thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
|
||||
ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
|
||||
app := appFunc()
|
||||
vals := types.TM2PB.ValidatorUpdates(state.Validators)
|
||||
app.InitChain(abci.RequestInitChain{Validators: vals})
|
||||
|
||||
pv := privVals[i]
|
||||
// duplicate code from:
|
||||
// css[i] = newConsensusStateWithConfig(thisConfig, state, privVals[i], app)
|
||||
|
||||
blockDB := dbm.NewMemDB()
|
||||
blockStore := bc.NewBlockStore(blockDB)
|
||||
|
||||
// one for mempool, one for consensus
|
||||
mtx := new(sync.Mutex)
|
||||
proxyAppConnMem := abcicli.NewLocalClient(mtx, app)
|
||||
proxyAppConnCon := abcicli.NewLocalClient(mtx, app)
|
||||
|
||||
// Make Mempool
|
||||
mempool := mempl.NewMempool(thisConfig.Mempool, proxyAppConnMem, 0)
|
||||
mempool.SetLogger(log.TestingLogger().With("module", "mempool"))
|
||||
if thisConfig.Consensus.WaitForTxs() {
|
||||
mempool.EnableTxsAvailable()
|
||||
}
|
||||
|
||||
// mock the evidence pool
|
||||
// everyone includes evidence of another double signing
|
||||
vIdx := (i + 1) % nValidators
|
||||
evpool := newMockEvidencePool(privVals[vIdx].GetAddress())
|
||||
|
||||
// Make ConsensusState
|
||||
blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyAppConnCon, mempool, evpool)
|
||||
cs := NewConsensusState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool)
|
||||
cs.SetLogger(log.TestingLogger().With("module", "consensus"))
|
||||
cs.SetPrivValidator(pv)
|
||||
|
||||
eventBus := types.NewEventBus()
|
||||
eventBus.SetLogger(log.TestingLogger().With("module", "events"))
|
||||
eventBus.Start()
|
||||
cs.SetEventBus(eventBus)
|
||||
|
||||
cs.SetTimeoutTicker(tickerFunc())
|
||||
cs.SetLogger(logger.With("validator", i, "module", "consensus"))
|
||||
|
||||
css[i] = cs
|
||||
}
|
||||
|
||||
reactors, eventChans, eventBuses := startConsensusNet(t, css, nValidators)
|
||||
defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
|
||||
|
||||
// wait till everyone makes the first new block with no evidence
|
||||
timeoutWaitGroup(t, nValidators, func(j int) {
|
||||
blockI := <-eventChans[j]
|
||||
block := blockI.(types.EventDataNewBlock).Block
|
||||
assert.True(t, len(block.Evidence.Evidence) == 0)
|
||||
}, css)
|
||||
|
||||
// second block should have evidence
|
||||
timeoutWaitGroup(t, nValidators, func(j int) {
|
||||
blockI := <-eventChans[j]
|
||||
block := blockI.(types.EventDataNewBlock).Block
|
||||
assert.True(t, len(block.Evidence.Evidence) > 0)
|
||||
}, css)
|
||||
}
|
||||
|
||||
// mock evidence pool returns no evidence for block 1,
|
||||
// and returns one piece for all higher blocks. The one piece
|
||||
// is for a given validator at block 1.
|
||||
type mockEvidencePool struct {
|
||||
height int
|
||||
ev []types.Evidence
|
||||
}
|
||||
|
||||
func newMockEvidencePool(val []byte) *mockEvidencePool {
|
||||
return &mockEvidencePool{
|
||||
ev: []types.Evidence{types.NewMockGoodEvidence(1, 1, val)},
|
||||
}
|
||||
}
|
||||
|
||||
// NOTE: maxBytes is ignored
|
||||
func (m *mockEvidencePool) PendingEvidence(maxBytes int) []types.Evidence {
|
||||
if m.height > 0 {
|
||||
return m.ev
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (m *mockEvidencePool) AddEvidence(types.Evidence) error { return nil }
|
||||
func (m *mockEvidencePool) Update(block *types.Block, state sm.State) {
|
||||
if m.height > 0 {
|
||||
if len(block.Evidence.Evidence) == 0 {
|
||||
panic("block has no evidence")
|
||||
}
|
||||
}
|
||||
m.height++
|
||||
}
|
||||
|
||||
//------------------------------------
|
||||
|
||||
// Ensure a testnet sends proposal heartbeats and makes blocks when there are txs
|
||||
func TestReactorProposalHeartbeats(t *testing.T) {
|
||||
N := 4
|
||||
@@ -174,14 +297,14 @@ func TestReactorRecordsBlockParts(t *testing.T) {
|
||||
require.Equal(t, 1, ps.BlockPartsSent(), "number of block parts sent should stay the same")
|
||||
}
|
||||
|
||||
// Test we record votes from other peers
|
||||
// Test we record votes from other peers.
|
||||
func TestReactorRecordsVotes(t *testing.T) {
|
||||
// create dummy peer
|
||||
// Create dummy peer.
|
||||
peer := p2pdummy.NewPeer()
|
||||
ps := NewPeerState(peer).SetLogger(log.TestingLogger())
|
||||
peer.Set(types.PeerStateKey, ps)
|
||||
|
||||
// create reactor
|
||||
// Create reactor.
|
||||
css := randConsensusNet(1, "consensus_reactor_records_votes_test", newMockTickerFunc(true), newPersistentKVStore)
|
||||
reactor := NewConsensusReactor(css[0], false) // so we dont start the consensus states
|
||||
reactor.SetEventBus(css[0].eventBus)
|
||||
@@ -199,7 +322,7 @@ func TestReactorRecordsVotes(t *testing.T) {
|
||||
ValidatorAddress: val.Address,
|
||||
Height: 2,
|
||||
Round: 0,
|
||||
Timestamp: time.Now().UTC(),
|
||||
Timestamp: tmtime.Now(),
|
||||
Type: types.VoteTypePrevote,
|
||||
BlockID: types.BlockID{},
|
||||
}
|
||||
@@ -419,7 +542,7 @@ func waitForAndValidateBlock(t *testing.T, n int, activeVals map[string]struct{}
|
||||
err := validateBlock(newBlock, activeVals)
|
||||
assert.Nil(t, err)
|
||||
for _, tx := range txs {
|
||||
css[j].mempool.CheckTx(tx, nil)
|
||||
err := css[j].mempool.CheckTx(tx, nil)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
}, css)
|
||||
|
@@ -227,7 +227,7 @@ func (h *Handshaker) NBlocks() int {
|
||||
func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error {
|
||||
|
||||
// Handshake is done via ABCI Info on the query conn.
|
||||
res, err := proxyApp.Query().InfoSync(abci.RequestInfo{version.Version})
|
||||
res, err := proxyApp.Query().InfoSync(abci.RequestInfo{Version: version.Version})
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error calling Info: %v", err)
|
||||
}
|
||||
@@ -264,15 +264,15 @@ func (h *Handshaker) ReplayBlocks(state sm.State, appHash []byte, appBlockHeight
|
||||
stateBlockHeight := state.LastBlockHeight
|
||||
h.logger.Info("ABCI Replay Blocks", "appHeight", appBlockHeight, "storeHeight", storeBlockHeight, "stateHeight", stateBlockHeight)
|
||||
|
||||
// If appBlockHeight == 0 it means that we are at genesis and hence should send InitChain
|
||||
// If appBlockHeight == 0 it means that we are at genesis and hence should send InitChain.
|
||||
if appBlockHeight == 0 {
|
||||
validators := types.TM2PB.Validators(state.Validators)
|
||||
nextVals := types.TM2PB.ValidatorUpdates(state.NextValidators) // state.Validators would work too.
|
||||
csParams := types.TM2PB.ConsensusParams(h.genDoc.ConsensusParams)
|
||||
req := abci.RequestInitChain{
|
||||
Time: h.genDoc.GenesisTime.Unix(), // TODO
|
||||
Time: h.genDoc.GenesisTime,
|
||||
ChainId: h.genDoc.ChainID,
|
||||
ConsensusParams: csParams,
|
||||
Validators: validators,
|
||||
Validators: nextVals,
|
||||
AppStateBytes: h.genDoc.AppState,
|
||||
}
|
||||
res, err := proxyApp.Consensus().InitChainSync(req)
|
||||
@@ -280,11 +280,9 @@ func (h *Handshaker) ReplayBlocks(state sm.State, appHash []byte, appBlockHeight
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// if the app returned validators
|
||||
// or consensus params, update the state
|
||||
// with the them
|
||||
// If the app returned validators or consensus params, update the state.
|
||||
if len(res.Validators) > 0 {
|
||||
vals, err := types.PB2TM.Validators(res.Validators)
|
||||
vals, err := types.PB2TM.ValidatorUpdates(res.Validators)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -296,7 +294,7 @@ func (h *Handshaker) ReplayBlocks(state sm.State, appHash []byte, appBlockHeight
|
||||
sm.SaveState(h.stateDB, state)
|
||||
}
|
||||
|
||||
// First handle edge cases and constraints on the storeBlockHeight
|
||||
// First handle edge cases and constraints on the storeBlockHeight.
|
||||
if storeBlockHeight == 0 {
|
||||
return appHash, checkAppHash(state, appHash)
|
||||
|
||||
@@ -306,11 +304,11 @@ func (h *Handshaker) ReplayBlocks(state sm.State, appHash []byte, appBlockHeight
|
||||
|
||||
} else if storeBlockHeight < stateBlockHeight {
|
||||
// the state should never be ahead of the store (this is under tendermint's control)
|
||||
cmn.PanicSanity(cmn.Fmt("StateBlockHeight (%d) > StoreBlockHeight (%d)", stateBlockHeight, storeBlockHeight))
|
||||
cmn.PanicSanity(fmt.Sprintf("StateBlockHeight (%d) > StoreBlockHeight (%d)", stateBlockHeight, storeBlockHeight))
|
||||
|
||||
} else if storeBlockHeight > stateBlockHeight+1 {
|
||||
// store should be at most one ahead of the state (this is under tendermint's control)
|
||||
cmn.PanicSanity(cmn.Fmt("StoreBlockHeight (%d) > StateBlockHeight + 1 (%d)", storeBlockHeight, stateBlockHeight+1))
|
||||
cmn.PanicSanity(fmt.Sprintf("StoreBlockHeight (%d) > StateBlockHeight + 1 (%d)", storeBlockHeight, stateBlockHeight+1))
|
||||
}
|
||||
|
||||
var err error
|
||||
|
@@ -13,12 +13,12 @@ import (
|
||||
|
||||
bc "github.com/tendermint/tendermint/blockchain"
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
"github.com/tendermint/tendermint/proxy"
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
dbm "github.com/tendermint/tendermint/libs/db"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/proxy"
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -34,7 +34,7 @@ func RunReplayFile(config cfg.BaseConfig, csConfig *cfg.ConsensusConfig, console
|
||||
consensusState := newConsensusStateForReplay(config, csConfig)
|
||||
|
||||
if err := consensusState.ReplayFile(csConfig.WalFile(), console); err != nil {
|
||||
cmn.Exit(cmn.Fmt("Error during consensus replay: %v", err))
|
||||
cmn.Exit(fmt.Sprintf("Error during consensus replay: %v", err))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -302,12 +302,12 @@ func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusCo
|
||||
NewHandshaker(stateDB, state, blockStore, gdoc))
|
||||
err = proxyApp.Start()
|
||||
if err != nil {
|
||||
cmn.Exit(cmn.Fmt("Error starting proxy app conns: %v", err))
|
||||
cmn.Exit(fmt.Sprintf("Error starting proxy app conns: %v", err))
|
||||
}
|
||||
|
||||
eventBus := types.NewEventBus()
|
||||
if err := eventBus.Start(); err != nil {
|
||||
cmn.Exit(cmn.Fmt("Failed to start event bus: %v", err))
|
||||
cmn.Exit(fmt.Sprintf("Failed to start event bus: %v", err))
|
||||
}
|
||||
|
||||
mempool, evpool := sm.MockMempool{}, sm.MockEvidencePool{}
|
||||
|
@@ -3,7 +3,6 @@ package consensus
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
@@ -20,15 +19,14 @@ import (
|
||||
abci "github.com/tendermint/tendermint/abci/types"
|
||||
crypto "github.com/tendermint/tendermint/crypto"
|
||||
auto "github.com/tendermint/tendermint/libs/autofile"
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
dbm "github.com/tendermint/tendermint/libs/db"
|
||||
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/privval"
|
||||
"github.com/tendermint/tendermint/proxy"
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
)
|
||||
|
||||
var consensusReplayConfig *cfg.Config
|
||||
@@ -376,7 +374,7 @@ func testHandshakeReplay(t *testing.T, nBlocks int, mode uint) {
|
||||
defer proxyApp.Stop()
|
||||
|
||||
// get the latest app hash from the app
|
||||
res, err := proxyApp.Query().InfoSync(abci.RequestInfo{""})
|
||||
res, err := proxyApp.Query().InfoSync(abci.RequestInfo{Version: ""})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -418,7 +416,7 @@ func buildAppStateFromChain(proxyApp proxy.AppConns, stateDB dbm.DB,
|
||||
}
|
||||
defer proxyApp.Stop()
|
||||
|
||||
validators := types.TM2PB.Validators(state.Validators)
|
||||
validators := types.TM2PB.ValidatorUpdates(state.Validators)
|
||||
if _, err := proxyApp.Consensus().InitChainSync(abci.RequestInitChain{
|
||||
Validators: validators,
|
||||
}); err != nil {
|
||||
@@ -455,7 +453,7 @@ func buildTMStateFromChain(config *cfg.Config, stateDB dbm.DB, state sm.State, c
|
||||
}
|
||||
defer proxyApp.Stop()
|
||||
|
||||
validators := types.TM2PB.Validators(state.Validators)
|
||||
validators := types.TM2PB.ValidatorUpdates(state.Validators)
|
||||
if _, err := proxyApp.Consensus().InitChainSync(abci.RequestInitChain{
|
||||
Validators: validators,
|
||||
}); err != nil {
|
||||
@@ -494,7 +492,7 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) {
|
||||
return nil, nil, err
|
||||
}
|
||||
if !found {
|
||||
return nil, nil, errors.New(cmn.Fmt("WAL does not contain height %d.", 1))
|
||||
return nil, nil, fmt.Errorf("WAL does not contain height %d.", 1)
|
||||
}
|
||||
defer gr.Close() // nolint: errcheck
|
||||
|
||||
@@ -531,11 +529,11 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) {
|
||||
panic(err)
|
||||
}
|
||||
if block.Height != height+1 {
|
||||
panic(cmn.Fmt("read bad block from wal. got height %d, expected %d", block.Height, height+1))
|
||||
panic(fmt.Sprintf("read bad block from wal. got height %d, expected %d", block.Height, height+1))
|
||||
}
|
||||
commitHeight := thisBlockCommit.Precommits[0].Height
|
||||
if commitHeight != height+1 {
|
||||
panic(cmn.Fmt("commit doesnt match. got height %d, expected %d", commitHeight, height+1))
|
||||
panic(fmt.Sprintf("commit doesnt match. got height %d, expected %d", commitHeight, height+1))
|
||||
}
|
||||
blocks = append(blocks, block)
|
||||
commits = append(commits, thisBlockCommit)
|
||||
@@ -564,11 +562,11 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) {
|
||||
panic(err)
|
||||
}
|
||||
if block.Height != height+1 {
|
||||
panic(cmn.Fmt("read bad block from wal. got height %d, expected %d", block.Height, height+1))
|
||||
panic(fmt.Sprintf("read bad block from wal. got height %d, expected %d", block.Height, height+1))
|
||||
}
|
||||
commitHeight := thisBlockCommit.Precommits[0].Height
|
||||
if commitHeight != height+1 {
|
||||
panic(cmn.Fmt("commit doesnt match. got height %d, expected %d", commitHeight, height+1))
|
||||
panic(fmt.Sprintf("commit doesnt match. got height %d, expected %d", commitHeight, height+1))
|
||||
}
|
||||
blocks = append(blocks, block)
|
||||
commits = append(commits, thisBlockCommit)
|
||||
@@ -641,7 +639,7 @@ func (bs *mockBlockStore) LoadSeenCommit(height int64) *types.Commit {
|
||||
func TestInitChainUpdateValidators(t *testing.T) {
|
||||
val, _ := types.RandValidator(true, 10)
|
||||
vals := types.NewValidatorSet([]*types.Validator{val})
|
||||
app := &initChainApp{vals: types.TM2PB.Validators(vals)}
|
||||
app := &initChainApp{vals: types.TM2PB.ValidatorUpdates(vals)}
|
||||
clientCreator := proxy.NewLocalClientCreator(app)
|
||||
|
||||
config := ResetConfig("proxy_test_")
|
||||
@@ -668,7 +666,7 @@ func TestInitChainUpdateValidators(t *testing.T) {
|
||||
assert.Equal(t, newValAddr, expectValAddr)
|
||||
}
|
||||
|
||||
func newInitChainApp(vals []abci.Validator) *initChainApp {
|
||||
func newInitChainApp(vals []abci.ValidatorUpdate) *initChainApp {
|
||||
return &initChainApp{
|
||||
vals: vals,
|
||||
}
|
||||
@@ -677,7 +675,7 @@ func newInitChainApp(vals []abci.Validator) *initChainApp {
|
||||
// returns the vals on InitChain
|
||||
type initChainApp struct {
|
||||
abci.BaseApplication
|
||||
vals []abci.Validator
|
||||
vals []abci.ValidatorUpdate
|
||||
}
|
||||
|
||||
func (ica *initChainApp) InitChain(req abci.RequestInitChain) abci.ResponseInitChain {
|
||||
|
@@ -12,6 +12,7 @@ import (
|
||||
fail "github.com/ebuchman/fail-test"
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
tmtime "github.com/tendermint/tendermint/types/time"
|
||||
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
cstypes "github.com/tendermint/tendermint/consensus/types"
|
||||
@@ -74,14 +75,13 @@ type ConsensusState struct {
|
||||
privValidator types.PrivValidator // for signing votes
|
||||
|
||||
// services for creating and executing blocks
|
||||
// TODO: encapsulate all of this in one "BlockManager"
|
||||
blockExec *sm.BlockExecutor
|
||||
blockStore sm.BlockStore
|
||||
mempool sm.Mempool
|
||||
evpool sm.EvidencePool
|
||||
|
||||
// internal state
|
||||
mtx sync.Mutex
|
||||
mtx sync.RWMutex
|
||||
cstypes.RoundState
|
||||
state sm.State // State until height-1.
|
||||
|
||||
@@ -154,6 +154,7 @@ func NewConsensusState(
|
||||
cs.setProposal = cs.defaultSetProposal
|
||||
|
||||
cs.updateToState(state)
|
||||
|
||||
// Don't call scheduleRound0 yet.
|
||||
// We do that upon Start().
|
||||
cs.reconstructLastCommit(state)
|
||||
@@ -187,20 +188,29 @@ func WithMetrics(metrics *Metrics) CSOption {
|
||||
// String returns a string.
|
||||
func (cs *ConsensusState) String() string {
|
||||
// better not to access shared variables
|
||||
return cmn.Fmt("ConsensusState") //(H:%v R:%v S:%v", cs.Height, cs.Round, cs.Step)
|
||||
return fmt.Sprintf("ConsensusState") //(H:%v R:%v S:%v", cs.Height, cs.Round, cs.Step)
|
||||
}
|
||||
|
||||
// GetState returns a copy of the chain state.
|
||||
func (cs *ConsensusState) GetState() sm.State {
|
||||
cs.mtx.RLock()
|
||||
defer cs.mtx.RUnlock()
|
||||
return cs.state.Copy()
|
||||
}
|
||||
|
||||
// GetLastHeight returns the last height committed.
|
||||
// If there were no blocks, returns 0.
|
||||
func (cs *ConsensusState) GetLastHeight() int64 {
|
||||
cs.mtx.Lock()
|
||||
defer cs.mtx.Unlock()
|
||||
return cs.state.Copy()
|
||||
|
||||
return cs.RoundState.Height - 1
|
||||
}
|
||||
|
||||
// GetRoundState returns a shallow copy of the internal consensus state.
|
||||
func (cs *ConsensusState) GetRoundState() *cstypes.RoundState {
|
||||
cs.mtx.Lock()
|
||||
defer cs.mtx.Unlock()
|
||||
cs.mtx.RLock()
|
||||
defer cs.mtx.RUnlock()
|
||||
|
||||
rs := cs.RoundState // copy
|
||||
return &rs
|
||||
@@ -208,24 +218,24 @@ func (cs *ConsensusState) GetRoundState() *cstypes.RoundState {
|
||||
|
||||
// GetRoundStateJSON returns a json of RoundState, marshalled using go-amino.
|
||||
func (cs *ConsensusState) GetRoundStateJSON() ([]byte, error) {
|
||||
cs.mtx.Lock()
|
||||
defer cs.mtx.Unlock()
|
||||
cs.mtx.RLock()
|
||||
defer cs.mtx.RUnlock()
|
||||
|
||||
return cdc.MarshalJSON(cs.RoundState)
|
||||
}
|
||||
|
||||
// GetRoundStateSimpleJSON returns a json of RoundStateSimple, marshalled using go-amino.
|
||||
func (cs *ConsensusState) GetRoundStateSimpleJSON() ([]byte, error) {
|
||||
cs.mtx.Lock()
|
||||
defer cs.mtx.Unlock()
|
||||
cs.mtx.RLock()
|
||||
defer cs.mtx.RUnlock()
|
||||
|
||||
return cdc.MarshalJSON(cs.RoundState.RoundStateSimple())
|
||||
}
|
||||
|
||||
// GetValidators returns a copy of the current validators.
|
||||
func (cs *ConsensusState) GetValidators() (int64, []*types.Validator) {
|
||||
cs.mtx.Lock()
|
||||
defer cs.mtx.Unlock()
|
||||
cs.mtx.RLock()
|
||||
defer cs.mtx.RUnlock()
|
||||
return cs.state.LastBlockHeight, cs.state.Validators.Copy().Validators
|
||||
}
|
||||
|
||||
@@ -245,8 +255,8 @@ func (cs *ConsensusState) SetTimeoutTicker(timeoutTicker TimeoutTicker) {
|
||||
|
||||
// LoadCommit loads the commit for a given height.
|
||||
func (cs *ConsensusState) LoadCommit(height int64) *types.Commit {
|
||||
cs.mtx.Lock()
|
||||
defer cs.mtx.Unlock()
|
||||
cs.mtx.RLock()
|
||||
defer cs.mtx.RUnlock()
|
||||
if height == cs.blockStore.Height() {
|
||||
return cs.blockStore.LoadSeenCommit(height)
|
||||
}
|
||||
@@ -413,8 +423,8 @@ func (cs *ConsensusState) updateRoundStep(round int, step cstypes.RoundStepType)
|
||||
|
||||
// enterNewRound(height, 0) at cs.StartTime.
|
||||
func (cs *ConsensusState) scheduleRound0(rs *cstypes.RoundState) {
|
||||
//cs.Logger.Info("scheduleRound0", "now", time.Now(), "startTime", cs.StartTime)
|
||||
sleepDuration := rs.StartTime.Sub(time.Now()) // nolint: gotype, gosimple
|
||||
//cs.Logger.Info("scheduleRound0", "now", tmtime.Now(), "startTime", cs.StartTime)
|
||||
sleepDuration := rs.StartTime.Sub(tmtime.Now()) // nolint: gotype, gosimple
|
||||
cs.scheduleTimeout(sleepDuration, rs.Height, 0, cstypes.RoundStepNewHeight)
|
||||
}
|
||||
|
||||
@@ -451,7 +461,7 @@ func (cs *ConsensusState) reconstructLastCommit(state sm.State) {
|
||||
}
|
||||
added, err := lastPrecommits.AddVote(precommit)
|
||||
if !added || err != nil {
|
||||
cmn.PanicCrisis(cmn.Fmt("Failed to reconstruct LastCommit: %v", err))
|
||||
cmn.PanicCrisis(fmt.Sprintf("Failed to reconstruct LastCommit: %v", err))
|
||||
}
|
||||
}
|
||||
if !lastPrecommits.HasTwoThirdsMajority() {
|
||||
@@ -464,13 +474,13 @@ func (cs *ConsensusState) reconstructLastCommit(state sm.State) {
|
||||
// The round becomes 0 and cs.Step becomes cstypes.RoundStepNewHeight.
|
||||
func (cs *ConsensusState) updateToState(state sm.State) {
|
||||
if cs.CommitRound > -1 && 0 < cs.Height && cs.Height != state.LastBlockHeight {
|
||||
cmn.PanicSanity(cmn.Fmt("updateToState() expected state height of %v but found %v",
|
||||
cmn.PanicSanity(fmt.Sprintf("updateToState() expected state height of %v but found %v",
|
||||
cs.Height, state.LastBlockHeight))
|
||||
}
|
||||
if !cs.state.IsEmpty() && cs.state.LastBlockHeight+1 != cs.Height {
|
||||
// This might happen when someone else is mutating cs.state.
|
||||
// Someone forgot to pass in state.Copy() somewhere?!
|
||||
cmn.PanicSanity(cmn.Fmt("Inconsistent cs.state.LastBlockHeight+1 %v vs cs.Height %v",
|
||||
cmn.PanicSanity(fmt.Sprintf("Inconsistent cs.state.LastBlockHeight+1 %v vs cs.Height %v",
|
||||
cs.state.LastBlockHeight+1, cs.Height))
|
||||
}
|
||||
|
||||
@@ -507,7 +517,7 @@ func (cs *ConsensusState) updateToState(state sm.State) {
|
||||
// to be gathered for the first block.
|
||||
// An alternative solution that relies on clocks:
|
||||
// cs.StartTime = state.LastBlockTime.Add(timeoutCommit)
|
||||
cs.StartTime = cs.config.Commit(time.Now())
|
||||
cs.StartTime = cs.config.Commit(tmtime.Now())
|
||||
} else {
|
||||
cs.StartTime = cs.config.Commit(cs.CommitTime)
|
||||
}
|
||||
@@ -553,9 +563,30 @@ func (cs *ConsensusState) newStep() {
|
||||
// Updates (state transitions) happen on timeouts, complete proposals, and 2/3 majorities.
|
||||
// ConsensusState must be locked before any internal state is updated.
|
||||
func (cs *ConsensusState) receiveRoutine(maxSteps int) {
|
||||
onExit := func(cs *ConsensusState) {
|
||||
// NOTE: the internalMsgQueue may have signed messages from our
|
||||
// priv_val that haven't hit the WAL, but its ok because
|
||||
// priv_val tracks LastSig
|
||||
|
||||
// close wal now that we're done writing to it
|
||||
cs.wal.Stop()
|
||||
cs.wal.Wait()
|
||||
|
||||
close(cs.done)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
cs.Logger.Error("CONSENSUS FAILURE!!!", "err", r, "stack", string(debug.Stack()))
|
||||
// stop gracefully
|
||||
//
|
||||
// NOTE: We most probably shouldn't be running any further when there is
|
||||
// some unexpected panic. Some unknown error happened, and so we don't
|
||||
// know if that will result in the validator signing an invalid thing. It
|
||||
// might be worthwhile to explore a mechanism for manual resuming via
|
||||
// some console or secure RPC system, but for now, halting the chain upon
|
||||
// unexpected consensus bugs sounds like the better option.
|
||||
onExit(cs)
|
||||
}
|
||||
}()
|
||||
|
||||
@@ -571,8 +602,8 @@ func (cs *ConsensusState) receiveRoutine(maxSteps int) {
|
||||
var mi msgInfo
|
||||
|
||||
select {
|
||||
case height := <-cs.mempool.TxsAvailable():
|
||||
cs.handleTxsAvailable(height)
|
||||
case <-cs.mempool.TxsAvailable():
|
||||
cs.handleTxsAvailable()
|
||||
case mi = <-cs.peerMsgQueue:
|
||||
cs.wal.Write(mi)
|
||||
// handles proposals, block parts, votes
|
||||
@@ -588,16 +619,7 @@ func (cs *ConsensusState) receiveRoutine(maxSteps int) {
|
||||
// go to the next step
|
||||
cs.handleTimeout(ti, rs)
|
||||
case <-cs.Quit():
|
||||
|
||||
// NOTE: the internalMsgQueue may have signed messages from our
|
||||
// priv_val that haven't hit the WAL, but its ok because
|
||||
// priv_val tracks LastSig
|
||||
|
||||
// close wal now that we're done writing to it
|
||||
cs.wal.Stop()
|
||||
cs.wal.Wait()
|
||||
|
||||
close(cs.done)
|
||||
onExit(cs)
|
||||
return
|
||||
}
|
||||
}
|
||||
@@ -678,16 +700,16 @@ func (cs *ConsensusState) handleTimeout(ti timeoutInfo, rs cstypes.RoundState) {
|
||||
cs.eventBus.PublishEventTimeoutWait(cs.RoundStateEvent())
|
||||
cs.enterNewRound(ti.Height, ti.Round+1)
|
||||
default:
|
||||
panic(cmn.Fmt("Invalid timeout step: %v", ti.Step))
|
||||
panic(fmt.Sprintf("Invalid timeout step: %v", ti.Step))
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func (cs *ConsensusState) handleTxsAvailable(height int64) {
|
||||
func (cs *ConsensusState) handleTxsAvailable() {
|
||||
cs.mtx.Lock()
|
||||
defer cs.mtx.Unlock()
|
||||
// we only need to do this for round 0
|
||||
cs.enterPropose(height, 0)
|
||||
cs.enterPropose(cs.Height, 0)
|
||||
}
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
@@ -704,15 +726,15 @@ func (cs *ConsensusState) enterNewRound(height int64, round int) {
|
||||
logger := cs.Logger.With("height", height, "round", round)
|
||||
|
||||
if cs.Height != height || round < cs.Round || (cs.Round == round && cs.Step != cstypes.RoundStepNewHeight) {
|
||||
logger.Debug(cmn.Fmt("enterNewRound(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
|
||||
logger.Debug(fmt.Sprintf("enterNewRound(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
|
||||
return
|
||||
}
|
||||
|
||||
if now := time.Now(); cs.StartTime.After(now) {
|
||||
if now := tmtime.Now(); cs.StartTime.After(now) {
|
||||
logger.Info("Need to set a buffer and log message here for sanity.", "startTime", cs.StartTime, "now", now)
|
||||
}
|
||||
|
||||
logger.Info(cmn.Fmt("enterNewRound(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
|
||||
logger.Info(fmt.Sprintf("enterNewRound(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
|
||||
|
||||
// Increment validators if necessary
|
||||
validators := cs.Validators
|
||||
@@ -799,10 +821,10 @@ func (cs *ConsensusState) enterPropose(height int64, round int) {
|
||||
logger := cs.Logger.With("height", height, "round", round)
|
||||
|
||||
if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPropose <= cs.Step) {
|
||||
logger.Debug(cmn.Fmt("enterPropose(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
|
||||
logger.Debug(fmt.Sprintf("enterPropose(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
|
||||
return
|
||||
}
|
||||
logger.Info(cmn.Fmt("enterPropose(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
|
||||
logger.Info(fmt.Sprintf("enterPropose(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
|
||||
|
||||
defer func() {
|
||||
// Done enterPropose:
|
||||
@@ -882,7 +904,7 @@ func (cs *ConsensusState) defaultDecideProposal(height int64, round int) {
|
||||
cs.sendInternalMessage(msgInfo{&BlockPartMessage{cs.Height, cs.Round, part}, ""})
|
||||
}
|
||||
cs.Logger.Info("Signed proposal", "height", height, "round", round, "proposal", proposal)
|
||||
cs.Logger.Debug(cmn.Fmt("Signed proposal block: %v", block))
|
||||
cs.Logger.Debug(fmt.Sprintf("Signed proposal block: %v", block))
|
||||
} else {
|
||||
if !cs.replayMode {
|
||||
cs.Logger.Error("enterPropose: Error signing proposal", "height", height, "round", round, "err", err)
|
||||
@@ -907,6 +929,8 @@ func (cs *ConsensusState) isProposalComplete() bool {
|
||||
}
|
||||
|
||||
// Create the next block to propose and return it.
|
||||
// We really only need to return the parts, but the block
|
||||
// is returned for convenience so we can log the proposal block.
|
||||
// Returns nil block upon error.
|
||||
// NOTE: keep it side-effect free for clarity.
|
||||
func (cs *ConsensusState) createProposalBlock() (block *types.Block, blockParts *types.PartSet) {
|
||||
@@ -924,14 +948,25 @@ func (cs *ConsensusState) createProposalBlock() (block *types.Block, blockParts
|
||||
return
|
||||
}
|
||||
|
||||
maxBytes := cs.state.ConsensusParams.BlockSize.MaxBytes
|
||||
// bound evidence to 1/10th of the block
|
||||
evidence := cs.evpool.PendingEvidence(maxBytes / 10)
|
||||
// Mempool validated transactions
|
||||
txs := cs.mempool.Reap(cs.state.ConsensusParams.BlockSize.MaxTxs)
|
||||
block, parts := cs.state.MakeBlock(cs.Height, txs, commit)
|
||||
evidence := cs.evpool.PendingEvidence()
|
||||
block.AddEvidence(evidence)
|
||||
txs := cs.mempool.ReapMaxBytes(maxDataBytes(maxBytes, cs.state.Validators.Size(), len(evidence)))
|
||||
proposerAddr := cs.privValidator.GetAddress()
|
||||
block, parts := cs.state.MakeBlock(cs.Height, txs, commit, evidence, proposerAddr)
|
||||
|
||||
return block, parts
|
||||
}
|
||||
|
||||
func maxDataBytes(maxBytes, valsCount, evidenceCount int) int {
|
||||
return maxBytes -
|
||||
types.MaxAminoOverheadForBlock -
|
||||
types.MaxHeaderBytes -
|
||||
(valsCount * types.MaxVoteBytes) -
|
||||
(evidenceCount * types.MaxEvidenceBytes)
|
||||
}
|
||||
|
||||
// Enter: `timeoutPropose` after entering Propose.
|
||||
// Enter: proposal block and POL is ready.
|
||||
// Enter: any +2/3 prevotes for future round.
|
||||
@@ -939,7 +974,7 @@ func (cs *ConsensusState) createProposalBlock() (block *types.Block, blockParts
|
||||
// Otherwise vote nil.
|
||||
func (cs *ConsensusState) enterPrevote(height int64, round int) {
|
||||
if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrevote <= cs.Step) {
|
||||
cs.Logger.Debug(cmn.Fmt("enterPrevote(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
|
||||
cs.Logger.Debug(fmt.Sprintf("enterPrevote(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
|
||||
return
|
||||
}
|
||||
|
||||
@@ -957,7 +992,7 @@ func (cs *ConsensusState) enterPrevote(height int64, round int) {
|
||||
// TODO: catchup event?
|
||||
}
|
||||
|
||||
cs.Logger.Info(cmn.Fmt("enterPrevote(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
|
||||
cs.Logger.Info(fmt.Sprintf("enterPrevote(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
|
||||
|
||||
// Sign and broadcast vote as necessary
|
||||
cs.doPrevote(height, round)
|
||||
@@ -1003,13 +1038,13 @@ func (cs *ConsensusState) enterPrevoteWait(height int64, round int) {
|
||||
logger := cs.Logger.With("height", height, "round", round)
|
||||
|
||||
if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrevoteWait <= cs.Step) {
|
||||
logger.Debug(cmn.Fmt("enterPrevoteWait(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
|
||||
logger.Debug(fmt.Sprintf("enterPrevoteWait(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
|
||||
return
|
||||
}
|
||||
if !cs.Votes.Prevotes(round).HasTwoThirdsAny() {
|
||||
cmn.PanicSanity(cmn.Fmt("enterPrevoteWait(%v/%v), but Prevotes does not have any +2/3 votes", height, round))
|
||||
cmn.PanicSanity(fmt.Sprintf("enterPrevoteWait(%v/%v), but Prevotes does not have any +2/3 votes", height, round))
|
||||
}
|
||||
logger.Info(cmn.Fmt("enterPrevoteWait(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
|
||||
logger.Info(fmt.Sprintf("enterPrevoteWait(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
|
||||
|
||||
defer func() {
|
||||
// Done enterPrevoteWait:
|
||||
@@ -1031,11 +1066,11 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) {
|
||||
logger := cs.Logger.With("height", height, "round", round)
|
||||
|
||||
if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrecommit <= cs.Step) {
|
||||
logger.Debug(cmn.Fmt("enterPrecommit(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
|
||||
logger.Debug(fmt.Sprintf("enterPrecommit(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
|
||||
return
|
||||
}
|
||||
|
||||
logger.Info(cmn.Fmt("enterPrecommit(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
|
||||
logger.Info(fmt.Sprintf("enterPrecommit(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
|
||||
|
||||
defer func() {
|
||||
// Done enterPrecommit:
|
||||
@@ -1063,7 +1098,7 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) {
|
||||
// the latest POLRound should be this round.
|
||||
polRound, _ := cs.Votes.POLInfo()
|
||||
if polRound < round {
|
||||
cmn.PanicSanity(cmn.Fmt("This POLRound should be %v but got %", round, polRound))
|
||||
cmn.PanicSanity(fmt.Sprintf("This POLRound should be %v but got %v", round, polRound))
|
||||
}
|
||||
|
||||
// +2/3 prevoted nil. Unlock and precommit nil.
|
||||
@@ -1097,7 +1132,7 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) {
|
||||
logger.Info("enterPrecommit: +2/3 prevoted proposal block. Locking", "hash", blockID.Hash)
|
||||
// Validate the block.
|
||||
if err := cs.blockExec.ValidateBlock(cs.state, cs.ProposalBlock); err != nil {
|
||||
cmn.PanicConsensus(cmn.Fmt("enterPrecommit: +2/3 prevoted for an invalid block: %v", err))
|
||||
cmn.PanicConsensus(fmt.Sprintf("enterPrecommit: +2/3 prevoted for an invalid block: %v", err))
|
||||
}
|
||||
cs.LockedRound = round
|
||||
cs.LockedBlock = cs.ProposalBlock
|
||||
@@ -1127,13 +1162,13 @@ func (cs *ConsensusState) enterPrecommitWait(height int64, round int) {
|
||||
logger := cs.Logger.With("height", height, "round", round)
|
||||
|
||||
if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrecommitWait <= cs.Step) {
|
||||
logger.Debug(cmn.Fmt("enterPrecommitWait(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
|
||||
logger.Debug(fmt.Sprintf("enterPrecommitWait(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
|
||||
return
|
||||
}
|
||||
if !cs.Votes.Precommits(round).HasTwoThirdsAny() {
|
||||
cmn.PanicSanity(cmn.Fmt("enterPrecommitWait(%v/%v), but Precommits does not have any +2/3 votes", height, round))
|
||||
cmn.PanicSanity(fmt.Sprintf("enterPrecommitWait(%v/%v), but Precommits does not have any +2/3 votes", height, round))
|
||||
}
|
||||
logger.Info(cmn.Fmt("enterPrecommitWait(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
|
||||
logger.Info(fmt.Sprintf("enterPrecommitWait(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
|
||||
|
||||
defer func() {
|
||||
// Done enterPrecommitWait:
|
||||
@@ -1151,17 +1186,17 @@ func (cs *ConsensusState) enterCommit(height int64, commitRound int) {
|
||||
logger := cs.Logger.With("height", height, "commitRound", commitRound)
|
||||
|
||||
if cs.Height != height || cstypes.RoundStepCommit <= cs.Step {
|
||||
logger.Debug(cmn.Fmt("enterCommit(%v/%v): Invalid args. Current step: %v/%v/%v", height, commitRound, cs.Height, cs.Round, cs.Step))
|
||||
logger.Debug(fmt.Sprintf("enterCommit(%v/%v): Invalid args. Current step: %v/%v/%v", height, commitRound, cs.Height, cs.Round, cs.Step))
|
||||
return
|
||||
}
|
||||
logger.Info(cmn.Fmt("enterCommit(%v/%v). Current: %v/%v/%v", height, commitRound, cs.Height, cs.Round, cs.Step))
|
||||
logger.Info(fmt.Sprintf("enterCommit(%v/%v). Current: %v/%v/%v", height, commitRound, cs.Height, cs.Round, cs.Step))
|
||||
|
||||
defer func() {
|
||||
// Done enterCommit:
|
||||
// keep cs.Round the same, commitRound points to the right Precommits set.
|
||||
cs.updateRoundStep(cs.Round, cstypes.RoundStepCommit)
|
||||
cs.CommitRound = commitRound
|
||||
cs.CommitTime = time.Now()
|
||||
cs.CommitTime = tmtime.Now()
|
||||
cs.newStep()
|
||||
|
||||
// Maybe finalize immediately.
|
||||
@@ -1201,7 +1236,7 @@ func (cs *ConsensusState) tryFinalizeCommit(height int64) {
|
||||
logger := cs.Logger.With("height", height)
|
||||
|
||||
if cs.Height != height {
|
||||
cmn.PanicSanity(cmn.Fmt("tryFinalizeCommit() cs.Height: %v vs height: %v", cs.Height, height))
|
||||
cmn.PanicSanity(fmt.Sprintf("tryFinalizeCommit() cs.Height: %v vs height: %v", cs.Height, height))
|
||||
}
|
||||
|
||||
blockID, ok := cs.Votes.Precommits(cs.CommitRound).TwoThirdsMajority()
|
||||
@@ -1223,7 +1258,7 @@ func (cs *ConsensusState) tryFinalizeCommit(height int64) {
|
||||
// Increment height and goto cstypes.RoundStepNewHeight
|
||||
func (cs *ConsensusState) finalizeCommit(height int64) {
|
||||
if cs.Height != height || cs.Step != cstypes.RoundStepCommit {
|
||||
cs.Logger.Debug(cmn.Fmt("finalizeCommit(%v): Invalid args. Current step: %v/%v/%v", height, cs.Height, cs.Round, cs.Step))
|
||||
cs.Logger.Debug(fmt.Sprintf("finalizeCommit(%v): Invalid args. Current step: %v/%v/%v", height, cs.Height, cs.Round, cs.Step))
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1231,21 +1266,21 @@ func (cs *ConsensusState) finalizeCommit(height int64) {
|
||||
block, blockParts := cs.ProposalBlock, cs.ProposalBlockParts
|
||||
|
||||
if !ok {
|
||||
cmn.PanicSanity(cmn.Fmt("Cannot finalizeCommit, commit does not have two thirds majority"))
|
||||
cmn.PanicSanity(fmt.Sprintf("Cannot finalizeCommit, commit does not have two thirds majority"))
|
||||
}
|
||||
if !blockParts.HasHeader(blockID.PartsHeader) {
|
||||
cmn.PanicSanity(cmn.Fmt("Expected ProposalBlockParts header to be commit header"))
|
||||
cmn.PanicSanity(fmt.Sprintf("Expected ProposalBlockParts header to be commit header"))
|
||||
}
|
||||
if !block.HashesTo(blockID.Hash) {
|
||||
cmn.PanicSanity(cmn.Fmt("Cannot finalizeCommit, ProposalBlock does not hash to commit hash"))
|
||||
cmn.PanicSanity(fmt.Sprintf("Cannot finalizeCommit, ProposalBlock does not hash to commit hash"))
|
||||
}
|
||||
if err := cs.blockExec.ValidateBlock(cs.state, block); err != nil {
|
||||
cmn.PanicConsensus(cmn.Fmt("+2/3 committed an invalid block: %v", err))
|
||||
cmn.PanicConsensus(fmt.Sprintf("+2/3 committed an invalid block: %v", err))
|
||||
}
|
||||
|
||||
cs.Logger.Info(cmn.Fmt("Finalizing commit of block with %d txs", block.NumTxs),
|
||||
cs.Logger.Info(fmt.Sprintf("Finalizing commit of block with %d txs", block.NumTxs),
|
||||
"height", block.Height, "hash", block.Hash(), "root", block.AppHash)
|
||||
cs.Logger.Info(cmn.Fmt("%v", block))
|
||||
cs.Logger.Info(fmt.Sprintf("%v", block))
|
||||
|
||||
fail.Fail() // XXX
|
||||
|
||||
@@ -1416,7 +1451,11 @@ func (cs *ConsensusState) addProposalBlockPart(msg *BlockPartMessage, peerID p2p
|
||||
}
|
||||
if added && cs.ProposalBlockParts.IsComplete() {
|
||||
// Added and completed!
|
||||
_, err = cdc.UnmarshalBinaryReader(cs.ProposalBlockParts.GetReader(), &cs.ProposalBlock, int64(cs.state.ConsensusParams.BlockSize.MaxBytes))
|
||||
_, err = cdc.UnmarshalBinaryReader(
|
||||
cs.ProposalBlockParts.GetReader(),
|
||||
&cs.ProposalBlock,
|
||||
int64(cs.state.ConsensusParams.BlockSize.MaxBytes),
|
||||
)
|
||||
if err != nil {
|
||||
return true, err
|
||||
}
|
||||
@@ -1497,7 +1536,7 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool,
|
||||
return added, err
|
||||
}
|
||||
|
||||
cs.Logger.Info(cmn.Fmt("Added to lastPrecommits: %v", cs.LastCommit.StringShort()))
|
||||
cs.Logger.Info(fmt.Sprintf("Added to lastPrecommits: %v", cs.LastCommit.StringShort()))
|
||||
cs.eventBus.PublishEventVote(types.EventDataVote{vote})
|
||||
cs.evsw.FireEvent(types.EventVote, vote)
|
||||
|
||||
@@ -1613,7 +1652,7 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool,
|
||||
cs.enterPrecommitWait(height, vote.Round)
|
||||
}
|
||||
default:
|
||||
panic(cmn.Fmt("Unexpected vote type %X", vote.Type)) // go-wire should prevent this.
|
||||
panic(fmt.Sprintf("Unexpected vote type %X", vote.Type)) // go-wire should prevent this.
|
||||
}
|
||||
|
||||
return
|
||||
@@ -1622,12 +1661,13 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool,
|
||||
func (cs *ConsensusState) signVote(type_ byte, hash []byte, header types.PartSetHeader) (*types.Vote, error) {
|
||||
addr := cs.privValidator.GetAddress()
|
||||
valIndex, _ := cs.Validators.GetByAddress(addr)
|
||||
|
||||
vote := &types.Vote{
|
||||
ValidatorAddress: addr,
|
||||
ValidatorIndex: valIndex,
|
||||
Height: cs.Height,
|
||||
Round: cs.Round,
|
||||
Timestamp: time.Now().UTC(),
|
||||
Timestamp: cs.voteTime(),
|
||||
Type: type_,
|
||||
BlockID: types.BlockID{hash, header},
|
||||
}
|
||||
@@ -1635,6 +1675,23 @@ func (cs *ConsensusState) signVote(type_ byte, hash []byte, header types.PartSet
|
||||
return vote, err
|
||||
}
|
||||
|
||||
func (cs *ConsensusState) voteTime() time.Time {
|
||||
now := tmtime.Now()
|
||||
minVoteTime := now
|
||||
// TODO: We should remove next line in case we don't vote for v in case cs.ProposalBlock == nil,
|
||||
// even if cs.LockedBlock != nil. See https://github.com/tendermint/spec.
|
||||
if cs.LockedBlock != nil {
|
||||
minVoteTime = cs.config.MinValidVoteTime(cs.LockedBlock.Time)
|
||||
} else if cs.ProposalBlock != nil {
|
||||
minVoteTime = cs.config.MinValidVoteTime(cs.ProposalBlock.Time)
|
||||
}
|
||||
|
||||
if now.After(minVoteTime) {
|
||||
return now
|
||||
}
|
||||
return minVoteTime
|
||||
}
|
||||
|
||||
// sign the vote and publish on internalMsgQueue
|
||||
func (cs *ConsensusState) signAddVote(type_ byte, hash []byte, header types.PartSetHeader) *types.Vote {
|
||||
// if we don't have a key or we're not in the validator set, do nothing
|
||||
|
@@ -8,10 +8,9 @@ import (
|
||||
"time"
|
||||
|
||||
cstypes "github.com/tendermint/tendermint/consensus/types"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
)
|
||||
|
||||
func init() {
|
||||
@@ -84,7 +83,7 @@ func TestStateProposerSelection0(t *testing.T) {
|
||||
|
||||
prop = cs1.GetRoundState().Validators.GetProposer()
|
||||
if !bytes.Equal(prop.Address, vss[1].GetAddress()) {
|
||||
panic(cmn.Fmt("expected proposer to be validator %d. Got %X", 1, prop.Address))
|
||||
panic(fmt.Sprintf("expected proposer to be validator %d. Got %X", 1, prop.Address))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -104,8 +103,9 @@ func TestStateProposerSelection2(t *testing.T) {
|
||||
// everyone just votes nil. we get a new proposer each round
|
||||
for i := 0; i < len(vss); i++ {
|
||||
prop := cs1.GetRoundState().Validators.GetProposer()
|
||||
if !bytes.Equal(prop.Address, vss[(i+2)%len(vss)].GetAddress()) {
|
||||
panic(cmn.Fmt("expected proposer to be validator %d. Got %X", (i+2)%len(vss), prop.Address))
|
||||
correctProposer := vss[(i+2)%len(vss)].GetAddress()
|
||||
if !bytes.Equal(prop.Address, correctProposer) {
|
||||
panic(fmt.Sprintf("expected RoundState.Validators.GetProposer() to be validator %d. Got %X", (i+2)%len(vss), prop.Address))
|
||||
}
|
||||
|
||||
rs := cs1.GetRoundState()
|
||||
@@ -444,7 +444,7 @@ func TestStateLockNoPOL(t *testing.T) {
|
||||
|
||||
// now we're on a new round and are the proposer
|
||||
if !bytes.Equal(rs.ProposalBlock.Hash(), rs.LockedBlock.Hash()) {
|
||||
panic(cmn.Fmt("Expected proposal block to be locked block. Got %v, Expected %v", rs.ProposalBlock, rs.LockedBlock))
|
||||
panic(fmt.Sprintf("Expected proposal block to be locked block. Got %v, Expected %v", rs.ProposalBlock, rs.LockedBlock))
|
||||
}
|
||||
|
||||
<-voteCh // prevote
|
||||
|
@@ -6,9 +6,9 @@ import (
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
)
|
||||
|
||||
type RoundVoteSet struct {
|
||||
@@ -169,7 +169,7 @@ func (hvs *HeightVoteSet) getVoteSet(round int, type_ byte) *types.VoteSet {
|
||||
case types.VoteTypePrecommit:
|
||||
return rvs.Precommits
|
||||
default:
|
||||
cmn.PanicSanity(cmn.Fmt("Unexpected vote type %X", type_))
|
||||
cmn.PanicSanity(fmt.Sprintf("Unexpected vote type %X", type_))
|
||||
return nil
|
||||
}
|
||||
}
|
||||
@@ -219,7 +219,7 @@ func (hvs *HeightVoteSet) StringIndented(indent string) string {
|
||||
voteSetString = roundVoteSet.Precommits.StringShort()
|
||||
vsStrings = append(vsStrings, voteSetString)
|
||||
}
|
||||
return cmn.Fmt(`HeightVoteSet{H:%v R:0~%v
|
||||
return fmt.Sprintf(`HeightVoteSet{H:%v R:0~%v
|
||||
%s %v
|
||||
%s}`,
|
||||
hvs.height, hvs.round,
|
||||
|
@@ -1,12 +1,12 @@
|
||||
package types
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
tmtime "github.com/tendermint/tendermint/types/time"
|
||||
)
|
||||
|
||||
var config *cfg.Config // NOTE: must be reset for each _test.go file
|
||||
@@ -55,14 +55,14 @@ func makeVoteHR(t *testing.T, height int64, round int, privVals []types.PrivVali
|
||||
ValidatorIndex: valIndex,
|
||||
Height: height,
|
||||
Round: round,
|
||||
Timestamp: time.Now().UTC(),
|
||||
Timestamp: tmtime.Now(),
|
||||
Type: types.VoteTypePrecommit,
|
||||
BlockID: types.BlockID{[]byte("fakehash"), types.PartSetHeader{}},
|
||||
}
|
||||
chainID := config.ChainID()
|
||||
err := privVal.SignVote(chainID, vote)
|
||||
if err != nil {
|
||||
panic(cmn.Fmt("Error signing vote: %v", err))
|
||||
panic(fmt.Sprintf("Error signing vote: %v", err))
|
||||
return nil
|
||||
}
|
||||
return vote
|
||||
|
@@ -4,8 +4,8 @@ import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/tendermint/tendermint/types"
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
|
@@ -5,8 +5,8 @@ import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/tendermint/tendermint/types"
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
|
@@ -2,12 +2,12 @@ package types
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/tendermint/go-amino"
|
||||
"github.com/tendermint/tendermint/crypto"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
"github.com/tendermint/tendermint/crypto/ed25519"
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
tmtime "github.com/tendermint/tendermint/types/time"
|
||||
)
|
||||
|
||||
func BenchmarkRoundStateDeepCopy(b *testing.B) {
|
||||
@@ -23,11 +23,11 @@ func BenchmarkRoundStateDeepCopy(b *testing.B) {
|
||||
Hash: cmn.RandBytes(20),
|
||||
},
|
||||
}
|
||||
sig := crypto.SignatureEd25519{}
|
||||
sig := make([]byte, ed25519.SignatureSize)
|
||||
for i := 0; i < nval; i++ {
|
||||
precommits[i] = &types.Vote{
|
||||
ValidatorAddress: types.Address(cmn.RandBytes(20)),
|
||||
Timestamp: time.Now(),
|
||||
Timestamp: tmtime.Now(),
|
||||
BlockID: blockID,
|
||||
Signature: sig,
|
||||
}
|
||||
@@ -38,9 +38,9 @@ func BenchmarkRoundStateDeepCopy(b *testing.B) {
|
||||
}
|
||||
// Random block
|
||||
block := &types.Block{
|
||||
Header: &types.Header{
|
||||
Header: types.Header{
|
||||
ChainID: cmn.RandStr(12),
|
||||
Time: time.Now(),
|
||||
Time: tmtime.Now(),
|
||||
LastBlockID: blockID,
|
||||
LastCommitHash: cmn.RandBytes(20),
|
||||
DataHash: cmn.RandBytes(20),
|
||||
@@ -50,7 +50,7 @@ func BenchmarkRoundStateDeepCopy(b *testing.B) {
|
||||
LastResultsHash: cmn.RandBytes(20),
|
||||
EvidenceHash: cmn.RandBytes(20),
|
||||
},
|
||||
Data: &types.Data{
|
||||
Data: types.Data{
|
||||
Txs: txs,
|
||||
},
|
||||
Evidence: types.EvidenceData{},
|
||||
@@ -62,7 +62,7 @@ func BenchmarkRoundStateDeepCopy(b *testing.B) {
|
||||
parts := block.MakePartSet(4096)
|
||||
// Random Proposal
|
||||
proposal := &types.Proposal{
|
||||
Timestamp: time.Now(),
|
||||
Timestamp: tmtime.Now(),
|
||||
BlockPartsHeader: types.PartSetHeader{
|
||||
Hash: cmn.RandBytes(20),
|
||||
},
|
||||
@@ -73,8 +73,8 @@ func BenchmarkRoundStateDeepCopy(b *testing.B) {
|
||||
// TODO: hvs :=
|
||||
|
||||
rs := &RoundState{
|
||||
StartTime: time.Now(),
|
||||
CommitTime: time.Now(),
|
||||
StartTime: tmtime.Now(),
|
||||
CommitTime: tmtime.Now(),
|
||||
Validators: vset,
|
||||
Proposal: proposal,
|
||||
ProposalBlock: block,
|
||||
|
@@ -2,11 +2,11 @@ package types
|
||||
|
||||
import (
|
||||
"github.com/tendermint/go-amino"
|
||||
"github.com/tendermint/tendermint/crypto"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
var cdc = amino.NewCodec()
|
||||
|
||||
func init() {
|
||||
crypto.RegisterAmino(cdc)
|
||||
types.RegisterBlockAmino(cdc)
|
||||
}
|
||||
|
@@ -1,8 +1,6 @@
package consensus

import (
	cmn "github.com/tendermint/tendermint/libs/common"
)
import "fmt"

// kind of arbitrary
var Spec = "1" // async
@@ -10,4 +8,4 @@ var Major = "0" //
var Minor = "2" // replay refactor
var Revision = "2" // validation -> commit

var Version = cmn.Fmt("v%s/%s.%s.%s", Spec, Major, Minor, Revision)
var Version = fmt.Sprintf("v%s/%s.%s.%s", Spec, Major, Minor, Revision)
@@ -11,9 +11,10 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
|
||||
amino "github.com/tendermint/go-amino"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
auto "github.com/tendermint/tendermint/libs/autofile"
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
tmtime "github.com/tendermint/tendermint/types/time"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -119,8 +120,8 @@ func (wal *baseWAL) Write(msg WALMessage) {
|
||||
}
|
||||
|
||||
// Write the wal message
|
||||
if err := wal.enc.Encode(&TimedWALMessage{time.Now(), msg}); err != nil {
|
||||
panic(cmn.Fmt("Error writing msg to consensus wal: %v \n\nMessage: %v", err, msg))
|
||||
if err := wal.enc.Encode(&TimedWALMessage{tmtime.Now(), msg}); err != nil {
|
||||
panic(fmt.Sprintf("Error writing msg to consensus wal: %v \n\nMessage: %v", err, msg))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -134,7 +135,7 @@ func (wal *baseWAL) WriteSync(msg WALMessage) {
|
||||
|
||||
wal.Write(msg)
|
||||
if err := wal.group.Flush(); err != nil {
|
||||
panic(cmn.Fmt("Error flushing consensus wal buf to file. Error: %v \n", err))
|
||||
panic(fmt.Sprintf("Error flushing consensus wal buf to file. Error: %v \n", err))
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -13,14 +13,14 @@ import (
|
||||
"github.com/tendermint/tendermint/abci/example/kvstore"
|
||||
bc "github.com/tendermint/tendermint/blockchain"
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
"github.com/tendermint/tendermint/privval"
|
||||
"github.com/tendermint/tendermint/proxy"
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
auto "github.com/tendermint/tendermint/libs/autofile"
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
"github.com/tendermint/tendermint/libs/db"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/privval"
|
||||
"github.com/tendermint/tendermint/proxy"
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
// WALWithNBlocks generates a consensus WAL. It does this by spinning up a
|
||||
|
@@ -3,20 +3,21 @@ package consensus
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"fmt"
|
||||
// "sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/tendermint/tendermint/consensus/types"
|
||||
tmtypes "github.com/tendermint/tendermint/types"
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
tmtime "github.com/tendermint/tendermint/types/time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestWALEncoderDecoder(t *testing.T) {
|
||||
now := time.Now()
|
||||
now := tmtime.Now()
|
||||
msgs := []TimedWALMessage{
|
||||
TimedWALMessage{Time: now, Msg: EndHeightMessage{0}},
|
||||
TimedWALMessage{Time: now, Msg: timeoutInfo{Duration: time.Second, Height: 1, Round: 1, Step: types.RoundStepPropose}},
|
||||
@@ -54,8 +55,8 @@ func TestWALSearchForEndHeight(t *testing.T) {
|
||||
|
||||
h := int64(3)
|
||||
gr, found, err := wal.SearchForEndHeight(h, &WALSearchOptions{})
|
||||
assert.NoError(t, err, cmn.Fmt("expected not to err on height %d", h))
|
||||
assert.True(t, found, cmn.Fmt("expected to find end height for %d", h))
|
||||
assert.NoError(t, err, fmt.Sprintf("expected not to err on height %d", h))
|
||||
assert.True(t, found, fmt.Sprintf("expected to find end height for %d", h))
|
||||
assert.NotNil(t, gr, "expected group not to be nil")
|
||||
defer gr.Close()
|
||||
|
||||
@@ -64,7 +65,7 @@ func TestWALSearchForEndHeight(t *testing.T) {
|
||||
assert.NoError(t, err, "expected to decode a message")
|
||||
rs, ok := msg.Msg.(tmtypes.EventDataRoundState)
|
||||
assert.True(t, ok, "expected message of type EventDataRoundState")
|
||||
assert.Equal(t, rs.Height, h+1, cmn.Fmt("wrong height"))
|
||||
assert.Equal(t, rs.Height, h+1, fmt.Sprintf("wrong height"))
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -93,7 +94,7 @@ func benchmarkWalDecode(b *testing.B, n int) {
|
||||
enc := NewWALEncoder(buf)
|
||||
|
||||
data := nBytes(n)
|
||||
enc.Encode(&TimedWALMessage{Msg: data, Time: time.Now().Round(time.Second)})
|
||||
enc.Encode(&TimedWALMessage{Msg: data, Time: time.Now().Round(time.Second).UTC()})
|
||||
|
||||
encoded := buf.Bytes()
|
||||
|
||||
|
@@ -2,7 +2,7 @@ package consensus
|
||||
|
||||
import (
|
||||
"github.com/tendermint/go-amino"
|
||||
"github.com/tendermint/tendermint/crypto"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
var cdc = amino.NewCodec()
|
||||
@@ -10,5 +10,5 @@ var cdc = amino.NewCodec()
|
||||
func init() {
|
||||
RegisterConsensusMessages(cdc)
|
||||
RegisterWALMessages(cdc)
|
||||
crypto.RegisterAmino(cdc)
|
||||
types.RegisterBlockAmino(cdc)
|
||||
}
|
||||
|
@@ -3,8 +3,15 @@
crypto is the cryptographic package adapted for Tendermint's uses

## Importing it
To get the interfaces,
`import "github.com/tendermint/tendermint/crypto"`

For any specific algorithm, use its specific module e.g.
`import "github.com/tendermint/tendermint/crypto/ed25519"`

If you want to decode bytes into one of the types, but don't care about the specific algorithm, use
`import "github.com/tendermint/tendermint/crypto/amino"`
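A minimal sketch of this import layout, assuming the `ed25519.GenPrivKey`, `Sign`, `PubKey`, and `VerifyBytes` APIs that appear elsewhere in this changeset; the message bytes are purely illustrative.

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/crypto"
	"github.com/tendermint/tendermint/crypto/ed25519"
)

func main() {
	// Concrete key from the algorithm-specific package.
	priv := ed25519.GenPrivKey()

	// Work against the algorithm-agnostic interfaces from the crypto package.
	var pub crypto.PubKey = priv.PubKey()

	msg := []byte("example message") // illustrative payload
	sig, err := priv.Sign(msg)
	if err != nil {
		panic(err)
	}

	fmt.Println("address:", pub.Address())
	fmt.Println("signature valid:", pub.VerifyBytes(msg, sig))
}
```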
## Binary encoding

For Binary encoding, please refer to the [Tendermint encoding spec](https://github.com/tendermint/tendermint/blob/master/docs/spec/blockchain/encoding.md).

@@ -16,10 +23,8 @@ crypto `.Bytes()` uses Amino:binary encoding, but Amino:JSON is also supported.
```go
Example Amino:JSON encodings:

crypto.PrivKeyEd25519 - {"type":"954568A3288910","value":"EVkqJO/jIXp3rkASXfh9YnyToYXRXhBr6g9cQVxPFnQBP/5povV4HTjvsy530kybxKHwEi85iU8YL0qQhSYVoQ=="}
crypto.SignatureEd25519 - {"type":"6BF5903DA1DB28","value":"77sQNZOrf7ltExpf7AV1WaYPCHbyRLgjBsoWVzcduuLk+jIGmYk+s5R6Emm29p12HeiNAuhUJgdFGmwkpeGJCA=="}
crypto.PubKeyEd25519 - {"type":"AC26791624DE60","value":"AT/+aaL1eB0477Mud9JMm8Sh8BIvOYlPGC9KkIUmFaE="}
ed25519.PrivKeyEd25519 - {"type":"954568A3288910","value":"EVkqJO/jIXp3rkASXfh9YnyToYXRXhBr6g9cQVxPFnQBP/5povV4HTjvsy530kybxKHwEi85iU8YL0qQhSYVoQ=="}
ed25519.PubKeyEd25519 - {"type":"AC26791624DE60","value":"AT/+aaL1eB0477Mud9JMm8Sh8BIvOYlPGC9KkIUmFaE="}
crypto.PrivKeySecp256k1 - {"type":"019E82E1B0F798","value":"zx4Pnh67N+g2V+5vZbQzEyRerX9c4ccNZOVzM9RvJ0Y="}
crypto.SignatureSecp256k1 - {"type":"6D1EA416E1FEE8","value":"MEUCIQCIg5TqS1l7I+MKTrSPIuUN2+4m5tA29dcauqn3NhEJ2wIgICaZ+lgRc5aOTVahU/XoLopXKn8BZcl0bnuYWLvohR8="}
crypto.PubKeySecp256k1 - {"type":"F8CCEAEB5AE980","value":"A8lPKJXcNl5VHt1FK8a244K9EJuS4WX1hFBnwisi0IJx"}
```
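As a rough sketch of how such Amino:JSON output can be produced, the snippet below registers the Ed25519 key types on a fresh codec (mirroring the `init` in `crypto/ed25519/ed25519.go` further down in this changeset) and marshals a generated public key; the exact `type` and `value` fields depend on the registered route and the key.

```go
package main

import (
	"fmt"

	amino "github.com/tendermint/go-amino"

	"github.com/tendermint/tendermint/crypto"
	"github.com/tendermint/tendermint/crypto/ed25519"
)

func main() {
	cdc := amino.NewCodec()
	// Mirror the registration in crypto/ed25519's init().
	cdc.RegisterInterface((*crypto.PubKey)(nil), nil)
	cdc.RegisterConcrete(ed25519.PubKeyEd25519{}, ed25519.PubKeyAminoRoute, nil)

	pub := ed25519.GenPrivKey().PubKey()
	bz, err := cdc.MarshalJSON(pub)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(bz)) // e.g. {"type":"...","value":"..."}
}
```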
@@ -1,37 +0,0 @@
package crypto

import (
	amino "github.com/tendermint/go-amino"
)

var cdc = amino.NewCodec()

func init() {
	// NOTE: It's important that there be no conflicts here,
	// as that would change the canonical representations,
	// and therefore change the address.
	// TODO: Add feature to go-amino to ensure that there
	// are no conflicts.
	RegisterAmino(cdc)
}

// RegisterAmino registers all crypto related types in the given (amino) codec.
func RegisterAmino(cdc *amino.Codec) {
	cdc.RegisterInterface((*PubKey)(nil), nil)
	cdc.RegisterConcrete(PubKeyEd25519{},
		"tendermint/PubKeyEd25519", nil)
	cdc.RegisterConcrete(PubKeySecp256k1{},
		"tendermint/PubKeySecp256k1", nil)

	cdc.RegisterInterface((*PrivKey)(nil), nil)
	cdc.RegisterConcrete(PrivKeyEd25519{},
		"tendermint/PrivKeyEd25519", nil)
	cdc.RegisterConcrete(PrivKeySecp256k1{},
		"tendermint/PrivKeySecp256k1", nil)

	cdc.RegisterInterface((*Signature)(nil), nil)
	cdc.RegisterConcrete(SignatureEd25519{},
		"tendermint/SignatureEd25519", nil)
	cdc.RegisterConcrete(SignatureSecp256k1{},
		"tendermint/SignatureSecp256k1", nil)
}
@@ -1,4 +1,4 @@
package crypto
package armor

import (
	"bytes"
@@ -1,4 +1,4 @@
package crypto
package armor

import (
	"testing"
30
crypto/crypto.go
Normal file
@@ -0,0 +1,30 @@
package crypto

import (
	cmn "github.com/tendermint/tendermint/libs/common"
)

type PrivKey interface {
	Bytes() []byte
	Sign(msg []byte) ([]byte, error)
	PubKey() PubKey
	Equals(PrivKey) bool
}

// An address is a []byte, but hex-encoded even in JSON.
// []byte leaves us the option to change the address length.
// Use an alias so Unmarshal methods (with ptr receivers) are available too.
type Address = cmn.HexBytes

type PubKey interface {
	Address() Address
	Bytes() []byte
	VerifyBytes(msg []byte, sig []byte) bool
	Equals(PubKey) bool
}

type Symmetric interface {
	Keygen() []byte
	Encrypt(plaintext []byte, secret []byte) (ciphertext []byte)
	Decrypt(ciphertext []byte, secret []byte) (plaintext []byte, err error)
}
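For orientation, a minimal sketch of how code is expected to program against these interfaces, using the ed25519 implementation added later in this changeset (the example itself is not part of the diff):

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/crypto"
	"github.com/tendermint/tendermint/crypto/ed25519"
)

func main() {
	// Any concrete key type satisfies crypto.PrivKey / crypto.PubKey.
	var priv crypto.PrivKey = ed25519.GenPrivKey()
	var pub crypto.PubKey = priv.PubKey()

	msg := []byte("tendermint")
	sig, err := priv.Sign(msg)
	if err != nil {
		panic(err)
	}

	// Address is derived from the pubkey bytes and prints as hex.
	fmt.Printf("address=%X valid=%v\n", pub.Address(), pub.VerifyBytes(msg, sig))
}
```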
@@ -22,7 +22,7 @@
 // pubKey := key.PubKey()

 // For example:
-// privKey, err := crypto.GenPrivKeyEd25519()
+// privKey, err := ed25519.GenPrivKey()
 // if err != nil {
 // ...
 // }
26
crypto/ed25519/bench_test.go
Normal file
@@ -0,0 +1,26 @@
package ed25519

import (
	"io"
	"testing"

	"github.com/tendermint/tendermint/crypto"
	"github.com/tendermint/tendermint/crypto/internal/benchmarking"
)

func BenchmarkKeyGeneration(b *testing.B) {
	benchmarkKeygenWrapper := func(reader io.Reader) crypto.PrivKey {
		return genPrivKey(reader)
	}
	benchmarking.BenchmarkKeyGeneration(b, benchmarkKeygenWrapper)
}

func BenchmarkSigning(b *testing.B) {
	priv := GenPrivKey()
	benchmarking.BenchmarkSigning(b, priv)
}

func BenchmarkVerification(b *testing.B) {
	priv := GenPrivKey()
	benchmarking.BenchmarkVerification(b, priv)
}
196
crypto/ed25519/ed25519.go
Normal file
@@ -0,0 +1,196 @@
|
||||
package ed25519
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/subtle"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/tendermint/ed25519"
|
||||
"github.com/tendermint/ed25519/extra25519"
|
||||
amino "github.com/tendermint/go-amino"
|
||||
"github.com/tendermint/tendermint/crypto"
|
||||
"github.com/tendermint/tendermint/crypto/tmhash"
|
||||
)
|
||||
|
||||
//-------------------------------------
|
||||
|
||||
var _ crypto.PrivKey = PrivKeyEd25519{}
|
||||
|
||||
const (
|
||||
PrivKeyAminoRoute = "tendermint/PrivKeyEd25519"
|
||||
PubKeyAminoRoute = "tendermint/PubKeyEd25519"
|
||||
// Size of an Edwards25519 signature. Namely the size of a compressed
|
||||
// Edwards25519 point, and a field element. Both of which are 32 bytes.
|
||||
SignatureSize = 64
|
||||
)
|
||||
|
||||
var cdc = amino.NewCodec()
|
||||
|
||||
func init() {
|
||||
cdc.RegisterInterface((*crypto.PubKey)(nil), nil)
|
||||
cdc.RegisterConcrete(PubKeyEd25519{},
|
||||
PubKeyAminoRoute, nil)
|
||||
|
||||
cdc.RegisterInterface((*crypto.PrivKey)(nil), nil)
|
||||
cdc.RegisterConcrete(PrivKeyEd25519{},
|
||||
PrivKeyAminoRoute, nil)
|
||||
}
|
||||
|
||||
// PrivKeyEd25519 implements crypto.PrivKey.
|
||||
type PrivKeyEd25519 [64]byte
|
||||
|
||||
// Bytes marshals the privkey using amino encoding.
|
||||
func (privKey PrivKeyEd25519) Bytes() []byte {
|
||||
return cdc.MustMarshalBinaryBare(privKey)
|
||||
}
|
||||
|
||||
// Sign produces a signature on the provided message.
|
||||
func (privKey PrivKeyEd25519) Sign(msg []byte) ([]byte, error) {
|
||||
privKeyBytes := [64]byte(privKey)
|
||||
signatureBytes := ed25519.Sign(&privKeyBytes, msg)
|
||||
return signatureBytes[:], nil
|
||||
}
|
||||
|
||||
// PubKey gets the corresponding public key from the private key.
|
||||
func (privKey PrivKeyEd25519) PubKey() crypto.PubKey {
|
||||
privKeyBytes := [64]byte(privKey)
|
||||
initialized := false
|
||||
// If the latter 32 bytes of the privkey are all zero, compute the pubkey
|
||||
// otherwise privkey is initialized and we can use the cached value inside
|
||||
// of the private key.
|
||||
for _, v := range privKeyBytes[32:] {
|
||||
if v != 0 {
|
||||
initialized = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if initialized {
|
||||
var pubkeyBytes [PubKeyEd25519Size]byte
|
||||
copy(pubkeyBytes[:], privKeyBytes[32:])
|
||||
return PubKeyEd25519(pubkeyBytes)
|
||||
}
|
||||
|
||||
pubBytes := *ed25519.MakePublicKey(&privKeyBytes)
|
||||
return PubKeyEd25519(pubBytes)
|
||||
}
|
||||
|
||||
// Equals - you probably don't need to use this.
|
||||
// Runs in constant time based on length of the keys.
|
||||
func (privKey PrivKeyEd25519) Equals(other crypto.PrivKey) bool {
|
||||
if otherEd, ok := other.(PrivKeyEd25519); ok {
|
||||
return subtle.ConstantTimeCompare(privKey[:], otherEd[:]) == 1
|
||||
} else {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// ToCurve25519 takes a private key and returns its representation on
|
||||
// Curve25519. Curve25519 is birationally equivalent to Edwards25519,
|
||||
// which Ed25519 uses internally. This method is intended for use in
|
||||
// an X25519 Diffie Hellman key exchange.
|
||||
func (privKey PrivKeyEd25519) ToCurve25519() *[PubKeyEd25519Size]byte {
|
||||
keyCurve25519 := new([32]byte)
|
||||
privKeyBytes := [64]byte(privKey)
|
||||
extra25519.PrivateKeyToCurve25519(keyCurve25519, &privKeyBytes)
|
||||
return keyCurve25519
|
||||
}
|
||||
|
||||
// GenPrivKey generates a new ed25519 private key.
|
||||
// It uses OS randomness in conjunction with the current global random seed
|
||||
// in tendermint/libs/common to generate the private key.
|
||||
func GenPrivKey() PrivKeyEd25519 {
|
||||
return genPrivKey(crypto.CReader())
|
||||
}
|
||||
|
||||
// genPrivKey generates a new ed25519 private key using the provided reader.
|
||||
func genPrivKey(rand io.Reader) PrivKeyEd25519 {
|
||||
privKey := new([64]byte)
|
||||
_, err := io.ReadFull(rand, privKey[:32])
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
// ed25519.MakePublicKey(privKey) alters the last 32 bytes of privKey.
|
||||
// It places the pubkey in the last 32 bytes of privKey, and returns the
|
||||
// public key.
|
||||
ed25519.MakePublicKey(privKey)
|
||||
return PrivKeyEd25519(*privKey)
|
||||
}
|
||||
|
||||
// GenPrivKeyFromSecret hashes the secret with SHA2, and uses
|
||||
// that 32 byte output to create the private key.
|
||||
// NOTE: secret should be the output of a KDF like bcrypt,
|
||||
// if it's derived from user input.
|
||||
func GenPrivKeyFromSecret(secret []byte) PrivKeyEd25519 {
|
||||
privKey32 := crypto.Sha256(secret) // Not Ripemd160 because we want 32 bytes.
|
||||
privKey := new([64]byte)
|
||||
copy(privKey[:32], privKey32)
|
||||
// ed25519.MakePublicKey(privKey) alters the last 32 bytes of privKey.
|
||||
// It places the pubkey in the last 32 bytes of privKey, and returns the
|
||||
// public key.
|
||||
ed25519.MakePublicKey(privKey)
|
||||
return PrivKeyEd25519(*privKey)
|
||||
}
|
||||
|
||||
//-------------------------------------
|
||||
|
||||
var _ crypto.PubKey = PubKeyEd25519{}
|
||||
|
||||
// PubKeyEd25519Size is the number of bytes in an Ed25519 signature.
|
||||
const PubKeyEd25519Size = 32
|
||||
|
||||
// PubKeyEd25519 implements crypto.PubKey for the Ed25519 signature scheme.
|
||||
type PubKeyEd25519 [PubKeyEd25519Size]byte
|
||||
|
||||
// Address is the SHA256-20 of the raw pubkey bytes.
|
||||
func (pubKey PubKeyEd25519) Address() crypto.Address {
|
||||
return crypto.Address(tmhash.Sum(pubKey[:]))
|
||||
}
|
||||
|
||||
// Bytes marshals the PubKey using amino encoding.
|
||||
func (pubKey PubKeyEd25519) Bytes() []byte {
|
||||
bz, err := cdc.MarshalBinaryBare(pubKey)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return bz
|
||||
}
|
||||
|
||||
func (pubKey PubKeyEd25519) VerifyBytes(msg []byte, sig_ []byte) bool {
|
||||
// make sure we use the same algorithm to sign
|
||||
if len(sig_) != SignatureSize {
|
||||
return false
|
||||
}
|
||||
sig := new([SignatureSize]byte)
|
||||
copy(sig[:], sig_)
|
||||
pubKeyBytes := [PubKeyEd25519Size]byte(pubKey)
|
||||
return ed25519.Verify(&pubKeyBytes, msg, sig)
|
||||
}
|
||||
|
||||
// ToCurve25519 takes a public key and returns its representation on
|
||||
// Curve25519. Curve25519 is birationally equivalent to Edwards25519,
|
||||
// which Ed25519 uses internally. This method is intended for use in
|
||||
// an X25519 Diffie Hellman key exchange.
|
||||
//
|
||||
// If there is an error, then this function returns nil.
|
||||
func (pubKey PubKeyEd25519) ToCurve25519() *[PubKeyEd25519Size]byte {
|
||||
keyCurve25519, pubKeyBytes := new([PubKeyEd25519Size]byte), [PubKeyEd25519Size]byte(pubKey)
|
||||
ok := extra25519.PublicKeyToCurve25519(keyCurve25519, &pubKeyBytes)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
return keyCurve25519
|
||||
}
|
||||
|
||||
func (pubKey PubKeyEd25519) String() string {
|
||||
return fmt.Sprintf("PubKeyEd25519{%X}", pubKey[:])
|
||||
}
|
||||
|
||||
// nolint: golint
|
||||
func (pubKey PubKeyEd25519) Equals(other crypto.PubKey) bool {
|
||||
if otherEd, ok := other.(PubKeyEd25519); ok {
|
||||
return bytes.Equal(pubKey[:], otherEd[:])
|
||||
} else {
|
||||
return false
|
||||
}
|
||||
}
|
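A small illustrative test sketch for the deterministic constructor GenPrivKeyFromSecret defined in the new ed25519 package above; the secret literal stands in for the output of a real KDF such as bcrypt and is not part of the diff:

```go
package ed25519_test

import (
	"bytes"
	"testing"

	"github.com/tendermint/tendermint/crypto/ed25519"
)

// Sketch: the same secret always yields the same key, so the derived
// pubkeys (and therefore addresses) match across runs.
func TestGenPrivKeyFromSecretDeterministic(t *testing.T) {
	secret := []byte("output-of-a-real-KDF-goes-here") // placeholder secret
	k1 := ed25519.GenPrivKeyFromSecret(secret)
	k2 := ed25519.GenPrivKeyFromSecret(secret)

	if !k1.Equals(k2) {
		t.Fatal("expected identical keys from identical secrets")
	}
	if !bytes.Equal(k1.PubKey().Bytes(), k2.PubKey().Bytes()) {
		t.Fatal("expected identical pubkeys from identical secrets")
	}
}
```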
29
crypto/ed25519/ed25519_test.go
Normal file
@@ -0,0 +1,29 @@
package ed25519_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/tendermint/tendermint/crypto"
	"github.com/tendermint/tendermint/crypto/ed25519"
)

func TestSignAndValidateEd25519(t *testing.T) {

	privKey := ed25519.GenPrivKey()
	pubKey := privKey.PubKey()

	msg := crypto.CRandBytes(128)
	sig, err := privKey.Sign(msg)
	require.Nil(t, err)

	// Test the signature
	assert.True(t, pubKey.VerifyBytes(msg, sig))

	// Mutate the signature, just one bit.
	// TODO: Replace this with a much better fuzzer, tendermint/ed25519/issues/10
	sig[7] ^= byte(0x01)

	assert.False(t, pubKey.VerifyBytes(msg, sig))
}
50
crypto/encoding/amino/amino.go
Normal file
@@ -0,0 +1,50 @@
package cryptoAmino

import (
	amino "github.com/tendermint/go-amino"

	"github.com/tendermint/tendermint/crypto"
	"github.com/tendermint/tendermint/crypto/ed25519"
	"github.com/tendermint/tendermint/crypto/multisig"
	"github.com/tendermint/tendermint/crypto/secp256k1"
)

var cdc = amino.NewCodec()

func init() {
	// NOTE: It's important that there be no conflicts here,
	// as that would change the canonical representations,
	// and therefore change the address.
	// TODO: Remove above note when
	// https://github.com/tendermint/go-amino/issues/9
	// is resolved
	RegisterAmino(cdc)
}

// RegisterAmino registers all crypto related types in the given (amino) codec.
func RegisterAmino(cdc *amino.Codec) {
	// These are all written here instead of
	cdc.RegisterInterface((*crypto.PubKey)(nil), nil)
	cdc.RegisterConcrete(ed25519.PubKeyEd25519{},
		ed25519.PubKeyAminoRoute, nil)
	cdc.RegisterConcrete(secp256k1.PubKeySecp256k1{},
		secp256k1.PubKeyAminoRoute, nil)
	cdc.RegisterConcrete(multisig.PubKeyMultisigThreshold{},
		multisig.PubKeyMultisigThresholdAminoRoute, nil)

	cdc.RegisterInterface((*crypto.PrivKey)(nil), nil)
	cdc.RegisterConcrete(ed25519.PrivKeyEd25519{},
		ed25519.PrivKeyAminoRoute, nil)
	cdc.RegisterConcrete(secp256k1.PrivKeySecp256k1{},
		secp256k1.PrivKeyAminoRoute, nil)
}

func PrivKeyFromBytes(privKeyBytes []byte) (privKey crypto.PrivKey, err error) {
	err = cdc.UnmarshalBinaryBare(privKeyBytes, &privKey)
	return
}

func PubKeyFromBytes(pubKeyBytes []byte) (pubKey crypto.PubKey, err error) {
	err = cdc.UnmarshalBinaryBare(pubKeyBytes, &pubKey)
	return
}
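Applications that keep their own amino codec are expected to call RegisterAmino on it so interface-typed keys round-trip; a sketch of that flow (the appCdc codec and main wiring are illustrative, not part of the diff):

```go
package main

import (
	"fmt"

	amino "github.com/tendermint/go-amino"

	"github.com/tendermint/tendermint/crypto"
	"github.com/tendermint/tendermint/crypto/ed25519"
	cryptoAmino "github.com/tendermint/tendermint/crypto/encoding/amino"
)

func main() {
	// An application-level codec that also needs to (de)serialize keys.
	appCdc := amino.NewCodec()
	cryptoAmino.RegisterAmino(appCdc)

	pub := ed25519.GenPrivKey().PubKey()
	bz := appCdc.MustMarshalBinaryBare(pub)

	// Decode into the interface type; the registered concrete type is
	// picked by its amino prefix.
	var decoded crypto.PubKey
	appCdc.MustUnmarshalBinaryBare(bz, &decoded)
	fmt.Println(decoded.Equals(pub)) // true
}
```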
@@ -1,4 +1,4 @@
|
||||
package crypto
|
||||
package cryptoAmino
|
||||
|
||||
import (
|
||||
"os"
|
||||
@@ -6,18 +6,23 @@ import (
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/tendermint/tendermint/crypto"
|
||||
"github.com/tendermint/tendermint/crypto/ed25519"
|
||||
"github.com/tendermint/tendermint/crypto/secp256k1"
|
||||
)
|
||||
|
||||
type byter interface {
|
||||
Bytes() []byte
|
||||
}
|
||||
|
||||
func checkAminoBinary(t *testing.T, src byter, dst interface{}, size int) {
|
||||
func checkAminoBinary(t *testing.T, src, dst interface{}, size int) {
|
||||
// Marshal to binary bytes.
|
||||
bz, err := cdc.MarshalBinaryBare(src)
|
||||
require.Nil(t, err, "%+v", err)
|
||||
// Make sure this is compatible with current (Bytes()) encoding.
|
||||
assert.Equal(t, src.Bytes(), bz, "Amino binary vs Bytes() mismatch")
|
||||
if byterSrc, ok := src.(byter); ok {
|
||||
// Make sure this is compatible with current (Bytes()) encoding.
|
||||
assert.Equal(t, byterSrc.Bytes(), bz, "Amino binary vs Bytes() mismatch")
|
||||
}
|
||||
// Make sure we have the expected length.
|
||||
if size != -1 {
|
||||
assert.Equal(t, size, len(bz), "Amino binary size mismatch")
|
||||
@@ -48,72 +53,77 @@ func ExamplePrintRegisteredTypes() {
|
||||
//| ---- | ---- | ------ | ----- | ------ |
|
||||
//| PubKeyEd25519 | tendermint/PubKeyEd25519 | 0x1624DE64 | 0x20 | |
|
||||
//| PubKeySecp256k1 | tendermint/PubKeySecp256k1 | 0xEB5AE987 | 0x21 | |
|
||||
//| PubKeyMultisigThreshold | tendermint/PubKeyMultisigThreshold | 0x22C1F7E2 | variable | |
|
||||
//| PrivKeyEd25519 | tendermint/PrivKeyEd25519 | 0xA3288910 | 0x40 | |
|
||||
//| PrivKeySecp256k1 | tendermint/PrivKeySecp256k1 | 0xE1B0F79B | 0x20 | |
|
||||
//| SignatureEd25519 | tendermint/SignatureEd25519 | 0x2031EA53 | 0x40 | |
|
||||
//| SignatureSecp256k1 | tendermint/SignatureSecp256k1 | 0x7FC4A495 | variable | |
|
||||
}
|
||||
|
||||
func TestKeyEncodings(t *testing.T) {
|
||||
cases := []struct {
|
||||
privKey PrivKey
|
||||
privSize, pubSize int // binary sizes
|
||||
privKey crypto.PrivKey
|
||||
privSize, pubSize, sigSize int // binary sizes
|
||||
}{
|
||||
{
|
||||
privKey: GenPrivKeyEd25519(),
|
||||
privKey: ed25519.GenPrivKey(),
|
||||
privSize: 69,
|
||||
pubSize: 37,
|
||||
sigSize: 65,
|
||||
},
|
||||
{
|
||||
privKey: GenPrivKeySecp256k1(),
|
||||
privKey: secp256k1.GenPrivKey(),
|
||||
privSize: 37,
|
||||
pubSize: 38,
|
||||
sigSize: 65,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
for tcIndex, tc := range cases {
|
||||
|
||||
// Check (de/en)codings of PrivKeys.
|
||||
var priv2, priv3 PrivKey
|
||||
var priv2, priv3 crypto.PrivKey
|
||||
checkAminoBinary(t, tc.privKey, &priv2, tc.privSize)
|
||||
assert.EqualValues(t, tc.privKey, priv2)
|
||||
assert.EqualValues(t, tc.privKey, priv2, "tc #%d", tcIndex)
|
||||
checkAminoJSON(t, tc.privKey, &priv3, false) // TODO also check Prefix bytes.
|
||||
assert.EqualValues(t, tc.privKey, priv3)
|
||||
assert.EqualValues(t, tc.privKey, priv3, "tc #%d", tcIndex)
|
||||
|
||||
// Check (de/en)codings of Signatures.
|
||||
var sig1, sig2, sig3 Signature
|
||||
var sig1, sig2 []byte
|
||||
sig1, err := tc.privKey.Sign([]byte("something"))
|
||||
assert.NoError(t, err)
|
||||
checkAminoBinary(t, sig1, &sig2, -1) // Signature size changes for Secp anyways.
|
||||
assert.EqualValues(t, sig1, sig2)
|
||||
checkAminoJSON(t, sig1, &sig3, false) // TODO also check Prefix bytes.
|
||||
assert.EqualValues(t, sig1, sig3)
|
||||
assert.NoError(t, err, "tc #%d", tcIndex)
|
||||
checkAminoBinary(t, sig1, &sig2, tc.sigSize)
|
||||
assert.EqualValues(t, sig1, sig2, "tc #%d", tcIndex)
|
||||
|
||||
// Check (de/en)codings of PubKeys.
|
||||
pubKey := tc.privKey.PubKey()
|
||||
var pub2, pub3 PubKey
|
||||
var pub2, pub3 crypto.PubKey
|
||||
checkAminoBinary(t, pubKey, &pub2, tc.pubSize)
|
||||
assert.EqualValues(t, pubKey, pub2)
|
||||
assert.EqualValues(t, pubKey, pub2, "tc #%d", tcIndex)
|
||||
checkAminoJSON(t, pubKey, &pub3, false) // TODO also check Prefix bytes.
|
||||
assert.EqualValues(t, pubKey, pub3)
|
||||
assert.EqualValues(t, pubKey, pub3, "tc #%d", tcIndex)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNilEncodings(t *testing.T) {
|
||||
|
||||
// Check nil Signature.
|
||||
var a, b Signature
|
||||
var a, b []byte
|
||||
checkAminoJSON(t, &a, &b, true)
|
||||
assert.EqualValues(t, a, b)
|
||||
|
||||
// Check nil PubKey.
|
||||
var c, d PubKey
|
||||
var c, d crypto.PubKey
|
||||
checkAminoJSON(t, &c, &d, true)
|
||||
assert.EqualValues(t, c, d)
|
||||
|
||||
// Check nil PrivKey.
|
||||
var e, f PrivKey
|
||||
var e, f crypto.PrivKey
|
||||
checkAminoJSON(t, &e, &f, true)
|
||||
assert.EqualValues(t, e, f)
|
||||
|
||||
}
|
||||
|
||||
func TestPubKeyInvalidDataProperReturnsEmpty(t *testing.T) {
|
||||
pk, err := PubKeyFromBytes([]byte("foo"))
|
||||
require.NotNil(t, err, "expecting a non-nil error")
|
||||
require.Nil(t, pk, "expecting an empty public key on error")
|
||||
}
|
@@ -2,6 +2,7 @@ package crypto

 import (
 	"crypto/sha256"
+
 	"golang.org/x/crypto/ripemd160"
 )

@@ -1,105 +0,0 @@
|
||||
// Package hkdfchacha20poly1305 creates an AEAD using hkdf, chacha20, and poly1305
|
||||
// When sealing and opening, the hkdf is used to obtain the nonce and subkey for
|
||||
// chacha20. Other than the change for the how the subkey and nonce for chacha
|
||||
// are obtained, this is the same as chacha20poly1305
|
||||
package hkdfchacha20poly1305
|
||||
|
||||
import (
|
||||
"crypto/cipher"
|
||||
"crypto/sha256"
|
||||
"errors"
|
||||
"io"
|
||||
|
||||
"golang.org/x/crypto/chacha20poly1305"
|
||||
"golang.org/x/crypto/hkdf"
|
||||
)
|
||||
|
||||
type hkdfchacha20poly1305 struct {
|
||||
key [KeySize]byte
|
||||
}
|
||||
|
||||
const (
|
||||
// KeySize is the size of the key used by this AEAD, in bytes.
|
||||
KeySize = 32
|
||||
// NonceSize is the size of the nonce used with this AEAD, in bytes.
|
||||
NonceSize = 24
|
||||
// TagSize is the size added from poly1305
|
||||
TagSize = 16
|
||||
// MaxPlaintextSize is the max size that can be passed into a single call of Seal
|
||||
MaxPlaintextSize = (1 << 38) - 64
|
||||
// MaxCiphertextSize is the max size that can be passed into a single call of Open,
|
||||
// this differs from plaintext size due to the tag
|
||||
MaxCiphertextSize = (1 << 38) - 48
|
||||
// HkdfInfo is the parameter used internally for Hkdf's info parameter.
|
||||
HkdfInfo = "TENDERMINT_SECRET_CONNECTION_FRAME_KEY_DERIVE"
|
||||
)
|
||||
|
||||
//New xChaChapoly1305 AEAD with 24 byte nonces
|
||||
func New(key []byte) (cipher.AEAD, error) {
|
||||
if len(key) != KeySize {
|
||||
return nil, errors.New("chacha20poly1305: bad key length")
|
||||
}
|
||||
ret := new(hkdfchacha20poly1305)
|
||||
copy(ret.key[:], key)
|
||||
return ret, nil
|
||||
|
||||
}
|
||||
func (c *hkdfchacha20poly1305) NonceSize() int {
|
||||
return NonceSize
|
||||
}
|
||||
|
||||
func (c *hkdfchacha20poly1305) Overhead() int {
|
||||
return TagSize
|
||||
}
|
||||
|
||||
func (c *hkdfchacha20poly1305) Seal(dst, nonce, plaintext, additionalData []byte) []byte {
|
||||
if len(nonce) != NonceSize {
|
||||
panic("hkdfchacha20poly1305: bad nonce length passed to Seal")
|
||||
}
|
||||
|
||||
if uint64(len(plaintext)) > MaxPlaintextSize {
|
||||
panic("hkdfchacha20poly1305: plaintext too large")
|
||||
}
|
||||
|
||||
subKey, chachaNonce := getSubkeyAndChachaNonceFromHkdf(&c.key, &nonce)
|
||||
|
||||
aead, err := chacha20poly1305.New(subKey[:])
|
||||
if err != nil {
|
||||
panic("hkdfchacha20poly1305: failed to initialize chacha20poly1305")
|
||||
}
|
||||
|
||||
return aead.Seal(dst, chachaNonce[:], plaintext, additionalData)
|
||||
}
|
||||
|
||||
func (c *hkdfchacha20poly1305) Open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) {
|
||||
if len(nonce) != NonceSize {
|
||||
return nil, errors.New("hkdfchacha20poly1305: bad nonce length passed to Open")
|
||||
}
|
||||
if uint64(len(ciphertext)) > MaxCiphertextSize {
|
||||
return nil, errors.New("hkdfchacha20poly1305: ciphertext too large")
|
||||
}
|
||||
|
||||
subKey, chachaNonce := getSubkeyAndChachaNonceFromHkdf(&c.key, &nonce)
|
||||
|
||||
aead, err := chacha20poly1305.New(subKey[:])
|
||||
if err != nil {
|
||||
panic("hkdfchacha20poly1305: failed to initialize chacha20poly1305")
|
||||
}
|
||||
|
||||
return aead.Open(dst, chachaNonce[:], ciphertext, additionalData)
|
||||
}
|
||||
|
||||
func getSubkeyAndChachaNonceFromHkdf(cKey *[32]byte, nonce *[]byte) (
|
||||
subKey [KeySize]byte, chachaNonce [chacha20poly1305.NonceSize]byte) {
|
||||
hash := sha256.New
|
||||
hkdf := hkdf.New(hash, (*cKey)[:], *nonce, []byte(HkdfInfo))
|
||||
_, err := io.ReadFull(hkdf, subKey[:])
|
||||
if err != nil {
|
||||
panic("hkdfchacha20poly1305: failed to read subkey from hkdf")
|
||||
}
|
||||
_, err = io.ReadFull(hkdf, chachaNonce[:])
|
||||
if err != nil {
|
||||
panic("hkdfchacha20poly1305: failed to read chachaNonce from hkdf")
|
||||
}
|
||||
return
|
||||
}
|
88
crypto/internal/benchmarking/bench.go
Normal file
@@ -0,0 +1,88 @@
|
||||
package benchmarking
|
||||
|
||||
import (
|
||||
"io"
|
||||
"testing"
|
||||
|
||||
"github.com/tendermint/tendermint/crypto"
|
||||
)
|
||||
|
||||
// The code in this file is adapted from agl/ed25519.
|
||||
// As such it is under the following license.
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found at the bottom of this file.
|
||||
|
||||
type zeroReader struct{}
|
||||
|
||||
func (zeroReader) Read(buf []byte) (int, error) {
|
||||
for i := range buf {
|
||||
buf[i] = 0
|
||||
}
|
||||
return len(buf), nil
|
||||
}
|
||||
|
||||
// BenchmarkKeyGeneration benchmarks the given key generation algorithm using
|
||||
// a dummy reader.
|
||||
func BenchmarkKeyGeneration(b *testing.B, GenerateKey func(reader io.Reader) crypto.PrivKey) {
|
||||
var zero zeroReader
|
||||
for i := 0; i < b.N; i++ {
|
||||
GenerateKey(zero)
|
||||
}
|
||||
}
|
||||
|
||||
// BenchmarkSigning benchmarks the given signing algorithm using
|
||||
// the provided privkey.
|
||||
func BenchmarkSigning(b *testing.B, priv crypto.PrivKey) {
|
||||
message := []byte("Hello, world!")
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
priv.Sign(message)
|
||||
}
|
||||
}
|
||||
|
||||
// BenchmarkVerification benchmarks the given verification algorithm using
|
||||
// the provided privkey on a constant message.
|
||||
func BenchmarkVerification(b *testing.B, priv crypto.PrivKey) {
|
||||
pub := priv.PubKey()
|
||||
// use a short message, so this time doesn't get dominated by hashing.
|
||||
message := []byte("Hello, world!")
|
||||
signature, err := priv.Sign(message)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
pub.VerifyBytes(message, signature)
|
||||
}
|
||||
}
|
||||
|
||||
// Below is the aforementioned license.
|
||||
|
||||
// Copyright (c) 2012 The Go Authors. All rights reserved.
|
||||
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
@@ -6,8 +6,9 @@ import (
 	cmn "github.com/tendermint/tendermint/libs/common"
 	. "github.com/tendermint/tendermint/libs/test"

-	"github.com/tendermint/tendermint/crypto/tmhash"
 	"testing"
+
+	"github.com/tendermint/tendermint/crypto/tmhash"
 )

 type testItem []byte
233
crypto/multisig/bitarray/compact_bit_array.go
Normal file
@@ -0,0 +1,233 @@
|
||||
package bitarray
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// CompactBitArray is an implementation of a space efficient bit array.
|
||||
// This is used to ensure that the encoded data takes up a minimal amount of
|
||||
// space after amino encoding.
|
||||
// This is not thread safe, and is not intended for concurrent usage.
|
||||
type CompactBitArray struct {
|
||||
ExtraBitsStored byte `json:"extra_bits"` // The number of extra bits in elems.
|
||||
Elems []byte `json:"bits"`
|
||||
}
|
||||
|
||||
// NewCompactBitArray returns a new compact bit array.
|
||||
// It returns nil if the number of bits is zero.
|
||||
func NewCompactBitArray(bits int) *CompactBitArray {
|
||||
if bits <= 0 {
|
||||
return nil
|
||||
}
|
||||
return &CompactBitArray{
|
||||
ExtraBitsStored: byte(bits % 8),
|
||||
Elems: make([]byte, (bits+7)/8),
|
||||
}
|
||||
}
|
||||
|
||||
// Size returns the number of bits in the bitarray
|
||||
func (bA *CompactBitArray) Size() int {
|
||||
if bA == nil {
|
||||
return 0
|
||||
} else if bA.ExtraBitsStored == byte(0) {
|
||||
return len(bA.Elems) * 8
|
||||
}
|
||||
// num_bits = 8*num_full_bytes + overflow_in_last_byte
|
||||
// num_full_bytes = (len(bA.Elems)-1)
|
||||
return (len(bA.Elems)-1)*8 + int(bA.ExtraBitsStored)
|
||||
}
|
||||
|
||||
// GetIndex returns the bit at index i within the bit array.
|
||||
// The behavior is undefined if i >= bA.Size()
|
||||
func (bA *CompactBitArray) GetIndex(i int) bool {
|
||||
if bA == nil {
|
||||
return false
|
||||
}
|
||||
if i >= bA.Size() {
|
||||
return false
|
||||
}
|
||||
return bA.Elems[i>>3]&(uint8(1)<<uint8(7-(i%8))) > 0
|
||||
}
|
||||
|
||||
// SetIndex sets the bit at index i within the bit array.
|
||||
// The behavior is undefined if i >= bA.Size()
|
||||
func (bA *CompactBitArray) SetIndex(i int, v bool) bool {
|
||||
if bA == nil {
|
||||
return false
|
||||
}
|
||||
if i >= bA.Size() {
|
||||
return false
|
||||
}
|
||||
if v {
|
||||
bA.Elems[i>>3] |= (uint8(1) << uint8(7-(i%8)))
|
||||
} else {
|
||||
bA.Elems[i>>3] &= ^(uint8(1) << uint8(7-(i%8)))
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// NumTrueBitsBefore returns the number of bits set to true before the
|
||||
// given index. e.g. if bA = _XX__XX, NumOfTrueBitsBefore(4) = 2, since
|
||||
// there are two bits set to true before index 4.
|
||||
func (bA *CompactBitArray) NumTrueBitsBefore(index int) int {
|
||||
numTrueValues := 0
|
||||
for i := 0; i < index; i++ {
|
||||
if bA.GetIndex(i) {
|
||||
numTrueValues++
|
||||
}
|
||||
}
|
||||
return numTrueValues
|
||||
}
|
||||
|
||||
// Copy returns a copy of the provided bit array.
|
||||
func (bA *CompactBitArray) Copy() *CompactBitArray {
|
||||
if bA == nil {
|
||||
return nil
|
||||
}
|
||||
c := make([]byte, len(bA.Elems))
|
||||
copy(c, bA.Elems)
|
||||
return &CompactBitArray{
|
||||
ExtraBitsStored: bA.ExtraBitsStored,
|
||||
Elems: c,
|
||||
}
|
||||
}
|
||||
|
||||
// String returns a string representation of CompactBitArray: BA{<bit-string>},
|
||||
// where <bit-string> is a sequence of 'x' (1) and '_' (0).
|
||||
// The <bit-string> includes spaces and newlines to help people.
|
||||
// For a simple sequence of 'x' and '_' characters with no spaces or newlines,
|
||||
// see the MarshalJSON() method.
|
||||
// Example: "BA{_x_}" or "nil-BitArray" for nil.
|
||||
func (bA *CompactBitArray) String() string {
|
||||
return bA.StringIndented("")
|
||||
}
|
||||
|
||||
// StringIndented returns the same thing as String(), but applies the indent
|
||||
// at every 10th bit, and twice at every 50th bit.
|
||||
func (bA *CompactBitArray) StringIndented(indent string) string {
|
||||
if bA == nil {
|
||||
return "nil-BitArray"
|
||||
}
|
||||
lines := []string{}
|
||||
bits := ""
|
||||
size := bA.Size()
|
||||
for i := 0; i < size; i++ {
|
||||
if bA.GetIndex(i) {
|
||||
bits += "x"
|
||||
} else {
|
||||
bits += "_"
|
||||
}
|
||||
if i%100 == 99 {
|
||||
lines = append(lines, bits)
|
||||
bits = ""
|
||||
}
|
||||
if i%10 == 9 {
|
||||
bits += indent
|
||||
}
|
||||
if i%50 == 49 {
|
||||
bits += indent
|
||||
}
|
||||
}
|
||||
if len(bits) > 0 {
|
||||
lines = append(lines, bits)
|
||||
}
|
||||
return fmt.Sprintf("BA{%v:%v}", size, strings.Join(lines, indent))
|
||||
}
|
||||
|
||||
// MarshalJSON implements json.Marshaler interface by marshaling bit array
|
||||
// using a custom format: a string of '-' or 'x' where 'x' denotes the 1 bit.
|
||||
func (bA *CompactBitArray) MarshalJSON() ([]byte, error) {
|
||||
if bA == nil {
|
||||
return []byte("null"), nil
|
||||
}
|
||||
|
||||
bits := `"`
|
||||
size := bA.Size()
|
||||
for i := 0; i < size; i++ {
|
||||
if bA.GetIndex(i) {
|
||||
bits += `x`
|
||||
} else {
|
||||
bits += `_`
|
||||
}
|
||||
}
|
||||
bits += `"`
|
||||
return []byte(bits), nil
|
||||
}
|
||||
|
||||
var bitArrayJSONRegexp = regexp.MustCompile(`\A"([_x]*)"\z`)
|
||||
|
||||
// UnmarshalJSON implements json.Unmarshaler interface by unmarshaling a custom
|
||||
// JSON description.
|
||||
func (bA *CompactBitArray) UnmarshalJSON(bz []byte) error {
|
||||
b := string(bz)
|
||||
if b == "null" {
|
||||
// This is required e.g. for encoding/json when decoding
|
||||
// into a pointer with pre-allocated BitArray.
|
||||
bA.ExtraBitsStored = 0
|
||||
bA.Elems = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
// Validate 'b'.
|
||||
match := bitArrayJSONRegexp.FindStringSubmatch(b)
|
||||
if match == nil {
|
||||
return fmt.Errorf("BitArray in JSON should be a string of format %q but got %s", bitArrayJSONRegexp.String(), b)
|
||||
}
|
||||
bits := match[1]
|
||||
|
||||
// Construct new CompactBitArray and copy over.
|
||||
numBits := len(bits)
|
||||
bA2 := NewCompactBitArray(numBits)
|
||||
for i := 0; i < numBits; i++ {
|
||||
if bits[i] == 'x' {
|
||||
bA2.SetIndex(i, true)
|
||||
}
|
||||
}
|
||||
*bA = *bA2
|
||||
return nil
|
||||
}
|
||||
|
||||
// CompactMarshal is a space efficient encoding for CompactBitArray.
|
||||
// It is not amino compatible.
|
||||
func (bA *CompactBitArray) CompactMarshal() []byte {
|
||||
size := bA.Size()
|
||||
if size <= 0 {
|
||||
return []byte("null")
|
||||
}
|
||||
bz := make([]byte, 0, size/8)
|
||||
// length prefix number of bits, not number of bytes. This difference
|
||||
// takes 3-4 bits in encoding, as opposed to instead encoding the number of
|
||||
// bytes (saving 3-4 bits) and including the offset as a full byte.
|
||||
bz = appendUvarint(bz, uint64(size))
|
||||
bz = append(bz, bA.Elems...)
|
||||
return bz
|
||||
}
|
||||
|
||||
// CompactUnmarshal is a space efficient decoding for CompactBitArray.
|
||||
// It is not amino compatible.
|
||||
func CompactUnmarshal(bz []byte) (*CompactBitArray, error) {
|
||||
if len(bz) < 2 {
|
||||
return nil, errors.New("compact bit array: invalid compact unmarshal size")
|
||||
} else if bytes.Equal(bz, []byte("null")) {
|
||||
return NewCompactBitArray(0), nil
|
||||
}
|
||||
size, n := binary.Uvarint(bz)
|
||||
bz = bz[n:]
|
||||
if len(bz) != int(size+7)/8 {
|
||||
return nil, errors.New("compact bit array: invalid compact unmarshal size")
|
||||
}
|
||||
|
||||
bA := &CompactBitArray{byte(int(size % 8)), bz}
|
||||
return bA, nil
|
||||
}
|
||||
|
||||
func appendUvarint(b []byte, x uint64) []byte {
|
||||
var a [binary.MaxVarintLen64]byte
|
||||
n := binary.PutUvarint(a[:], x)
|
||||
return append(b, a[:n]...)
|
||||
}
|
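A brief sketch of how the new CompactBitArray behaves, tying SetIndex, GetIndex and NumTrueBitsBefore together; the expected outputs follow from the definitions above and the example is not part of the diff:

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/crypto/multisig/bitarray"
)

func main() {
	// 10 bits -> 2 backing bytes, with ExtraBitsStored = 2.
	bA := bitarray.NewCompactBitArray(10)
	bA.SetIndex(1, true)
	bA.SetIndex(4, true)
	bA.SetIndex(9, true)

	fmt.Println(bA.Size())               // 10
	fmt.Println(bA.GetIndex(4))          // true
	fmt.Println(bA.NumTrueBitsBefore(9)) // 2: bits 1 and 4 are set before index 9
	fmt.Println(bA.String())             // BA{10:_x__x____x}
}
```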
196
crypto/multisig/bitarray/compact_bit_array_test.go
Normal file
@@ -0,0 +1,196 @@
|
||||
package bitarray
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"math/rand"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
)
|
||||
|
||||
func randCompactBitArray(bits int) (*CompactBitArray, []byte) {
|
||||
numBytes := (bits + 7) / 8
|
||||
src := cmn.RandBytes((bits + 7) / 8)
|
||||
bA := NewCompactBitArray(bits)
|
||||
|
||||
for i := 0; i < numBytes-1; i++ {
|
||||
for j := uint8(0); j < 8; j++ {
|
||||
bA.SetIndex(i*8+int(j), src[i]&(uint8(1)<<(8-j)) > 0)
|
||||
}
|
||||
}
|
||||
// Set remaining bits
|
||||
for i := uint8(0); i < 8-uint8(bA.ExtraBitsStored); i++ {
|
||||
bA.SetIndex(numBytes*8+int(i), src[numBytes-1]&(uint8(1)<<(8-i)) > 0)
|
||||
}
|
||||
return bA, src
|
||||
}
|
||||
|
||||
func TestNewBitArrayNeverCrashesOnNegatives(t *testing.T) {
|
||||
bitList := []int{-127, -128, -1 << 31}
|
||||
for _, bits := range bitList {
|
||||
bA := NewCompactBitArray(bits)
|
||||
require.Nil(t, bA)
|
||||
}
|
||||
}
|
||||
|
||||
func TestJSONMarshalUnmarshal(t *testing.T) {
|
||||
|
||||
bA1 := NewCompactBitArray(0)
|
||||
bA2 := NewCompactBitArray(1)
|
||||
|
||||
bA3 := NewCompactBitArray(1)
|
||||
bA3.SetIndex(0, true)
|
||||
|
||||
bA4 := NewCompactBitArray(5)
|
||||
bA4.SetIndex(0, true)
|
||||
bA4.SetIndex(1, true)
|
||||
|
||||
bA5 := NewCompactBitArray(9)
|
||||
bA5.SetIndex(0, true)
|
||||
bA5.SetIndex(1, true)
|
||||
bA5.SetIndex(8, true)
|
||||
|
||||
bA6 := NewCompactBitArray(16)
|
||||
bA6.SetIndex(0, true)
|
||||
bA6.SetIndex(1, true)
|
||||
bA6.SetIndex(8, false)
|
||||
bA6.SetIndex(15, true)
|
||||
|
||||
testCases := []struct {
|
||||
bA *CompactBitArray
|
||||
marshalledBA string
|
||||
}{
|
||||
{nil, `null`},
|
||||
{bA1, `null`},
|
||||
{bA2, `"_"`},
|
||||
{bA3, `"x"`},
|
||||
{bA4, `"xx___"`},
|
||||
{bA5, `"xx______x"`},
|
||||
{bA6, `"xx_____________x"`},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.bA.String(), func(t *testing.T) {
|
||||
bz, err := json.Marshal(tc.bA)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, tc.marshalledBA, string(bz))
|
||||
|
||||
var unmarshalledBA *CompactBitArray
|
||||
err = json.Unmarshal(bz, &unmarshalledBA)
|
||||
require.NoError(t, err)
|
||||
|
||||
if tc.bA == nil {
|
||||
require.Nil(t, unmarshalledBA)
|
||||
} else {
|
||||
require.NotNil(t, unmarshalledBA)
|
||||
assert.EqualValues(t, tc.bA.Elems, unmarshalledBA.Elems)
|
||||
if assert.EqualValues(t, tc.bA.String(), unmarshalledBA.String()) {
|
||||
assert.EqualValues(t, tc.bA.Elems, unmarshalledBA.Elems)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCompactMarshalUnmarshal(t *testing.T) {
|
||||
bA1 := NewCompactBitArray(0)
|
||||
bA2 := NewCompactBitArray(1)
|
||||
|
||||
bA3 := NewCompactBitArray(1)
|
||||
bA3.SetIndex(0, true)
|
||||
|
||||
bA4 := NewCompactBitArray(5)
|
||||
bA4.SetIndex(0, true)
|
||||
bA4.SetIndex(1, true)
|
||||
|
||||
bA5 := NewCompactBitArray(9)
|
||||
bA5.SetIndex(0, true)
|
||||
bA5.SetIndex(1, true)
|
||||
bA5.SetIndex(8, true)
|
||||
|
||||
bA6 := NewCompactBitArray(16)
|
||||
bA6.SetIndex(0, true)
|
||||
bA6.SetIndex(1, true)
|
||||
bA6.SetIndex(8, false)
|
||||
bA6.SetIndex(15, true)
|
||||
|
||||
testCases := []struct {
|
||||
bA *CompactBitArray
|
||||
marshalledBA []byte
|
||||
}{
|
||||
{nil, []byte("null")},
|
||||
{bA1, []byte("null")},
|
||||
{bA2, []byte{byte(1), byte(0)}},
|
||||
{bA3, []byte{byte(1), byte(128)}},
|
||||
{bA4, []byte{byte(5), byte(192)}},
|
||||
{bA5, []byte{byte(9), byte(192), byte(128)}},
|
||||
{bA6, []byte{byte(16), byte(192), byte(1)}},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.bA.String(), func(t *testing.T) {
|
||||
bz := tc.bA.CompactMarshal()
|
||||
|
||||
assert.Equal(t, tc.marshalledBA, bz)
|
||||
|
||||
unmarshalledBA, err := CompactUnmarshal(bz)
|
||||
require.NoError(t, err)
|
||||
if tc.bA == nil {
|
||||
require.Nil(t, unmarshalledBA)
|
||||
} else {
|
||||
require.NotNil(t, unmarshalledBA)
|
||||
assert.EqualValues(t, tc.bA.Elems, unmarshalledBA.Elems)
|
||||
if assert.EqualValues(t, tc.bA.String(), unmarshalledBA.String()) {
|
||||
assert.EqualValues(t, tc.bA.Elems, unmarshalledBA.Elems)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCompactBitArrayNumOfTrueBitsBefore(t *testing.T) {
|
||||
testCases := []struct {
|
||||
marshalledBA string
|
||||
bAIndex []int
|
||||
trueValueIndex []int
|
||||
}{
|
||||
{`"_____"`, []int{0, 1, 2, 3, 4}, []int{0, 0, 0, 0, 0}},
|
||||
{`"x"`, []int{0}, []int{0}},
|
||||
{`"_x"`, []int{1}, []int{0}},
|
||||
{`"x___xxxx"`, []int{0, 4, 5, 6, 7}, []int{0, 1, 2, 3, 4}},
|
||||
{`"__x_xx_x__x_x___"`, []int{2, 4, 5, 7, 10, 12}, []int{0, 1, 2, 3, 4, 5}},
|
||||
{`"______________xx"`, []int{14, 15}, []int{0, 1}},
|
||||
}
|
||||
for tcIndex, tc := range testCases {
|
||||
t.Run(tc.marshalledBA, func(t *testing.T) {
|
||||
var bA *CompactBitArray
|
||||
err := json.Unmarshal([]byte(tc.marshalledBA), &bA)
|
||||
require.NoError(t, err)
|
||||
|
||||
for i := 0; i < len(tc.bAIndex); i++ {
|
||||
require.Equal(t, tc.trueValueIndex[i], bA.NumTrueBitsBefore(tc.bAIndex[i]), "tc %d, i %d", tcIndex, i)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCompactBitArrayGetSetIndex(t *testing.T) {
|
||||
r := rand.New(rand.NewSource(100))
|
||||
numTests := 10
|
||||
numBitsPerArr := 100
|
||||
for i := 0; i < numTests; i++ {
|
||||
bits := r.Intn(1000)
|
||||
bA, _ := randCompactBitArray(bits)
|
||||
|
||||
for j := 0; j < numBitsPerArr; j++ {
|
||||
copy := bA.Copy()
|
||||
index := r.Intn(bits)
|
||||
val := (r.Int63() % 2) == 0
|
||||
bA.SetIndex(index, val)
|
||||
require.Equal(t, val, bA.GetIndex(index), "bA.SetIndex(%d, %v) failed on bit array: %s", index, val, copy)
|
||||
}
|
||||
}
|
||||
}
|
70
crypto/multisig/multisignature.go
Normal file
@@ -0,0 +1,70 @@
package multisig

import (
	"errors"

	"github.com/tendermint/tendermint/crypto"
	"github.com/tendermint/tendermint/crypto/multisig/bitarray"
)

// Multisignature is used to represent the signature object used in the multisigs.
// Sigs is a list of signatures, sorted by corresponding index.
type Multisignature struct {
	BitArray *bitarray.CompactBitArray
	Sigs     [][]byte
}

// NewMultisig returns a new Multisignature of size n.
func NewMultisig(n int) *Multisignature {
	// Default the signature list to have a capacity of two, since we can
	// expect that most multisigs will require multiple signers.
	return &Multisignature{bitarray.NewCompactBitArray(n), make([][]byte, 0, 2)}
}

// GetIndex returns the index of pk in keys. Returns -1 if not found
func getIndex(pk crypto.PubKey, keys []crypto.PubKey) int {
	for i := 0; i < len(keys); i++ {
		if pk.Equals(keys[i]) {
			return i
		}
	}
	return -1
}

// AddSignature adds a signature to the multisig, at the corresponding index.
// If the signature already exists, replace it.
func (mSig *Multisignature) AddSignature(sig []byte, index int) {
	newSigIndex := mSig.BitArray.NumTrueBitsBefore(index)
	// Signature already exists, just replace the value there
	if mSig.BitArray.GetIndex(index) {
		mSig.Sigs[newSigIndex] = sig
		return
	}
	mSig.BitArray.SetIndex(index, true)
	// Optimization if the index is the greatest index
	if newSigIndex == len(mSig.Sigs) {
		mSig.Sigs = append(mSig.Sigs, sig)
		return
	}
	// Expand slice by one with a dummy element, move all elements after i
	// over by one, then place the new signature in that gap.
	mSig.Sigs = append(mSig.Sigs, make([]byte, 0))
	copy(mSig.Sigs[newSigIndex+1:], mSig.Sigs[newSigIndex:])
	mSig.Sigs[newSigIndex] = sig
}

// AddSignatureFromPubKey adds a signature to the multisig,
// at the index in keys corresponding to the provided pubkey.
func (mSig *Multisignature) AddSignatureFromPubKey(sig []byte, pubkey crypto.PubKey, keys []crypto.PubKey) error {
	index := getIndex(pubkey, keys)
	if index == -1 {
		return errors.New("provided key didn't exist in pubkeys")
	}
	mSig.AddSignature(sig, index)
	return nil
}

// Marshal the multisignature with amino
func (mSig *Multisignature) Marshal() []byte {
	return cdc.MustMarshalBinaryBare(mSig)
}
92
crypto/multisig/threshold_pubkey.go
Normal file
@@ -0,0 +1,92 @@
|
||||
package multisig
|
||||
|
||||
import (
|
||||
"github.com/tendermint/tendermint/crypto"
|
||||
"github.com/tendermint/tendermint/crypto/tmhash"
|
||||
)
|
||||
|
||||
// PubKeyMultisigThreshold implements a K of N threshold multisig.
|
||||
type PubKeyMultisigThreshold struct {
|
||||
K uint `json:"threshold"`
|
||||
PubKeys []crypto.PubKey `json:"pubkeys"`
|
||||
}
|
||||
|
||||
var _ crypto.PubKey = &PubKeyMultisigThreshold{}
|
||||
|
||||
// NewPubKeyMultisigThreshold returns a new PubKeyMultisigThreshold.
|
||||
// Panics if len(pubkeys) < k or 0 >= k.
|
||||
func NewPubKeyMultisigThreshold(k int, pubkeys []crypto.PubKey) crypto.PubKey {
|
||||
if k <= 0 {
|
||||
panic("threshold k of n multisignature: k <= 0")
|
||||
}
|
||||
if len(pubkeys) < k {
|
||||
panic("threshold k of n multisignature: len(pubkeys) < k")
|
||||
}
|
||||
return &PubKeyMultisigThreshold{uint(k), pubkeys}
|
||||
}
|
||||
|
||||
// VerifyBytes expects sig to be an amino encoded version of a MultiSignature.
|
||||
// Returns true iff the multisignature contains k or more signatures
|
||||
// for the correct corresponding keys,
|
||||
// and all signatures are valid. (Not just k of the signatures)
|
||||
// The multisig uses a bitarray, so multiple signatures for the same key is not
|
||||
// a concern.
|
||||
func (pk *PubKeyMultisigThreshold) VerifyBytes(msg []byte, marshalledSig []byte) bool {
|
||||
var sig *Multisignature
|
||||
err := cdc.UnmarshalBinaryBare(marshalledSig, &sig)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
size := sig.BitArray.Size()
|
||||
// ensure bit array is the correct size
|
||||
if len(pk.PubKeys) != size {
|
||||
return false
|
||||
}
|
||||
// ensure size of signature list
|
||||
if len(sig.Sigs) < int(pk.K) || len(sig.Sigs) > size {
|
||||
return false
|
||||
}
|
||||
// ensure at least k signatures are set
|
||||
if sig.BitArray.NumTrueBitsBefore(size) < int(pk.K) {
|
||||
return false
|
||||
}
|
||||
// index in the list of signatures which we are concerned with.
|
||||
sigIndex := 0
|
||||
for i := 0; i < size; i++ {
|
||||
if sig.BitArray.GetIndex(i) {
|
||||
if !pk.PubKeys[i].VerifyBytes(msg, sig.Sigs[sigIndex]) {
|
||||
return false
|
||||
}
|
||||
sigIndex++
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Bytes returns the amino encoded version of the PubKeyMultisigThreshold
|
||||
func (pk *PubKeyMultisigThreshold) Bytes() []byte {
|
||||
return cdc.MustMarshalBinaryBare(pk)
|
||||
}
|
||||
|
||||
// Address returns tmhash(PubKeyMultisigThreshold.Bytes())
|
||||
func (pk *PubKeyMultisigThreshold) Address() crypto.Address {
|
||||
return crypto.Address(tmhash.Sum(pk.Bytes()))
|
||||
}
|
||||
|
||||
// Equals returns true iff pk and other both have the same number of keys, and
|
||||
// all constituent keys are the same, and in the same order.
|
||||
func (pk *PubKeyMultisigThreshold) Equals(other crypto.PubKey) bool {
|
||||
otherKey, sameType := other.(*PubKeyMultisigThreshold)
|
||||
if !sameType {
|
||||
return false
|
||||
}
|
||||
if pk.K != otherKey.K || len(pk.PubKeys) != len(otherKey.PubKeys) {
|
||||
return false
|
||||
}
|
||||
for i := 0; i < len(pk.PubKeys); i++ {
|
||||
if !pk.PubKeys[i].Equals(otherKey.PubKeys[i]) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
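Putting the new multisig pieces together, a sketch of a 2-of-3 threshold flow using the types added in this changeset; the key algorithm and message are arbitrary and the example is illustrative, not part of the diff:

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/crypto"
	"github.com/tendermint/tendermint/crypto/ed25519"
	"github.com/tendermint/tendermint/crypto/multisig"
)

func main() {
	msg := []byte("consensus payload")

	// Three signers, threshold of two.
	privs := []crypto.PrivKey{ed25519.GenPrivKey(), ed25519.GenPrivKey(), ed25519.GenPrivKey()}
	pubs := make([]crypto.PubKey, len(privs))
	for i, p := range privs {
		pubs[i] = p.PubKey()
	}
	multisigPub := multisig.NewPubKeyMultisigThreshold(2, pubs)

	// Collect signatures from signers 0 and 2; the bit array keeps them
	// ordered by key index regardless of arrival order.
	sig := multisig.NewMultisig(len(pubs))
	for _, i := range []int{2, 0} {
		s, err := privs[i].Sign(msg)
		if err != nil {
			panic(err)
		}
		if err := sig.AddSignatureFromPubKey(s, pubs[i], pubs); err != nil {
			panic(err)
		}
	}

	// Two valid signatures out of three keys: verification succeeds.
	fmt.Println(multisigPub.VerifyBytes(msg, sig.Marshal())) // true
}
```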
112
crypto/multisig/threshold_pubkey_test.go
Normal file
@@ -0,0 +1,112 @@
|
||||
package multisig
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/tendermint/tendermint/crypto"
|
||||
"github.com/tendermint/tendermint/crypto/ed25519"
|
||||
"github.com/tendermint/tendermint/crypto/secp256k1"
|
||||
)
|
||||
|
||||
// This tests multisig functionality, but it expects the first k signatures to be valid
|
||||
// TODO: Adapt it to give more flexibility about first k signatures being valid
|
||||
func TestThresholdMultisigValidCases(t *testing.T) {
|
||||
pkSet1, sigSet1 := generatePubKeysAndSignatures(5, []byte{1, 2, 3, 4})
|
||||
cases := []struct {
|
||||
msg []byte
|
||||
k int
|
||||
pubkeys []crypto.PubKey
|
||||
signingIndices []int
|
||||
// signatures should be the same size as signingIndices.
|
||||
signatures [][]byte
|
||||
passAfterKSignatures []bool
|
||||
}{
|
||||
{
|
||||
msg: []byte{1, 2, 3, 4},
|
||||
k: 2,
|
||||
pubkeys: pkSet1,
|
||||
signingIndices: []int{0, 3, 1},
|
||||
signatures: sigSet1,
|
||||
passAfterKSignatures: []bool{false},
|
||||
},
|
||||
}
|
||||
for tcIndex, tc := range cases {
|
||||
multisigKey := NewPubKeyMultisigThreshold(tc.k, tc.pubkeys)
|
||||
multisignature := NewMultisig(len(tc.pubkeys))
|
||||
for i := 0; i < tc.k-1; i++ {
|
||||
signingIndex := tc.signingIndices[i]
|
||||
multisignature.AddSignatureFromPubKey(tc.signatures[signingIndex], tc.pubkeys[signingIndex], tc.pubkeys)
|
||||
require.False(t, multisigKey.VerifyBytes(tc.msg, multisignature.Marshal()),
|
||||
"multisig passed when i < k, tc %d, i %d", tcIndex, i)
|
||||
multisignature.AddSignatureFromPubKey(tc.signatures[signingIndex], tc.pubkeys[signingIndex], tc.pubkeys)
|
||||
require.Equal(t, i+1, len(multisignature.Sigs),
|
||||
"adding a signature for the same pubkey twice increased signature count by 2, tc %d", tcIndex)
|
||||
}
|
||||
require.False(t, multisigKey.VerifyBytes(tc.msg, multisignature.Marshal()),
|
||||
"multisig passed with k - 1 sigs, tc %d", tcIndex)
|
||||
multisignature.AddSignatureFromPubKey(tc.signatures[tc.signingIndices[tc.k]], tc.pubkeys[tc.signingIndices[tc.k]], tc.pubkeys)
|
||||
require.True(t, multisigKey.VerifyBytes(tc.msg, multisignature.Marshal()),
|
||||
"multisig failed after k good signatures, tc %d", tcIndex)
|
||||
for i := tc.k + 1; i < len(tc.signingIndices); i++ {
|
||||
signingIndex := tc.signingIndices[i]
|
||||
multisignature.AddSignatureFromPubKey(tc.signatures[signingIndex], tc.pubkeys[signingIndex], tc.pubkeys)
|
||||
require.Equal(t, tc.passAfterKSignatures[i-tc.k-1],
|
||||
multisigKey.VerifyBytes(tc.msg, multisignature.Marshal()),
|
||||
"multisig didn't verify as expected after k sigs, tc %d, i %d", tcIndex, i)
|
||||
|
||||
multisignature.AddSignatureFromPubKey(tc.signatures[signingIndex], tc.pubkeys[signingIndex], tc.pubkeys)
|
||||
require.Equal(t, i+1, len(multisignature.Sigs),
|
||||
"adding a signature for the same pubkey twice increased signature count by 2, tc %d", tcIndex)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: Fully replace this test with table driven tests
|
||||
func TestThresholdMultisigDuplicateSignatures(t *testing.T) {
|
||||
msg := []byte{1, 2, 3, 4, 5}
|
||||
pubkeys, sigs := generatePubKeysAndSignatures(5, msg)
|
||||
multisigKey := NewPubKeyMultisigThreshold(2, pubkeys)
|
||||
multisignature := NewMultisig(5)
|
||||
require.False(t, multisigKey.VerifyBytes(msg, multisignature.Marshal()))
|
||||
multisignature.AddSignatureFromPubKey(sigs[0], pubkeys[0], pubkeys)
|
||||
// Add second signature manually
|
||||
multisignature.Sigs = append(multisignature.Sigs, sigs[0])
|
||||
require.False(t, multisigKey.VerifyBytes(msg, multisignature.Marshal()))
|
||||
}
|
||||
|
||||
// TODO: Fully replace this test with table driven tests
|
||||
func TestMultiSigPubKeyEquality(t *testing.T) {
|
||||
msg := []byte{1, 2, 3, 4}
|
||||
pubkeys, _ := generatePubKeysAndSignatures(5, msg)
|
||||
multisigKey := NewPubKeyMultisigThreshold(2, pubkeys)
|
||||
var unmarshalledMultisig *PubKeyMultisigThreshold
|
||||
cdc.MustUnmarshalBinaryBare(multisigKey.Bytes(), &unmarshalledMultisig)
|
||||
require.True(t, multisigKey.Equals(unmarshalledMultisig))
|
||||
|
||||
// Ensure that reordering pubkeys is treated as a different pubkey
|
||||
pubkeysCpy := make([]crypto.PubKey, 5)
|
||||
copy(pubkeysCpy, pubkeys)
|
||||
pubkeysCpy[4] = pubkeys[3]
|
||||
pubkeysCpy[3] = pubkeys[4]
|
||||
multisigKey2 := NewPubKeyMultisigThreshold(2, pubkeysCpy)
|
||||
require.False(t, multisigKey.Equals(multisigKey2))
|
||||
}
|
||||
|
||||
func generatePubKeysAndSignatures(n int, msg []byte) (pubkeys []crypto.PubKey, signatures [][]byte) {
|
||||
pubkeys = make([]crypto.PubKey, n)
|
||||
signatures = make([][]byte, n)
|
||||
for i := 0; i < n; i++ {
|
||||
var privkey crypto.PrivKey
|
||||
if rand.Int63()%2 == 0 {
|
||||
privkey = ed25519.GenPrivKey()
|
||||
} else {
|
||||
privkey = secp256k1.GenPrivKey()
|
||||
}
|
||||
pubkeys[i] = privkey.PubKey()
|
||||
signatures[i], _ = privkey.Sign(msg)
|
||||
}
|
||||
return
|
||||
}
|
26
crypto/multisig/wire.go
Normal file
@@ -0,0 +1,26 @@
package multisig

import (
	amino "github.com/tendermint/go-amino"
	"github.com/tendermint/tendermint/crypto"
	"github.com/tendermint/tendermint/crypto/ed25519"
	"github.com/tendermint/tendermint/crypto/secp256k1"
)

// TODO: Figure out API for others to either add their own pubkey types, or
// to make verify / marshal accept a cdc.
const (
	PubKeyMultisigThresholdAminoRoute = "tendermint/PubKeyMultisigThreshold"
)

var cdc = amino.NewCodec()

func init() {
	cdc.RegisterInterface((*crypto.PubKey)(nil), nil)
	cdc.RegisterConcrete(PubKeyMultisigThreshold{},
		PubKeyMultisigThresholdAminoRoute, nil)
	cdc.RegisterConcrete(ed25519.PubKeyEd25519{},
		ed25519.PubKeyAminoRoute, nil)
	cdc.RegisterConcrete(secp256k1.PubKeySecp256k1{},
		secp256k1.PubKeyAminoRoute, nil)
}
@@ -1,164 +0,0 @@
|
||||
package crypto
|
||||
|
||||
import (
|
||||
"crypto/subtle"
|
||||
|
||||
secp256k1 "github.com/btcsuite/btcd/btcec"
|
||||
"github.com/tendermint/ed25519"
|
||||
"github.com/tendermint/ed25519/extra25519"
|
||||
)
|
||||
|
||||
func PrivKeyFromBytes(privKeyBytes []byte) (privKey PrivKey, err error) {
|
||||
err = cdc.UnmarshalBinaryBare(privKeyBytes, &privKey)
|
||||
return
|
||||
}
|
||||
|
||||
//----------------------------------------
|
||||
|
||||
type PrivKey interface {
|
||||
Bytes() []byte
|
||||
Sign(msg []byte) (Signature, error)
|
||||
PubKey() PubKey
|
||||
Equals(PrivKey) bool
|
||||
}
|
||||
|
||||
//-------------------------------------
|
||||
|
||||
var _ PrivKey = PrivKeyEd25519{}
|
||||
|
||||
// Implements PrivKey
|
||||
type PrivKeyEd25519 [64]byte
|
||||
|
||||
func (privKey PrivKeyEd25519) Bytes() []byte {
|
||||
return cdc.MustMarshalBinaryBare(privKey)
|
||||
}
|
||||
|
||||
func (privKey PrivKeyEd25519) Sign(msg []byte) (Signature, error) {
|
||||
privKeyBytes := [64]byte(privKey)
|
||||
signatureBytes := ed25519.Sign(&privKeyBytes, msg)
|
||||
return SignatureEd25519(*signatureBytes), nil
|
||||
}
|
||||
|
||||
func (privKey PrivKeyEd25519) PubKey() PubKey {
|
||||
privKeyBytes := [64]byte(privKey)
|
||||
pubBytes := *ed25519.MakePublicKey(&privKeyBytes)
|
||||
return PubKeyEd25519(pubBytes)
|
||||
}
|
||||
|
||||
// Equals - you probably don't need to use this.
|
||||
// Runs in constant time based on length of the keys.
|
||||
func (privKey PrivKeyEd25519) Equals(other PrivKey) bool {
|
||||
if otherEd, ok := other.(PrivKeyEd25519); ok {
|
||||
return subtle.ConstantTimeCompare(privKey[:], otherEd[:]) == 1
|
||||
} else {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func (privKey PrivKeyEd25519) ToCurve25519() *[32]byte {
|
||||
keyCurve25519 := new([32]byte)
|
||||
privKeyBytes := [64]byte(privKey)
|
||||
extra25519.PrivateKeyToCurve25519(keyCurve25519, &privKeyBytes)
|
||||
return keyCurve25519
|
||||
}
|
||||
|
||||
// Deterministically generates new priv-key bytes from key.
|
||||
func (privKey PrivKeyEd25519) Generate(index int) PrivKeyEd25519 {
|
||||
bz, err := cdc.MarshalBinaryBare(struct {
|
||||
PrivKey [64]byte
|
||||
Index int
|
||||
}{privKey, index})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
newBytes := Sha256(bz)
|
||||
newKey := new([64]byte)
|
||||
copy(newKey[:32], newBytes)
|
||||
ed25519.MakePublicKey(newKey)
|
||||
return PrivKeyEd25519(*newKey)
|
||||
}
|
||||
|
||||
func GenPrivKeyEd25519() PrivKeyEd25519 {
|
||||
privKeyBytes := new([64]byte)
|
||||
copy(privKeyBytes[:32], CRandBytes(32))
|
||||
ed25519.MakePublicKey(privKeyBytes)
|
||||
return PrivKeyEd25519(*privKeyBytes)
|
||||
}
|
||||
|
||||
// NOTE: secret should be the output of a KDF like bcrypt,
|
||||
// if it's derived from user input.
|
||||
func GenPrivKeyEd25519FromSecret(secret []byte) PrivKeyEd25519 {
|
||||
privKey32 := Sha256(secret) // Not Ripemd160 because we want 32 bytes.
|
||||
privKeyBytes := new([64]byte)
|
||||
copy(privKeyBytes[:32], privKey32)
|
||||
ed25519.MakePublicKey(privKeyBytes)
|
||||
return PrivKeyEd25519(*privKeyBytes)
|
||||
}
|
||||
|
||||
//-------------------------------------
|
||||
|
||||
var _ PrivKey = PrivKeySecp256k1{}
|
||||
|
||||
// Implements PrivKey
|
||||
type PrivKeySecp256k1 [32]byte
|
||||
|
||||
func (privKey PrivKeySecp256k1) Bytes() []byte {
|
||||
return cdc.MustMarshalBinaryBare(privKey)
|
||||
}
|
||||
|
||||
func (privKey PrivKeySecp256k1) Sign(msg []byte) (Signature, error) {
|
||||
priv__, _ := secp256k1.PrivKeyFromBytes(secp256k1.S256(), privKey[:])
|
||||
sig__, err := priv__.Sign(Sha256(msg))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return SignatureSecp256k1(sig__.Serialize()), nil
|
||||
}
|
||||
|
||||
func (privKey PrivKeySecp256k1) PubKey() PubKey {
|
||||
_, pub__ := secp256k1.PrivKeyFromBytes(secp256k1.S256(), privKey[:])
|
||||
var pub PubKeySecp256k1
|
||||
copy(pub[:], pub__.SerializeCompressed())
|
||||
return pub
|
||||
}
|
||||
|
||||
// Equals - you probably don't need to use this.
|
||||
// Runs in constant time based on length of the keys.
|
||||
func (privKey PrivKeySecp256k1) Equals(other PrivKey) bool {
|
||||
if otherSecp, ok := other.(PrivKeySecp256k1); ok {
|
||||
return subtle.ConstantTimeCompare(privKey[:], otherSecp[:]) == 1
|
||||
} else {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
// Deterministically generates new priv-key bytes from key.
|
||||
func (key PrivKeySecp256k1) Generate(index int) PrivKeySecp256k1 {
|
||||
newBytes := cdc.BinarySha256(struct {
|
||||
PrivKey [64]byte
|
||||
Index int
|
||||
}{key, index})
|
||||
var newKey [64]byte
|
||||
copy(newKey[:], newBytes)
|
||||
return PrivKeySecp256k1(newKey)
|
||||
}
|
||||
*/
|
||||
|
||||
func GenPrivKeySecp256k1() PrivKeySecp256k1 {
|
||||
privKeyBytes := [32]byte{}
|
||||
copy(privKeyBytes[:], CRandBytes(32))
|
||||
priv, _ := secp256k1.PrivKeyFromBytes(secp256k1.S256(), privKeyBytes[:])
|
||||
copy(privKeyBytes[:], priv.Serialize())
|
||||
return PrivKeySecp256k1(privKeyBytes)
|
||||
}
|
||||
|
||||
// NOTE: secret should be the output of a KDF like bcrypt,
|
||||
// if it's derived from user input.
|
||||
func GenPrivKeySecp256k1FromSecret(secret []byte) PrivKeySecp256k1 {
|
||||
privKey32 := Sha256(secret) // Not Ripemd160 because we want 32 bytes.
|
||||
priv, _ := secp256k1.PrivKeyFromBytes(secp256k1.S256(), privKey32)
|
||||
privKeyBytes := [32]byte{}
|
||||
copy(privKeyBytes[:], priv.Serialize())
|
||||
return PrivKeySecp256k1(privKeyBytes)
|
||||
}
|
@@ -1,60 +0,0 @@
|
||||
package crypto_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/tendermint/tendermint/crypto"
|
||||
)
|
||||
|
||||
func TestGeneratePrivKey(t *testing.T) {
|
||||
testPriv := crypto.GenPrivKeyEd25519()
|
||||
testGenerate := testPriv.Generate(1)
|
||||
signBytes := []byte("something to sign")
|
||||
pub := testGenerate.PubKey()
|
||||
sig, err := testGenerate.Sign(signBytes)
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, pub.VerifyBytes(signBytes, sig))
|
||||
}
|
||||
|
||||
/*
|
||||
|
||||
type BadKey struct {
|
||||
PrivKeyEd25519
|
||||
}
|
||||
|
||||
func TestReadPrivKey(t *testing.T) {
|
||||
assert, require := assert.New(t), require.New(t)
|
||||
|
||||
// garbage in, garbage out
|
||||
garbage := []byte("hjgewugfbiewgofwgewr")
|
||||
XXX This test wants to register BadKey globally to crypto,
|
||||
but we don't want to support that.
|
||||
_, err := PrivKeyFromBytes(garbage)
|
||||
require.Error(err)
|
||||
|
||||
edKey := GenPrivKeyEd25519()
|
||||
badKey := BadKey{edKey}
|
||||
|
||||
cases := []struct {
|
||||
key PrivKey
|
||||
valid bool
|
||||
}{
|
||||
{edKey, true},
|
||||
{badKey, false},
|
||||
}
|
||||
|
||||
for i, tc := range cases {
|
||||
data := tc.key.Bytes()
|
||||
fmt.Println(">>>", data)
|
||||
key, err := PrivKeyFromBytes(data)
|
||||
fmt.Printf("!!! %#v\n", key, err)
|
||||
if tc.valid {
|
||||
assert.NoError(err, "%d", i)
|
||||
assert.Equal(tc.key, key, "%d", i)
|
||||
} else {
|
||||
assert.Error(err, "%d: %#v", i, key)
|
||||
}
|
||||
}
|
||||
}
|
||||
*/
|