Mirror of https://github.com/fluencelabs/tendermint (synced 2025-07-17 13:21:57 +00:00)

Compare commits: v0.19.7...v0.20.1-rc (96 commits)
Commits in this compare:

4b2348f697, 6a324764ac, 3470e5d7b3, a519825bf8, c84be3b8dd, 050636d5ce, 27bd1deabe, 76c82fd433,
9481cabd50, 46b957929c, 512e563d4b, a6a4fc7784, bfcec02423, fcf61b8088, 46fb179605, 89925501f3,
6b8613b3e7, 8be27494bb, c661a3ec21, 8e45348737, 7dfc74a6b6, 2edc68c59b, fe4123684d, 2897685c57,
71556c62eb, 54e61468d4, 5c7ccbd4a7, e2f5a6fbe4, aa8be33da1, 909f66e841, 3d2c4fd309, e5bca1df6f,
866bcceb35, e1e6878a4d, e4147b6f1a, 7606b7595f, 485b4a0c6f, 575d94dbb9, ebd2fe7a68, f28eae7816,
e13c1ab735, 0e0461d9bc, 057e076ca9, 775fef31c2, 9cb079dcc6, 67180344b7, 825fdf2c24, 61002ad264,
41e847ec97, 55bae62d71, d2259696af, 32719123d9, 2ce8179c8b, b8c076ca79, 1b2e34738a, 566024b64f,
932381effa, 2007c66091, 97c5533c35, 3d33226e80, edb851280a, dd62f06994, 19d95b5410, 53937a8129,
f1c53c7358, 3445f1206e, 097f778c1e, c85c21d1bc, fd4db8dfdc, 1318bd18cd, ea896865a7, aeb91dfc22,
5727916c5b, 876c8f14e7, 67416feb3a, 8706ae765c, 954a8941ff, 1f22f34edf, 0562009275, fedd07c522,
3fa734ef5a, cd6bfdc42f, 98b0c51b5f, c777be256a, d66f8bf829, bf370d36c2, 1c643701f5, 0b0290bdb2,
a4779fdf51, 7030d5c2a7, f34d1009c4, 0e3dc32b3d, d292fa4541, 3255c076e5, 978277a4c1, 58eb76f34d
@@ -3,7 +3,7 @@ version: 2
 defaults: &defaults
   working_directory: /go/src/github.com/tendermint/tendermint
   docker:
-    - image: circleci/golang:1.10.0
+    - image: circleci/golang:1.10.3
   environment:
     GOBIN: /tmp/workspace/bin

@@ -133,18 +133,21 @@ jobs:
         key: v1-pkg-cache
     - restore_cache:
         key: v1-tree-{{ .Environment.CIRCLE_SHA1 }}
+    - run: mkdir -p /tmp/logs
     - run:
         name: Run tests
         command: |
           for pkg in $(go list github.com/tendermint/tendermint/... | grep -v /vendor/ | circleci tests split --split-by=timings); do
             id=$(basename "$pkg")

-            GOCACHE=off go test -v -timeout 5m -race -coverprofile=/tmp/workspace/profiles/$id.out -covermode=atomic "$pkg"
+            GOCACHE=off go test -v -timeout 5m -race -coverprofile=/tmp/workspace/profiles/$id.out -covermode=atomic "$pkg" | tee "/tmp/logs/$id-$RANDOM.log"
           done
     - persist_to_workspace:
         root: /tmp/workspace
         paths:
           - "profiles/*"
+    - store_artifacts:
+        path: /tmp/logs

   test_persistence:
     <<: *defaults

@@ -196,9 +199,6 @@ workflows:
   test-suite:
     jobs:
       - setup_dependencies
-      - build_slate:
-          requires:
-            - setup_dependencies
      - setup_abci:
          requires:
            - setup_dependencies
4 .github/CODEOWNERS vendored

@@ -1,4 +1,4 @@
 # CODEOWNERS: https://help.github.com/articles/about-codeowners/

-# Everything goes through Bucky and Anton. For now.
-* @ebuchman @melekes
+# Everything goes through Bucky, Anton, Alex. For now.
+* @ebuchman @melekes @xla
74 CHANGELOG.md

@@ -1,5 +1,71 @@
 # Changelog

+## 0.20.1
+
+*June 18th, 2018*
+
+BUG FIXES
+
+- [consensus] Fix #1754 where we don't make blocks when `create_empty_blocks=false`
+- [mempool] Fix #1761 where we don't process txs if `cache_size=0`
+
+## 0.20.0
+
+*June 6th, 2018*
+
+This is the first in a series of breaking releases coming to Tendermint after
+soliciting developer feedback and conducting security audits.
+
+This release does not break any blockchain data structures or
+protocols other than the ABCI messages between Tendermint and the application.
+
+Applications that upgrade for ABCI v0.11.0 should be able to continue running Tendermint
+v0.20.0 on blockchains created with v0.19.X
+
+BREAKING CHANGES
+
+- [abci] Upgrade to
+  [v0.11.0](https://github.com/tendermint/abci/blob/master/CHANGELOG.md#0110)
+- [abci] Change Query path for filtering peers by node ID from
+  `p2p/filter/pubkey/<id>` to `p2p/filter/id/<id>`
+
+## 0.19.9
+
+*June 5th, 2018*
+
+BREAKING CHANGES
+
+- [types/priv_validator] Moved to top level `privval` package
+
+FEATURES
+
+- [config] Collapse PeerConfig into P2PConfig
+- [docs] Add quick-install script
+- [docs/spec] Add table of Amino prefixes
+
+BUG FIXES
+
+- [rpc] Return 404 for unknown endpoints
+- [consensus] Flush WAL on stop
+- [evidence] Don't send evidence to peers that are behind
+- [p2p] Fix memory leak on peer disconnects
+- [rpc] Fix panic when `per_page=0`
+
+## 0.19.8
+
+*June 4th, 2018*
+
+BREAKING:
+
+- [p2p] Remove `auth_enc` config option, peer connections are always auth
+  encrypted. Technically a breaking change but seems no one was using it and
+  arguably a bug fix :)
+
+BUG FIXES
+
+- [mempool] Fix deadlock under high load when `skip_timeout_commit=true` and
+  `create_empty_blocks=false`
+
 ## 0.19.7

 *May 31st, 2018*

@@ -19,7 +85,7 @@ FEATURES

 IMPROVEMENTS:

-- [consensus] consensus reactor now receives events from a separate event bus,
+- [consensus] Consensus reactor now receives events from a separate synchronous event bus,
   which is not dependant on external RPC load
 - [consensus/wal] do not look for height in older files if we've seen height - 1
 - [docs] Various cleanup and link fixes

@@ -32,6 +98,12 @@ BUG FIXES

 - [blockchain] Fix fast-sync deadlock during high peer turnover

+BUG FIX:
+
+- [evidence] Dont send peers evidence from heights they haven't synced to yet
+- [p2p] Refuse connections to more than one peer with the same IP
+- [docs] Various fixes
+
 ## 0.19.5

 *May 20th, 2018*
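The 0.20.0 entry above changes the ABCI Query path Tendermint uses to ask the application whether to accept a peer. As a minimal sketch of the application side (the `peerFilterApp` type and its banned-ID set are illustrative, not part of this repository; the abci types are assumed to follow v0.11.0):

package main

import (
	"strings"

	"github.com/tendermint/abci/types"
)

// peerFilterApp is a hypothetical application that rejects peers by node ID.
type peerFilterApp struct {
	types.BaseApplication
	bannedIDs map[string]bool
}

// Query handles peer-filter queries. Tendermint v0.20.0 asks
// "p2p/filter/id/<ID>" where it previously asked "p2p/filter/pubkey/<ID>".
func (app *peerFilterApp) Query(req types.RequestQuery) types.ResponseQuery {
	const prefix = "p2p/filter/id/"
	if strings.HasPrefix(req.Path, prefix) {
		id := strings.TrimPrefix(req.Path, prefix)
		if app.bannedIDs[id] {
			// Any non-zero code tells Tendermint to refuse the connection.
			return types.ResponseQuery{Code: 1, Log: "peer is banned"}
		}
	}
	return types.ResponseQuery{Code: 0}
}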
46 Gopkg.lock generated

@@ -5,7 +5,7 @@
   branch = "master"
   name = "github.com/btcsuite/btcd"
   packages = ["btcec"]
-  revision = "675abc5df3c5531bc741b56a765e35623459da6d"
+  revision = "86fed781132ac890ee03e906e4ecd5d6fa180c64"

 [[projects]]
   name = "github.com/davecgh/go-spew"

@@ -82,7 +82,7 @@
   branch = "master"
   name = "github.com/golang/snappy"
   packages = ["."]
-  revision = "553a641470496b2327abcac10b36396bd98e45c9"
+  revision = "2e65f85255dbc3072edf28d6b5b8efc472979f5a"

 [[projects]]
   name = "github.com/gorilla/websocket"

@@ -128,20 +128,20 @@
 [[projects]]
   name = "github.com/magiconair/properties"
   packages = ["."]
-  revision = "c3beff4c2358b44d0493c7dda585e7db7ff28ae6"
-  version = "v1.7.6"
+  revision = "c2353362d570a7bfa228149c62842019201cfb71"
+  version = "v1.8.0"

 [[projects]]
   branch = "master"
   name = "github.com/mitchellh/mapstructure"
   packages = ["."]
-  revision = "00c29f56e2386353d58c599509e8dc3801b0d716"
+  revision = "bb74f1db0675b241733089d5a1faa5dd8b0ef57b"

 [[projects]]
   name = "github.com/pelletier/go-toml"
   packages = ["."]
-  revision = "acdc4509485b587f5e675510c4f2c63e90ff68a8"
-  version = "v1.1.0"
+  revision = "c01d1270ff3e442a8a57cddc1c92dc1138598194"
+  version = "v1.2.0"

 [[projects]]
   name = "github.com/pkg/errors"

@@ -159,7 +159,7 @@
   branch = "master"
   name = "github.com/rcrowley/go-metrics"
   packages = ["."]
-  revision = "d932a24a8ccb8fcadc993e5c6c58f93dac168294"
+  revision = "e2704e165165ec55d062f5919b4b29494e9fa790"

 [[projects]]
   name = "github.com/spf13/afero"

@@ -179,8 +179,8 @@
 [[projects]]
   name = "github.com/spf13/cobra"
   packages = ["."]
-  revision = "a1f051bc3eba734da4772d60e2d677f47cf93ef4"
-  version = "v0.0.2"
+  revision = "ef82de70bb3f60c65fb8eebacbb2d122ef517385"
+  version = "v0.0.3"

 [[projects]]
   branch = "master"

@@ -226,7 +226,7 @@
     "leveldb/table",
     "leveldb/util"
   ]
-  revision = "714f901b98fdb3aa954b4193d8cbd64a28d80cad"
+  revision = "5d6fca44a948d2be89a9702de7717f0168403d3d"

 [[projects]]
   name = "github.com/tendermint/abci"

@@ -238,8 +238,8 @@
     "server",
     "types"
   ]
-  revision = "78a8905690ef54f9d57e3b2b0ee7ad3a04ef3f1f"
-  version = "v0.10.3"
+  revision = "ebee2fe114020aa49c70bbbae50b7079fc7e7b90"
+  version = "v0.11.0"

 [[projects]]
   branch = "master"

@@ -263,12 +263,6 @@
   revision = "915416979bf70efa4bcbf1c6cd5d64c5fff9fc19"
   version = "v0.6.2"

-[[projects]]
-  name = "github.com/tendermint/go-wire"
-  packages = ["."]
-  revision = "fa721242b042ecd4c6ed1a934ee740db4f74e45c"
-  version = "v0.7.3"
-
 [[projects]]
   name = "github.com/tendermint/tmlibs"
   packages = [

@@ -283,8 +277,8 @@
     "merkle",
     "test"
   ]
-  revision = "cc5f287c4798ffe88c04d02df219ecb6932080fd"
-  version = "v0.8.3-rc0"
+  revision = "692f1d86a6e2c0efa698fd1e4541b68c74ffaf38"
+  version = "v0.8.4"

 [[projects]]
   branch = "master"

@@ -299,7 +293,7 @@
     "ripemd160",
     "salsa20/salsa"
   ]
-  revision = "b0697eccbea9adec5b7ba8008f4c33d98d733388"
+  revision = "b47b1587369238182299fe4dad77d05b8b461e06"

 [[projects]]
   branch = "master"

@@ -311,16 +305,15 @@
     "http2/hpack",
     "idna",
     "internal/timeseries",
-    "lex/httplex",
     "trace"
   ]
-  revision = "5f9ae10d9af5b1c89ae6904293b14b064d4ada23"
+  revision = "1e491301e022f8f977054da4c2d852decd59571f"

 [[projects]]
   branch = "master"
   name = "golang.org/x/sys"
   packages = ["unix"]
-  revision = "bb9c189858d91f42db229b04d45a4c3d23a7662a"
+  revision = "9527bec2660bd847c050fda93a0f0c6dee0800bb"

 [[projects]]
   name = "golang.org/x/text"

@@ -344,7 +337,6 @@
   version = "v0.3.0"

 [[projects]]
-  branch = "master"
   name = "google.golang.org/genproto"
   packages = ["googleapis/rpc/status"]
   revision = "7fd901a49ba6a7f87732eb344f6e3c5b19d1b200"

@@ -382,6 +374,6 @@
 [solve-meta]
   analyzer-name = "dep"
   analyzer-version = 1
-  inputs-digest = "d85c98dcac32cc1fe05d006aa75e8985f6447a150a041b972a673a65e7681da9"
+  inputs-digest = "ae6792578b0664708339f4c05e020687d6b799ad6f8282394b919de69e403d1f"
   solver-name = "gps-cdcl"
   solver-version = 1
11 Gopkg.toml

@@ -71,7 +71,7 @@

 [[constraint]]
   name = "github.com/tendermint/abci"
-  version = "~0.10.3"
+  version = "~0.11.0"

 [[constraint]]
   name = "github.com/tendermint/go-crypto"

@@ -79,16 +79,21 @@

 [[constraint]]
   name = "github.com/tendermint/go-amino"
-  version = "0.9.9"
+  version = "=0.9.9"

 [[override]]
   name = "github.com/tendermint/tmlibs"
-  version = "~0.8.3-rc0"
+  version = "~0.8.4"

 [[constraint]]
   name = "google.golang.org/grpc"
   version = "~1.7.3"

+# this got updated and broke, so locked to an old working commit ...
+[[override]]
+  name = "google.golang.org/genproto"
+  revision = "7fd901a49ba6a7f87732eb344f6e3c5b19d1b200"
+
 [prune]
   go-tests = true
   unused-packages = true
@@ -36,7 +36,7 @@ func newBlockchainReactor(logger log.Logger, maxBlockHeight int64) *BlockchainRe
 	fastSync := true
 	var nilApp proxy.AppConnConsensus
 	blockExec := sm.NewBlockExecutor(dbm.NewMemDB(), log.TestingLogger(), nilApp,
-		types.MockMempool{}, types.MockEvidencePool{})
+		sm.MockMempool{}, sm.MockEvidencePool{})

 	bcReactor := NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync)
 	bcReactor.SetLogger(logger.With("module", "blockchain"))
@@ -8,7 +8,7 @@ import (
 	cmn "github.com/tendermint/tmlibs/common"
 	"github.com/tendermint/tmlibs/log"

-	priv_val "github.com/tendermint/tendermint/types/priv_validator"
+	"github.com/tendermint/tendermint/privval"
 )

 func main() {

@@ -30,13 +30,13 @@ func main() {
 		"privPath", *privValPath,
 	)

-	privVal := priv_val.LoadFilePV(*privValPath)
+	pv := privval.LoadFilePV(*privValPath)

-	rs := priv_val.NewRemoteSigner(
+	rs := privval.NewRemoteSigner(
 		logger,
 		*chainID,
 		*addr,
-		privVal,
+		pv,
 		crypto.GenPrivKeyEd25519(),
 	)
 	err := rs.Start()
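The import churn here and in several later hunks is the mechanical side of the 0.19.9 change noted in the changelog: `types/priv_validator` became the top-level `privval` package. A minimal sketch of the new usage (the surrounding function, file path and main are illustrative, not taken from this changeset):

package main

import (
	"fmt"

	"github.com/tendermint/tendermint/privval"
)

// loadOrCreateKey mirrors the pattern the commands above use: load the
// validator key file if it exists, otherwise generate and save a new one.
func loadOrCreateKey(path string) *privval.FilePV {
	return privval.LoadOrGenFilePV(path)
}

func main() {
	pv := loadOrCreateKey("config/priv_validator.json")
	fmt.Println("validator address:", pv.GetAddress())
}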
@@ -5,7 +5,7 @@ import (

 	"github.com/spf13/cobra"

-	pvm "github.com/tendermint/tendermint/types/priv_validator"
+	"github.com/tendermint/tendermint/privval"
 )

 // GenValidatorCmd allows the generation of a keypair for a

@@ -17,7 +17,7 @@ var GenValidatorCmd = &cobra.Command{
 }

 func genValidator(cmd *cobra.Command, args []string) {
-	pv := pvm.GenFilePV("")
+	pv := privval.GenFilePV("")
 	jsbz, err := cdc.MarshalJSON(pv)
 	if err != nil {
 		panic(err)
@@ -7,8 +7,8 @@ import (

 	cfg "github.com/tendermint/tendermint/config"
 	"github.com/tendermint/tendermint/p2p"
+	"github.com/tendermint/tendermint/privval"
 	"github.com/tendermint/tendermint/types"
-	pvm "github.com/tendermint/tendermint/types/priv_validator"
 	cmn "github.com/tendermint/tmlibs/common"
 )

@@ -26,12 +26,12 @@ func initFiles(cmd *cobra.Command, args []string) error {
 func initFilesWithConfig(config *cfg.Config) error {
 	// private validator
 	privValFile := config.PrivValidatorFile()
-	var pv *pvm.FilePV
+	var pv *privval.FilePV
 	if cmn.FileExists(privValFile) {
-		pv = pvm.LoadFilePV(privValFile)
+		pv = privval.LoadFilePV(privValFile)
 		logger.Info("Found private validator", "path", privValFile)
 	} else {
-		pv = pvm.GenFilePV(privValFile)
+		pv = privval.GenFilePV(privValFile)
 		pv.Save()
 		logger.Info("Generated private validator", "path", privValFile)
 	}
@@ -5,7 +5,7 @@ import (

 	"github.com/spf13/cobra"

-	pvm "github.com/tendermint/tendermint/types/priv_validator"
+	"github.com/tendermint/tendermint/privval"
 	"github.com/tendermint/tmlibs/log"
 )

@@ -13,14 +13,14 @@ import (
 // instance.
 var ResetAllCmd = &cobra.Command{
 	Use:   "unsafe_reset_all",
-	Short: "(unsafe) Remove all the data and WAL, reset this node's validator",
+	Short: "(unsafe) Remove all the data and WAL, reset this node's validator to genesis state",
 	Run:   resetAll,
 }

 // ResetPrivValidatorCmd resets the private validator files.
 var ResetPrivValidatorCmd = &cobra.Command{
 	Use:   "unsafe_reset_priv_validator",
-	Short: "(unsafe) Reset this node's validator",
+	Short: "(unsafe) Reset this node's validator to genesis state",
 	Run:   resetPrivValidator,
 }

@@ -32,7 +32,7 @@ func ResetAll(dbDir, privValFile string, logger log.Logger) {
 		logger.Error("Error removing directory", "err", err)
 		return
 	}
-	logger.Info("Removed all data", "dir", dbDir)
+	logger.Info("Removed all blockchain history", "dir", dbDir)
 }

 // XXX: this is totally unsafe.

@@ -50,11 +50,11 @@ func resetPrivValidator(cmd *cobra.Command, args []string) {
 func resetFilePV(privValFile string, logger log.Logger) {
 	// Get PrivValidator
 	if _, err := os.Stat(privValFile); err == nil {
-		pv := pvm.LoadFilePV(privValFile)
+		pv := privval.LoadFilePV(privValFile)
 		pv.Reset()
-		logger.Info("Reset PrivValidator", "file", privValFile)
+		logger.Info("Reset PrivValidator to genesis state", "file", privValFile)
 	} else {
-		pv := pvm.GenFilePV(privValFile)
+		pv := privval.GenFilePV(privValFile)
 		pv.Save()
 		logger.Info("Generated PrivValidator", "file", privValFile)
 	}
@@ -5,7 +5,7 @@ import (

 	"github.com/spf13/cobra"

-	privval "github.com/tendermint/tendermint/types/priv_validator"
+	"github.com/tendermint/tendermint/privval"
 )

 // ShowValidatorCmd adds capabilities for showing the validator info.
@@ -12,8 +12,8 @@ import (

 	cfg "github.com/tendermint/tendermint/config"
 	"github.com/tendermint/tendermint/p2p"
+	"github.com/tendermint/tendermint/privval"
 	"github.com/tendermint/tendermint/types"
-	pvm "github.com/tendermint/tendermint/types/priv_validator"
 	cmn "github.com/tendermint/tmlibs/common"
 )

@@ -89,7 +89,7 @@ func testnetFiles(cmd *cobra.Command, args []string) error {
 		initFilesWithConfig(config)

 		pvFile := filepath.Join(nodeDir, config.BaseConfig.PrivValidator)
-		pv := pvm.LoadFilePV(pvFile)
+		pv := privval.LoadFilePV(pvFile)
 		genVals[i] = types.GenesisValidator{
 			PubKey: pv.GetPubKey(),
 			Power:  1,
@@ -7,6 +7,13 @@ import (
 	"time"
 )

+const (
+	// FuzzModeDrop is a mode in which we randomly drop reads/writes, connections or sleep
+	FuzzModeDrop = iota
+	// FuzzModeDelay is a mode in which we randomly sleep
+	FuzzModeDelay
+)
+
 // NOTE: Most of the structs & relevant comments + the
 // default configuration options were used to manually
 // generate the config.toml. Please reflect any changes

@@ -287,14 +294,23 @@ type P2PConfig struct {
 	// Does not work if the peer-exchange reactor is disabled.
 	SeedMode bool `mapstructure:"seed_mode"`

-	// Authenticated encryption
-	AuthEnc bool `mapstructure:"auth_enc"`
-
-	// Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
+	// Comma separated list of peer IDs to keep private (will not be gossiped to
+	// other peers)
 	PrivatePeerIDs string `mapstructure:"private_peer_ids"`

 	// Toggle to disable guard against peers connecting from the same ip.
 	AllowDuplicateIP bool `mapstructure:"allow_duplicate_ip"`

+	// Peer connection configuration.
+	HandshakeTimeout time.Duration `mapstructure:"handshake_timeout"`
+	DialTimeout      time.Duration `mapstructure:"dial_timeout"`
+
+	// Testing params.
+	// Force dial to fail
+	TestDialFail bool `mapstructure:"test_dial_fail"`
+	// FUzz connection
+	TestFuzz       bool            `mapstructure:"test_fuzz"`
+	TestFuzzConfig *FuzzConnConfig `mapstructure:"test_fuzz_config"`
 }

 // DefaultP2PConfig returns a default configuration for the peer-to-peer layer

@@ -310,8 +326,12 @@ func DefaultP2PConfig() *P2PConfig {
 		RecvRate:         512000, // 500 kB/s
 		PexReactor:       true,
 		SeedMode:         false,
-		AuthEnc:          true,
 		AllowDuplicateIP: true, // so non-breaking yet
+		HandshakeTimeout: 20 * time.Second,
+		DialTimeout:      3 * time.Second,
+		TestDialFail:     false,
+		TestFuzz:         false,
+		TestFuzzConfig:   DefaultFuzzConnConfig(),
 	}
 }

@@ -330,6 +350,26 @@ func (cfg *P2PConfig) AddrBookFile() string {
 	return rootify(cfg.AddrBook, cfg.RootDir)
 }

+// FuzzConnConfig is a FuzzedConnection configuration.
+type FuzzConnConfig struct {
+	Mode         int
+	MaxDelay     time.Duration
+	ProbDropRW   float64
+	ProbDropConn float64
+	ProbSleep    float64
+}
+
+// DefaultFuzzConnConfig returns the default config.
+func DefaultFuzzConnConfig() *FuzzConnConfig {
+	return &FuzzConnConfig{
+		Mode:         FuzzModeDrop,
+		MaxDelay:     3 * time.Second,
+		ProbDropRW:   0.2,
+		ProbDropConn: 0.00,
+		ProbSleep:    0.00,
+	}
+}
+
 //-----------------------------------------------------------------------------
 // MempoolConfig
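A short sketch of how the new P2P fields above might be set from application code; `DefaultP2PConfig`, `DefaultFuzzConnConfig` and the field names come straight from the diff, while the surrounding program is illustrative:

package main

import (
	"fmt"
	"time"

	cfg "github.com/tendermint/tendermint/config"
)

func main() {
	p2p := cfg.DefaultP2PConfig() // HandshakeTimeout 20s, DialTimeout 3s, fuzzing disabled

	// Tighten the new connection timeouts.
	p2p.HandshakeTimeout = 10 * time.Second
	p2p.DialTimeout = 5 * time.Second

	// Turn on the fuzzed-connection wrapper (local testing only): sleep
	// randomly instead of dropping reads/writes.
	p2p.TestFuzz = true
	p2p.TestFuzzConfig = &cfg.FuzzConnConfig{
		Mode:      cfg.FuzzModeDelay,
		MaxDelay:  500 * time.Millisecond,
		ProbSleep: 0.1,
	}

	fmt.Printf("handshake=%s dial=%s fuzz=%v\n", p2p.HandshakeTimeout, p2p.DialTimeout, p2p.TestFuzz)
}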
@@ -165,9 +165,6 @@ pex = {{ .P2P.PexReactor }}
 # Does not work if the peer-exchange reactor is disabled.
 seed_mode = {{ .P2P.SeedMode }}

-# Authenticated encryption
-auth_enc = {{ .P2P.AuthEnc }}
-
 # Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
 private_peer_ids = "{{ .P2P.PrivatePeerIDs }}"
@@ -58,14 +58,11 @@ func TestByzantine(t *testing.T) {
 			css[i].doPrevote = func(height int64, round int) {}
 		}

-		eventBus := types.NewEventBus()
+		eventBus := css[i].eventBus
 		eventBus.SetLogger(logger.With("module", "events", "validator", i))
-		err := eventBus.Start()
-		require.NoError(t, err)
-		defer eventBus.Stop()

 		eventChans[i] = make(chan interface{}, 1)
-		err = eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock, eventChans[i])
+		err := eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock, eventChans[i])
 		require.NoError(t, err)

 		conR := NewConsensusReactor(css[i], true) // so we dont start the consensus states

@@ -105,15 +102,18 @@ func TestByzantine(t *testing.T) {
 		p2p.Connect2Switches(sws, i, j)
 	})

-	// start the state machines
-	byzR := reactors[0].(*ByzantineReactor)
-	s := byzR.reactor.conS.GetState()
-	byzR.reactor.SwitchToConsensus(s, 0)
+	// start the non-byz state machines.
+	// note these must be started before the byz
 	for i := 1; i < N; i++ {
 		cr := reactors[i].(*ConsensusReactor)
 		cr.SwitchToConsensus(cr.conS.GetState(), 0)
 	}

+	// start the byzantine state machine
+	byzR := reactors[0].(*ByzantineReactor)
+	s := byzR.reactor.conS.GetState()
+	byzR.reactor.SwitchToConsensus(s, 0)
+
 	// byz proposer sends one block to peers[0]
 	// and the other block to peers[1] and peers[2].
 	// note peers and switches order don't match.
@@ -19,9 +19,9 @@ import (
 	cstypes "github.com/tendermint/tendermint/consensus/types"
 	mempl "github.com/tendermint/tendermint/mempool"
 	"github.com/tendermint/tendermint/p2p"
+	"github.com/tendermint/tendermint/privval"
 	sm "github.com/tendermint/tendermint/state"
 	"github.com/tendermint/tendermint/types"
-	pvm "github.com/tendermint/tendermint/types/priv_validator"
 	cmn "github.com/tendermint/tmlibs/common"
 	dbm "github.com/tendermint/tmlibs/db"
 	"github.com/tendermint/tmlibs/log"

@@ -262,7 +262,7 @@ func newConsensusStateWithConfigAndBlockStore(thisConfig *cfg.Config, state sm.S
 	}

 	// mock the evidence pool
-	evpool := types.MockEvidencePool{}
+	evpool := sm.MockEvidencePool{}

 	// Make ConsensusState
 	stateDB := dbm.NewMemDB()

@@ -278,10 +278,10 @@ func newConsensusStateWithConfigAndBlockStore(thisConfig *cfg.Config, state sm.S
 	return cs
 }

-func loadPrivValidator(config *cfg.Config) *pvm.FilePV {
+func loadPrivValidator(config *cfg.Config) *privval.FilePV {
 	privValidatorFile := config.PrivValidatorFile()
 	ensureDir(path.Dir(privValidatorFile), 0700)
-	privValidator := pvm.LoadOrGenFilePV(privValidatorFile)
+	privValidator := privval.LoadOrGenFilePV(privValidatorFile)
 	privValidator.Reset()
 	return privValidator
 }

@@ -379,7 +379,7 @@ func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerF
 			privVal = privVals[i]
 		} else {
 			_, tempFilePath := cmn.Tempfile("priv_validator_")
-			privVal = pvm.GenFilePV(tempFilePath)
+			privVal = privval.GenFilePV(tempFilePath)
 		}

 		app := appFunc()
@@ -254,7 +254,8 @@ func TestReactorVotingPowerChange(t *testing.T) {
 	logger.Debug("---------------------------- Testing changing the voting power of one validator a few times")

 	val1PubKey := css[0].privValidator.GetPubKey()
-	updateValidatorTx := kvstore.MakeValSetChangeTx(val1PubKey.Bytes(), 25)
+	val1PubKeyABCI := types.TM2PB.PubKey(val1PubKey)
+	updateValidatorTx := kvstore.MakeValSetChangeTx(val1PubKeyABCI, 25)
 	previousTotalVotingPower := css[0].GetRoundState().LastValidators.TotalVotingPower()

 	waitForAndValidateBlock(t, nVals, activeVals, eventChans, css, updateValidatorTx)

@@ -266,7 +267,7 @@ func TestReactorVotingPowerChange(t *testing.T) {
 		t.Fatalf("expected voting power to change (before: %d, after: %d)", previousTotalVotingPower, css[0].GetRoundState().LastValidators.TotalVotingPower())
 	}

-	updateValidatorTx = kvstore.MakeValSetChangeTx(val1PubKey.Bytes(), 2)
+	updateValidatorTx = kvstore.MakeValSetChangeTx(val1PubKeyABCI, 2)
 	previousTotalVotingPower = css[0].GetRoundState().LastValidators.TotalVotingPower()

 	waitForAndValidateBlock(t, nVals, activeVals, eventChans, css, updateValidatorTx)

@@ -278,7 +279,7 @@ func TestReactorVotingPowerChange(t *testing.T) {
 		t.Fatalf("expected voting power to change (before: %d, after: %d)", previousTotalVotingPower, css[0].GetRoundState().LastValidators.TotalVotingPower())
 	}

-	updateValidatorTx = kvstore.MakeValSetChangeTx(val1PubKey.Bytes(), 26)
+	updateValidatorTx = kvstore.MakeValSetChangeTx(val1PubKeyABCI, 26)
 	previousTotalVotingPower = css[0].GetRoundState().LastValidators.TotalVotingPower()

 	waitForAndValidateBlock(t, nVals, activeVals, eventChans, css, updateValidatorTx)

@@ -316,7 +317,8 @@ func TestReactorValidatorSetChanges(t *testing.T) {
 	logger.Info("---------------------------- Testing adding one validator")

 	newValidatorPubKey1 := css[nVals].privValidator.GetPubKey()
-	newValidatorTx1 := kvstore.MakeValSetChangeTx(newValidatorPubKey1.Bytes(), testMinPower)
+	valPubKey1ABCI := types.TM2PB.PubKey(newValidatorPubKey1)
+	newValidatorTx1 := kvstore.MakeValSetChangeTx(valPubKey1ABCI, testMinPower)

 	// wait till everyone makes block 2
 	// ensure the commit includes all validators

@@ -342,7 +344,8 @@ func TestReactorValidatorSetChanges(t *testing.T) {
 	logger.Info("---------------------------- Testing changing the voting power of one validator")

 	updateValidatorPubKey1 := css[nVals].privValidator.GetPubKey()
-	updateValidatorTx1 := kvstore.MakeValSetChangeTx(updateValidatorPubKey1.Bytes(), 25)
+	updatePubKey1ABCI := types.TM2PB.PubKey(updateValidatorPubKey1)
+	updateValidatorTx1 := kvstore.MakeValSetChangeTx(updatePubKey1ABCI, 25)
 	previousTotalVotingPower := css[nVals].GetRoundState().LastValidators.TotalVotingPower()

 	waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css, updateValidatorTx1)

@@ -358,10 +361,12 @@ func TestReactorValidatorSetChanges(t *testing.T) {
 	logger.Info("---------------------------- Testing adding two validators at once")

 	newValidatorPubKey2 := css[nVals+1].privValidator.GetPubKey()
-	newValidatorTx2 := kvstore.MakeValSetChangeTx(newValidatorPubKey2.Bytes(), testMinPower)
+	newVal2ABCI := types.TM2PB.PubKey(newValidatorPubKey2)
+	newValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, testMinPower)

 	newValidatorPubKey3 := css[nVals+2].privValidator.GetPubKey()
-	newValidatorTx3 := kvstore.MakeValSetChangeTx(newValidatorPubKey3.Bytes(), testMinPower)
+	newVal3ABCI := types.TM2PB.PubKey(newValidatorPubKey3)
+	newValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, testMinPower)

 	waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css, newValidatorTx2, newValidatorTx3)
 	waitForAndValidateBlockWithTx(t, nPeers, activeVals, eventChans, css, newValidatorTx2, newValidatorTx3)

@@ -373,8 +378,8 @@ func TestReactorValidatorSetChanges(t *testing.T) {
 	//---------------------------------------------------------------------------
 	logger.Info("---------------------------- Testing removing two validators at once")

-	removeValidatorTx2 := kvstore.MakeValSetChangeTx(newValidatorPubKey2.Bytes(), 0)
-	removeValidatorTx3 := kvstore.MakeValSetChangeTx(newValidatorPubKey3.Bytes(), 0)
+	removeValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, 0)
+	removeValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, 0)

 	waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css, removeValidatorTx2, removeValidatorTx3)
 	waitForAndValidateBlockWithTx(t, nPeers, activeVals, eventChans, css, removeValidatorTx2, removeValidatorTx3)
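The repeated test edits above follow one pattern: with abci v0.11.0 the kvstore example's `MakeValSetChangeTx` takes an ABCI public key, so a Tendermint key must first be converted with `types.TM2PB.PubKey`. Sketched in isolation (the helper name and the idea of wrapping it in a function are illustrative):

package main

import (
	"github.com/tendermint/abci/example/kvstore"
	"github.com/tendermint/tendermint/privval"
	"github.com/tendermint/tendermint/types"
)

// valSetChangeTx builds a kvstore validator-set-change tx that gives the
// supplied file-based validator the requested voting power.
func valSetChangeTx(pv *privval.FilePV, power int64) []byte {
	abciPubKey := types.TM2PB.PubKey(pv.GetPubKey()) // convert to the ABCI representation
	return kvstore.MakeValSetChangeTx(abciPubKey, power)
}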
@@ -2,7 +2,6 @@ package consensus

 import (
 	"bytes"
-	"encoding/json"
 	"fmt"
 	"hash/crc32"
 	"io"

@@ -196,21 +195,21 @@ func makeHeightSearchFunc(height int64) auto.SearchFunc {
 type Handshaker struct {
 	stateDB      dbm.DB
 	initialState sm.State
-	store        types.BlockStore
-	appState     json.RawMessage
+	store        sm.BlockStore
+	genDoc       *types.GenesisDoc
 	logger       log.Logger

 	nBlocks int // number of blocks applied to the state
 }

 func NewHandshaker(stateDB dbm.DB, state sm.State,
-	store types.BlockStore, appState json.RawMessage) *Handshaker {
+	store sm.BlockStore, genDoc *types.GenesisDoc) *Handshaker {

 	return &Handshaker{
 		stateDB:      stateDB,
 		initialState: state,
 		store:        store,
-		appState:     appState,
+		genDoc:       genDoc,
 		logger:       log.NewNopLogger(),
 		nBlocks:      0,
 	}

@@ -268,14 +267,33 @@ func (h *Handshaker) ReplayBlocks(state sm.State, appHash []byte, appBlockHeight
 	// If appBlockHeight == 0 it means that we are at genesis and hence should send InitChain
 	if appBlockHeight == 0 {
 		validators := types.TM2PB.Validators(state.Validators)
+		csParams := types.TM2PB.ConsensusParams(h.genDoc.ConsensusParams)
 		req := abci.RequestInitChain{
-			Validators:    validators,
-			AppStateBytes: h.appState,
+			Time:            h.genDoc.GenesisTime.Unix(), // TODO
+			ChainId:         h.genDoc.ChainID,
+			ConsensusParams: csParams,
+			Validators:      validators,
+			AppStateBytes:   h.genDoc.AppStateJSON,
 		}
-		_, err := proxyApp.Consensus().InitChainSync(req)
+		res, err := proxyApp.Consensus().InitChainSync(req)
 		if err != nil {
 			return nil, err
 		}
+
+		// if the app returned validators
+		// or consensus params, update the state
+		// with the them
+		if len(res.Validators) > 0 {
+			vals, err := types.PB2TM.Validators(res.Validators)
+			if err != nil {
+				return nil, err
+			}
+			state.Validators = types.NewValidatorSet(vals)
+		}
+		if res.ConsensusParams != nil {
+			state.ConsensusParams = types.PB2TM.ConsensusParams(res.ConsensusParams)
+		}
+		sm.SaveState(h.stateDB, state)
 	}

 	// First handle edge cases and constraints on the storeBlockHeight

@@ -365,7 +383,7 @@ func (h *Handshaker) replayBlocks(state sm.State, proxyApp proxy.AppConns, appBl
 	for i := appBlockHeight + 1; i <= finalBlock; i++ {
 		h.logger.Info("Applying block", "height", i)
 		block := h.store.LoadBlock(i)
-		appHash, err = sm.ExecCommitBlock(proxyApp.Consensus(), block, h.logger)
+		appHash, err = sm.ExecCommitBlock(proxyApp.Consensus(), block, h.logger, state.LastValidators, h.stateDB)
 		if err != nil {
 			return nil, err
 		}

@@ -390,7 +408,7 @@ func (h *Handshaker) replayBlock(state sm.State, height int64, proxyApp proxy.Ap
 	block := h.store.LoadBlock(height)
 	meta := h.store.LoadBlockMeta(height)

-	blockExec := sm.NewBlockExecutor(h.stateDB, h.logger, proxyApp, types.MockMempool{}, types.MockEvidencePool{})
+	blockExec := sm.NewBlockExecutor(h.stateDB, h.logger, proxyApp, sm.MockMempool{}, sm.MockEvidencePool{})

 	var err error
 	state, err = blockExec.ApplyBlock(state, meta.BlockID, block)
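The Handshaker above now takes the full genesis doc so it can populate `RequestInitChain` (chain ID, genesis time, consensus params, app state) and apply any validators or consensus params the app returns. A sketch of wiring it up, following the pattern used elsewhere in this changeset (the function, its package, and where its arguments come from are illustrative):

package node

import (
	"fmt"

	cfg "github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/consensus"
	"github.com/tendermint/tendermint/proxy"
	sm "github.com/tendermint/tendermint/state"
	dbm "github.com/tendermint/tmlibs/db"
)

// startProxyApp performs the ABCI handshake (including InitChain at genesis)
// before the node starts, using the genesis doc loaded from disk.
func startProxyApp(config cfg.BaseConfig, stateDB dbm.DB, state sm.State,
	blockStore sm.BlockStore, clientCreator proxy.ClientCreator) (proxy.AppConns, error) {

	genDoc, err := sm.MakeGenesisDocFromFile(config.GenesisFile())
	if err != nil {
		return nil, err
	}
	handshaker := consensus.NewHandshaker(stateDB, state, blockStore, genDoc)
	proxyApp := proxy.NewAppConns(clientCreator, handshaker)
	if err := proxyApp.Start(); err != nil {
		return nil, fmt.Errorf("error starting proxy app conns: %v", err)
	}
	return proxyApp, nil
}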
@@ -299,7 +299,7 @@ func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusCo
 	// Create proxyAppConn connection (consensus, mempool, query)
 	clientCreator := proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir())
 	proxyApp := proxy.NewAppConns(clientCreator,
-		NewHandshaker(stateDB, state, blockStore, gdoc.AppState()))
+		NewHandshaker(stateDB, state, blockStore, gdoc))
 	err = proxyApp.Start()
 	if err != nil {
 		cmn.Exit(cmn.Fmt("Error starting proxy app conns: %v", err))

@@ -310,7 +310,7 @@ func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusCo
 		cmn.Exit(cmn.Fmt("Failed to start event bus: %v", err))
 	}

-	mempool, evpool := types.MockMempool{}, types.MockEvidencePool{}
+	mempool, evpool := sm.MockMempool{}, sm.MockEvidencePool{}
 	blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool)

 	consensusState := NewConsensusState(csConfig, state.Copy(), blockExec,
@@ -13,6 +13,7 @@ import (
 	"testing"
 	"time"

+	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"

 	"github.com/tendermint/abci/example/kvstore"

@@ -23,10 +24,10 @@ import (
 	dbm "github.com/tendermint/tmlibs/db"

 	cfg "github.com/tendermint/tendermint/config"
+	"github.com/tendermint/tendermint/privval"
 	"github.com/tendermint/tendermint/proxy"
 	sm "github.com/tendermint/tendermint/state"
 	"github.com/tendermint/tendermint/types"
-	pvm "github.com/tendermint/tendermint/types/priv_validator"
 	"github.com/tendermint/tmlibs/log"
 )

@@ -263,8 +264,8 @@ const (
 )

 var (
-	mempool = types.MockMempool{}
-	evpool  = types.MockEvidencePool{}
+	mempool = sm.MockMempool{}
+	evpool  = sm.MockEvidencePool{}
 )

 //---------------------------------------

@@ -329,7 +330,7 @@ func testHandshakeReplay(t *testing.T, nBlocks int, mode uint) {
 	walFile := tempWALWithData(walBody)
 	config.Consensus.SetWalFile(walFile)

-	privVal := pvm.LoadFilePV(config.PrivValidatorFile())
+	privVal := privval.LoadFilePV(config.PrivValidatorFile())

 	wal, err := NewWAL(walFile)
 	if err != nil {

@@ -366,7 +367,8 @@ func testHandshakeReplay(t *testing.T, nBlocks int, mode uint) {
 	}

 	// now start the app using the handshake - it should sync
-	handshaker := NewHandshaker(stateDB, state, store, nil)
+	genDoc, _ := sm.MakeGenesisDocFromFile(config.GenesisFile())
+	handshaker := NewHandshaker(stateDB, state, store, genDoc)
 	proxyApp := proxy.NewAppConns(clientCreator2, handshaker)
 	if err := proxyApp.Start(); err != nil {
 		t.Fatalf("Error starting proxy app connections: %v", err)

@@ -416,10 +418,10 @@ func buildAppStateFromChain(proxyApp proxy.AppConns, stateDB dbm.DB,
 	}
 	defer proxyApp.Stop()

-	// TODO: get the genesis bytes (https://github.com/tendermint/tendermint/issues/1224)
-	var genesisBytes []byte
 	validators := types.TM2PB.Validators(state.Validators)
-	if _, err := proxyApp.Consensus().InitChainSync(abci.RequestInitChain{validators, genesisBytes}); err != nil {
+	if _, err := proxyApp.Consensus().InitChainSync(abci.RequestInitChain{
+		Validators: validators,
+	}); err != nil {
 		panic(err)
 	}

@@ -453,10 +455,10 @@ func buildTMStateFromChain(config *cfg.Config, stateDB dbm.DB, state sm.State, c
 	}
 	defer proxyApp.Stop()

-	// TODO: get the genesis bytes (https://github.com/tendermint/tendermint/issues/1224)
-	var genesisBytes []byte
 	validators := types.TM2PB.Validators(state.Validators)
-	if _, err := proxyApp.Consensus().InitChainSync(abci.RequestInitChain{validators, genesisBytes}); err != nil {
+	if _, err := proxyApp.Consensus().InitChainSync(abci.RequestInitChain{
+		Validators: validators,
+	}); err != nil {
 		panic(err)
 	}

@@ -633,3 +635,53 @@ func (bs *mockBlockStore) LoadBlockCommit(height int64) *types.Commit {
 func (bs *mockBlockStore) LoadSeenCommit(height int64) *types.Commit {
 	return bs.commits[height-1]
 }
+
+//----------------------------------------
+
+func TestInitChainUpdateValidators(t *testing.T) {
+	val, _ := types.RandValidator(true, 10)
+	vals := types.NewValidatorSet([]*types.Validator{val})
+	app := &initChainApp{vals: types.TM2PB.Validators(vals)}
+	clientCreator := proxy.NewLocalClientCreator(app)
+
+	config := ResetConfig("proxy_test_")
+	privVal := privval.LoadFilePV(config.PrivValidatorFile())
+	stateDB, state, store := stateAndStore(config, privVal.GetPubKey())
+
+	oldValAddr := state.Validators.Validators[0].Address
+
+	// now start the app using the handshake - it should sync
+	genDoc, _ := sm.MakeGenesisDocFromFile(config.GenesisFile())
+	handshaker := NewHandshaker(stateDB, state, store, genDoc)
+	proxyApp := proxy.NewAppConns(clientCreator, handshaker)
+	if err := proxyApp.Start(); err != nil {
+		t.Fatalf("Error starting proxy app connections: %v", err)
+	}
+	defer proxyApp.Stop()
+
+	// reload the state, check the validator set was updated
+	state = sm.LoadState(stateDB)
+
+	newValAddr := state.Validators.Validators[0].Address
+	expectValAddr := val.Address
+	assert.NotEqual(t, oldValAddr, newValAddr)
+	assert.Equal(t, newValAddr, expectValAddr)
+}
+
+func newInitChainApp(vals []abci.Validator) *initChainApp {
+	return &initChainApp{
+		vals: vals,
+	}
+}
+
+// returns the vals on InitChain
+type initChainApp struct {
+	abci.BaseApplication
+	vals []abci.Validator
+}
+
+func (ica *initChainApp) InitChain(req abci.RequestInitChain) abci.ResponseInitChain {
+	return abci.ResponseInitChain{
+		Validators: ica.vals,
+	}
+}
@@ -76,9 +76,9 @@ type ConsensusState struct {
 	// services for creating and executing blocks
 	// TODO: encapsulate all of this in one "BlockManager"
 	blockExec  *sm.BlockExecutor
-	blockStore types.BlockStore
-	mempool    types.Mempool
-	evpool     types.EvidencePool
+	blockStore sm.BlockStore
+	mempool    sm.Mempool
+	evpool     sm.EvidencePool

 	// internal state
 	mtx sync.Mutex

@@ -118,7 +118,7 @@ type ConsensusState struct {
 }

 // NewConsensusState returns a new ConsensusState.
-func NewConsensusState(config *cfg.ConsensusConfig, state sm.State, blockExec *sm.BlockExecutor, blockStore types.BlockStore, mempool types.Mempool, evpool types.EvidencePool) *ConsensusState {
+func NewConsensusState(config *cfg.ConsensusConfig, state sm.State, blockExec *sm.BlockExecutor, blockStore sm.BlockStore, mempool sm.Mempool, evpool sm.EvidencePool) *ConsensusState {
 	cs := &ConsensusState{
 		config:    config,
 		blockExec: blockExec,

@@ -460,9 +460,12 @@ func (cs *ConsensusState) updateToState(state sm.State) {

 	// If state isn't further out than cs.state, just ignore.
 	// This happens when SwitchToConsensus() is called in the reactor.
-	// We don't want to reset e.g. the Votes.
+	// We don't want to reset e.g. the Votes, but we still want to
+	// signal the new round step, because other services (eg. mempool)
+	// depend on having an up-to-date peer state!
 	if !cs.state.IsEmpty() && (state.LastBlockHeight <= cs.state.LastBlockHeight) {
 		cs.Logger.Info("Ignoring updateToState()", "newHeight", state.LastBlockHeight+1, "oldHeight", cs.state.LastBlockHeight+1)
+		cs.newStep()
 		return
 	}

@@ -492,6 +495,7 @@ func (cs *ConsensusState) updateToState(state sm.State) {
 	} else {
 		cs.StartTime = cs.config.Commit(cs.CommitTime)
 	}
+
 	cs.Validators = validators
 	cs.Proposal = nil
 	cs.ProposalBlock = nil

@@ -517,7 +521,7 @@ func (cs *ConsensusState) newStep() {
 	rs := cs.RoundStateEvent()
 	cs.wal.Write(rs)
 	cs.nSteps++
-	// newStep is called by updateToStep in NewConsensusState before the eventBus is set!
+	// newStep is called by updateToState in NewConsensusState before the eventBus is set!
 	if cs.eventBus != nil {
 		cs.eventBus.PublishEventNewRoundStep(rs)
 		cs.evsw.FireEvent(types.EventNewRoundStep, &cs.RoundState)
@@ -106,8 +106,8 @@ func (wal *baseWAL) OnStart() error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (wal *baseWAL) OnStop() {
|
func (wal *baseWAL) OnStop() {
|
||||||
wal.BaseService.OnStop()
|
|
||||||
wal.group.Stop()
|
wal.group.Stop()
|
||||||
|
wal.group.Close()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Write is called in newStep and for each receive on the
|
// Write is called in newStep and for each receive on the
|
||||||
|
@@ -13,10 +13,10 @@ import (
|
|||||||
"github.com/tendermint/abci/example/kvstore"
|
"github.com/tendermint/abci/example/kvstore"
|
||||||
bc "github.com/tendermint/tendermint/blockchain"
|
bc "github.com/tendermint/tendermint/blockchain"
|
||||||
cfg "github.com/tendermint/tendermint/config"
|
cfg "github.com/tendermint/tendermint/config"
|
||||||
|
"github.com/tendermint/tendermint/privval"
|
||||||
"github.com/tendermint/tendermint/proxy"
|
"github.com/tendermint/tendermint/proxy"
|
||||||
sm "github.com/tendermint/tendermint/state"
|
sm "github.com/tendermint/tendermint/state"
|
||||||
"github.com/tendermint/tendermint/types"
|
"github.com/tendermint/tendermint/types"
|
||||||
pvm "github.com/tendermint/tendermint/types/priv_validator"
|
|
||||||
auto "github.com/tendermint/tmlibs/autofile"
|
auto "github.com/tendermint/tmlibs/autofile"
|
||||||
cmn "github.com/tendermint/tmlibs/common"
|
cmn "github.com/tendermint/tmlibs/common"
|
||||||
"github.com/tendermint/tmlibs/db"
|
"github.com/tendermint/tmlibs/db"
|
||||||
@@ -40,7 +40,7 @@ func WALWithNBlocks(numBlocks int) (data []byte, err error) {
|
|||||||
// COPY PASTE FROM node.go WITH A FEW MODIFICATIONS
|
// COPY PASTE FROM node.go WITH A FEW MODIFICATIONS
|
||||||
// NOTE: we can't import node package because of circular dependency
|
// NOTE: we can't import node package because of circular dependency
|
||||||
privValidatorFile := config.PrivValidatorFile()
|
privValidatorFile := config.PrivValidatorFile()
|
||||||
privValidator := pvm.LoadOrGenFilePV(privValidatorFile)
|
privValidator := privval.LoadOrGenFilePV(privValidatorFile)
|
||||||
genDoc, err := types.GenesisDocFromFile(config.GenesisFile())
|
genDoc, err := types.GenesisDocFromFile(config.GenesisFile())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "failed to read genesis file")
|
return nil, errors.Wrap(err, "failed to read genesis file")
|
||||||
@@ -52,7 +52,7 @@ func WALWithNBlocks(numBlocks int) (data []byte, err error) {
|
|||||||
return nil, errors.Wrap(err, "failed to make genesis state")
|
return nil, errors.Wrap(err, "failed to make genesis state")
|
||||||
}
|
}
|
||||||
blockStore := bc.NewBlockStore(blockStoreDB)
|
blockStore := bc.NewBlockStore(blockStoreDB)
|
||||||
handshaker := NewHandshaker(stateDB, state, blockStore, genDoc.AppState())
|
handshaker := NewHandshaker(stateDB, state, blockStore, genDoc)
|
||||||
proxyApp := proxy.NewAppConns(proxy.NewLocalClientCreator(app), handshaker)
|
proxyApp := proxy.NewAppConns(proxy.NewLocalClientCreator(app), handshaker)
|
||||||
proxyApp.SetLogger(logger.With("module", "proxy"))
|
proxyApp.SetLogger(logger.With("module", "proxy"))
|
||||||
if err := proxyApp.Start(); err != nil {
|
if err := proxyApp.Start(); err != nil {
|
||||||
@@ -65,8 +65,8 @@ func WALWithNBlocks(numBlocks int) (data []byte, err error) {
|
|||||||
return nil, errors.Wrap(err, "failed to start event bus")
|
return nil, errors.Wrap(err, "failed to start event bus")
|
||||||
}
|
}
|
||||||
defer eventBus.Stop()
|
defer eventBus.Stop()
|
||||||
mempool := types.MockMempool{}
|
mempool := sm.MockMempool{}
|
||||||
evpool := types.MockEvidencePool{}
|
evpool := sm.MockEvidencePool{}
|
||||||
blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool)
|
blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool)
|
||||||
consensusState := NewConsensusState(config.Consensus, state.Copy(), blockExec, blockStore, mempool, evpool)
|
consensusState := NewConsensusState(config.Consensus, state.Copy(), blockExec, blockStore, mempool, evpool)
|
||||||
consensusState.SetLogger(logger)
|
consensusState.SetLogger(logger)
|
||||||
|
@@ -103,9 +103,6 @@ pex = true
 # Does not work if the peer-exchange reactor is disabled.
 seed_mode = false

-# Authenticated encryption
-auth_enc = true
-
 # Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
 private_peer_ids = ""

@@ -103,9 +103,6 @@ pex = true
 # Does not work if the peer-exchange reactor is disabled.
 seed_mode = false

-# Authenticated encryption
-auth_enc = true
-
 # Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
 private_peer_ids = ""

@@ -103,9 +103,6 @@ pex = true
 # Does not work if the peer-exchange reactor is disabled.
 seed_mode = false

-# Authenticated encryption
-auth_enc = true
-
 # Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
 private_peer_ids = ""

@@ -103,9 +103,6 @@ pex = true
 # Does not work if the peer-exchange reactor is disabled.
 seed_mode = false

-# Authenticated encryption
-auth_enc = true
-
 # Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
 private_peer_ids = ""

@@ -1,6 +1,13 @@
 Install Tendermint
 ==================

+The fastest and easiest way to install the ``tendermint`` binary
+is to run `this script <https://github.com/tendermint/tendermint/blob/develop/scripts/install/install_tendermint_ubuntu.sh>`__ on
+a fresh Ubuntu instance,
+or `this script <https://github.com/tendermint/tendermint/blob/develop/scripts/install/install_tendermint_bsd.sh>`__
+on a fresh FreeBSD instance. Read the comments / instructions carefully (i.e., reset your terminal after running the script,
+make sure you're okay with the network connections being made).
+
 From Binary
 -----------
@@ -39,7 +39,7 @@ place of the public key. Here we list the concrete types, their names,
 and prefix bytes for public keys and signatures, as well as the address schemes
 for each PubKey. Note for brevity we don't
 include details of the private keys beyond their type and name, as they can be
-derrived the same way as the others using Amino.
+derived the same way as the others using Amino.

 All registered objects are encoded by Amino using a 4-byte PrefixBytes that
 uniquely identifies the object and includes information about its underlying
@@ -49,107 +49,60 @@ spec](https://github.com/tendermint/go-amino#computing-the-prefix-and-disambigua
 In what follows, we provide the type names and prefix bytes directly.
 Notice that when encoding byte-arrays, the length of the byte-array is appended
 to the PrefixBytes. Thus the encoding of a byte array becomes `<PrefixBytes>
-<Length> <ByteArray>`
-
-NOTE: the remainder of this section on Public Key Cryptography can be generated
-from [this script](https://github.com/tendermint/tendermint/blob/master/docs/spec/scripts/crypto.go)
-
-### PubKeyEd25519
-
-```
-// Name: tendermint/PubKeyEd25519
-// PrefixBytes: 0x1624DE62
-// Length: 0x20
-// Notes: raw 32-byte Ed25519 pubkey
-type PubKeyEd25519 [32]byte
-
-func (pubkey PubKeyEd25519) Address() []byte {
-  // NOTE: hash of the Amino encoded bytes!
-  return RIPEMD160(AminoEncode(pubkey))
-}
-```
-
-For example, the 32-byte Ed25519 pubkey
-`CCACD52F9B29D04393F01CD9AF6535455668115641F3D8BAEFD2295F24BAF60E` would be
-encoded as
-`1624DE6220CCACD52F9B29D04393F01CD9AF6535455668115641F3D8BAEFD2295F24BAF60E`.
-
-The address would then be
-`RIPEMD160(0x1624DE6220CCACD52F9B29D04393F01CD9AF6535455668115641F3D8BAEFD2295F24BAF60E)`
-or `430FF75BAF1EC4B0D51BB3EEC2955479D0071605`
-
-### SignatureEd25519
-
-```
-// Name: tendermint/SignatureKeyEd25519
-// PrefixBytes: 0x3DA1DB2A
-// Length: 0x40
-// Notes: raw 64-byte Ed25519 signature
-type SignatureEd25519 [64]byte
-```
-
-For example, the 64-byte Ed25519 signature
-`1B6034A8ED149D3C94FDA13EC03B26CC0FB264D9B0E47D3FA3DEF9FCDE658E49C80B35F9BE74949356401B15B18FB817D6E54495AD1C4A8401B248466CB0DB0B`
-would be encoded as
-`3DA1DB2A401B6034A8ED149D3C94FDA13EC03B26CC0FB264D9B0E47D3FA3DEF9FCDE658E49C80B35F9BE74949356401B15B18FB817D6E54495AD1C4A8401B248466CB0DB0B`
-
-### PrivKeyEd25519
-
-```
-// Name: tendermint/PrivKeyEd25519
-// Notes: raw 32-byte priv key concatenated to raw 32-byte pub key
-type PrivKeyEd25519 [64]byte
-```
-
-### PubKeySecp256k1
-
-```
-// Name: tendermint/PubKeySecp256k1
-// PrefixBytes: 0xEB5AE982
-// Length: 0x21
-// Notes: OpenSSL compressed pubkey prefixed with 0x02 or 0x03
-type PubKeySecp256k1 [33]byte
-
-func (pubkey PubKeySecp256k1) Address() []byte {
-  // NOTE: hash of the raw pubkey bytes (not Amino encoded!).
-  // Compatible with Bitcoin addresses.
-  return RIPEMD160(SHA256(pubkey[:]))
-}
-```
-
-For example, the 33-byte Secp256k1 pubkey
-`020BD40F225A57ED383B440CF073BC5539D0341F5767D2BF2D78406D00475A2EE9` would be
-encoded as
-`EB5AE98221020BD40F225A57ED383B440CF073BC5539D0341F5767D2BF2D78406D00475A2EE9`
-
-The address would then be
-`RIPEMD160(SHA256(0x020BD40F225A57ED383B440CF073BC5539D0341F5767D2BF2D78406D00475A2EE9))`
-or `0AE5BEE929ABE51BAD345DB925EEA652680783FC`
-
-### SignatureSecp256k1
-
-```
-// Name: tendermint/SignatureKeySecp256k1
-// PrefixBytes: 0x16E1FEEA
-// Length: Variable
-// Encoding prefix: Variable
-// Notes: raw bytes of the Secp256k1 signature
-type SignatureSecp256k1 []byte
-```
-
-For example, the Secp256k1 signature
-`304402201CD4B8C764D2FD8AF23ECFE6666CA8A53886D47754D951295D2D311E1FEA33BF02201E0F906BB1CF2C30EAACFFB032A7129358AFF96B9F79B06ACFFB18AC90C2ADD7`
-would be encoded as
-`16E1FEEA46304402201CD4B8C764D2FD8AF23ECFE6666CA8A53886D47754D951295D2D311E1FEA33BF02201E0F906BB1CF2C30EAACFFB032A7129358AFF96B9F79B06ACFFB18AC90C2ADD7`
-
-### PrivKeySecp256k1
-
-```
-// Name: tendermint/PrivKeySecp256k1
-// Notes: raw 32-byte priv key
-type PrivKeySecp256k1 [32]byte
-```
+<Length> <ByteArray>`. In other words, to encode any type listed below you do not need to be
+familiar with amino encoding.
+You can simply use below table and concatenate Prefix || Length (of raw bytes) || raw bytes
+( while || stands for byte concatenation here).
+
+| Type | Name | Prefix | Length |
+| ---- | ---- | ------ | ----- |
+| PubKeyEd25519 | tendermint/PubKeyEd25519 | 0x1624DE62 | 0x20 |
+| PubKeyLedgerEd25519 | tendermint/PubKeyLedgerEd25519 | 0x5C3453B2 | 0x20 |
+| PubKeySecp256k1 | tendermint/PubKeySecp256k1 | 0xEB5AE982 | 0x21 |
+| PrivKeyEd25519 | tendermint/PrivKeyEd25519 | 0xA3288912 | 0x40 |
+| PrivKeySecp256k1 | tendermint/PrivKeySecp256k1 | 0xE1B0F79A | 0x20 |
+| PrivKeyLedgerSecp256k1 | tendermint/PrivKeyLedgerSecp256k1 | 0x10CAB393 | variable |
+| PrivKeyLedgerEd25519 | tendermint/PrivKeyLedgerEd25519 | 0x0CFEEF9B | variable |
+| SignatureEd25519 | tendermint/SignatureKeyEd25519 | 0x3DA1DB2A | 0x40 |
+| SignatureSecp256k1 | tendermint/SignatureKeySecp256k1 | 0x16E1FEEA | variable |
+
+### Examples
+
+1. For example, the 33-byte (or 0x21-byte in hex) Secp256k1 pubkey
+`020BD40F225A57ED383B440CF073BC5539D0341F5767D2BF2D78406D00475A2EE9`
+would be encoded as
+`EB5AE98221020BD40F225A57ED383B440CF073BC5539D0341F5767D2BF2D78406D00475A2EE9`
+
+2. For example, the variable size Secp256k1 signature (in this particular example 70 or 0x46 bytes)
+`304402201CD4B8C764D2FD8AF23ECFE6666CA8A53886D47754D951295D2D311E1FEA33BF02201E0F906BB1CF2C30EAACFFB032A7129358AFF96B9F79B06ACFFB18AC90C2ADD7`
+would be encoded as
+`16E1FEEA46304402201CD4B8C764D2FD8AF23ECFE6666CA8A53886D47754D951295D2D311E1FEA33BF02201E0F906BB1CF2C30EAACFFB032A7129358AFF96B9F79B06ACFFB18AC90C2ADD7`
+
+### Addresses
+
+Addresses for each public key types are computed as follows:
+
+#### Ed25519
+
+RIPEMD160 hash of the Amino encoded public key:
+
+```
+address = RIPEMD160(AMINO(pubkey))
+```
+
+NOTE: this will soon change to the truncated 20-bytes of the SHA256 of the raw
+public key
+
+#### Secp256k1
+
+RIPEMD160 hash of the SHA256 hash of the OpenSSL compressed public key:
+
+```
+address = RIPEMD160(SHA256(pubkey))
+```
+
+This is the same as Bitcoin.

 ## Other Common Types

 ### BitArray
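The encoding and address rules in the hunk above can be checked with a short Go sketch. This is illustrative only and not code from this diff: the helper name `encodeWithPrefix` is mine, the `golang.org/x/crypto/ripemd160` package is assumed, and the length is written as a single byte, which is enough for the fixed-size key and signature types in the table (Amino uses a uvarint length in general).

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"

	"golang.org/x/crypto/ripemd160"
)

// encodeWithPrefix concatenates Prefix || Length || raw bytes, as described above.
func encodeWithPrefix(prefix, raw []byte) []byte {
	out := append([]byte{}, prefix...)
	out = append(out, byte(len(raw)))
	return append(out, raw...)
}

func main() {
	// 32-byte Ed25519 pubkey from the removed example above.
	edPub, _ := hex.DecodeString("CCACD52F9B29D04393F01CD9AF6535455668115641F3D8BAEFD2295F24BAF60E")

	// Amino encoding: 0x1624DE62 || 0x20 || raw pubkey.
	enc := encodeWithPrefix([]byte{0x16, 0x24, 0xDE, 0x62}, edPub)
	fmt.Printf("encoded ed25519 pubkey: %X\n", enc)

	// Ed25519 address = RIPEMD160 of the Amino-encoded pubkey.
	edHash := ripemd160.New()
	edHash.Write(enc)
	fmt.Printf("ed25519 address: %X\n", edHash.Sum(nil))

	// Secp256k1 address = RIPEMD160(SHA256(raw 33-byte compressed pubkey)), same as Bitcoin.
	secpPub, _ := hex.DecodeString("020BD40F225A57ED383B440CF073BC5539D0341F5767D2BF2D78406D00475A2EE9")
	sum := sha256.Sum256(secpPub)
	secpHash := ripemd160.New()
	secpHash.Write(sum[:])
	fmt.Printf("secp256k1 address: %X\n", secpHash.Sum(nil))
}
```

Running it against the keys used in the examples should reproduce the encodings shown above.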
@@ -52,20 +52,33 @@ objects in the `ResponseBeginBlock`:

 ```
 message Validator {
-  bytes pub_key = 1;
-  int64 power = 2;
+  bytes address = 1;
+  PubKey pub_key = 2;
+  int64 power = 3;
 }
+
+message PubKey {
+  string type = 1;
+  bytes  data = 2;
+}
+
 ```

-The `pub_key` is the Amino encoded public key for the validator. For details on
-Amino encoded public keys, see the [section of the encoding spec](https://github.com/tendermint/tendermint/blob/master/docs/spec/blockchain/encoding.md#public-key-cryptography).
+The `pub_key` currently supports two types:
+- `type = "ed25519" and `data = <raw 32-byte public key>`
+- `type = "secp256k1" and `data = <33-byte OpenSSL compressed public key>`
+
+If the address is provided, it must match the address of the pubkey, as
+specified [here](/docs/spec/blockchain/encoding.md#Addresses)
+
+(Note: In the v0.19 series, the `pub_key` is the [Amino encoded public
+key](/docs/spec/blockchain/encoding.md#public-key-cryptography).
 For Ed25519 pubkeys, the Amino prefix is always "1624DE6220". For example, the 32-byte Ed25519 pubkey
 `76852933A4686A721442E931A8415F62F5F1AEDF4910F1F252FB393F74C40C85` would be
 Amino encoded as
-`1624DE622076852933A4686A721442E931A8415F62F5F1AEDF4910F1F252FB393F74C40C85`
+`1624DE622076852933A4686A721442E931A8415F62F5F1AEDF4910F1F252FB393F74C40C85`)

-(Note: in old versions of Tendermint (pre-v0.19.0), the pubkey is just prefixed with a
+(Note: In old versions of Tendermint (pre-v0.19.0), the pubkey is just prefixed with a
 single type byte, so for ED25519 we'd have `pub_key = 0x1 | pub`)

 The `power` is the new voting power for the validator, with the
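For illustration, a validator entry using the new `PubKey{type, data}` shape might be built like this in Go. The `abci.Validator` and `abci.PubKey` field names are assumed from the protobuf definition above, and the InitChain usage mirrors the `initChainApp` test earlier in this diff; treat this as a sketch, not the exact API of a particular abci release.

```go
package example

import (
	abci "github.com/tendermint/abci/types"
)

// initialValidator builds a Validator with the PubKey{type, data} shape
// described above. rawPub is the raw 32-byte ed25519 key, with no Amino prefix.
func initialValidator(rawPub []byte, power int64) abci.Validator {
	return abci.Validator{
		// Address may be left empty; if set, it must match the pubkey's address.
		PubKey: abci.PubKey{
			Type: "ed25519", // or "secp256k1" with a 33-byte compressed key
			Data: rawPub,
		},
		Power: power, // a power of 0 removes the validator
	}
}

// Returning validators from InitChain adopts them as the initial validator set.
func initChainResponse(rawPub []byte) abci.ResponseInitChain {
	return abci.ResponseInitChain{
		Validators: []abci.Validator{initialValidator(rawPub, 10)},
	}
}
```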
@@ -79,6 +92,19 @@ following rules:
   set with the given power
 - if the validator does already exist, its power will be adjusted to the given power

+## InitChain Validator Updates
+
+ResponseInitChain has the option to return a list of validators.
+If the list is not empty, Tendermint will adopt it for the validator set.
+This way the application can determine the initial validator set for the
+blockchain.
+
+Note that if addresses are included in the returned validators, they must match
+the address of the public key.
+
+ResponseInitChain also includes ConsensusParams, but these are presently
+ignored.
+
 ## Query

 Query is a generic message type with lots of flexibility to enable diverse sets
@@ -94,7 +120,7 @@ using the following paths, with no additional data:

 - `/p2p/filter/addr/<IP:PORT>`, where `<IP:PORT>` denote the IP address and
   the port of the connection
-- `p2p/filter/pubkey/<ID>`, where `<ID>` is the peer node ID (ie. the
+- `p2p/filter/id/<ID>`, where `<ID>` is the peer node ID (ie. the
   pubkey.Address() for the peer's PubKey)

 If either of these queries return a non-zero ABCI code, Tendermint will refuse
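A minimal sketch of how an application could answer these peer-filter queries follows. Only the path layout (`/p2p/filter/addr/<IP:PORT>` and `/p2p/filter/id/<ID>`) and the non-zero-code rejection rule come from the text above; the `PeerFilterApp` type, its `blockedIDs` map, and the dispatch logic are illustrative assumptions.

```go
package example

import (
	"strings"

	abci "github.com/tendermint/abci/types"
)

// PeerFilterApp rejects peers whose node ID appears in blockedIDs and
// accepts everything else.
type PeerFilterApp struct {
	abci.BaseApplication
	blockedIDs map[string]bool
}

func (app *PeerFilterApp) Query(req abci.RequestQuery) abci.ResponseQuery {
	parts := strings.Split(strings.TrimPrefix(req.Path, "/"), "/")
	// Expect p2p/filter/{addr,id}/<value>.
	if len(parts) == 4 && parts[0] == "p2p" && parts[1] == "filter" {
		if parts[2] == "id" && app.blockedIDs[parts[3]] {
			// A non-zero code tells Tendermint to refuse the connection.
			return abci.ResponseQuery{Code: 1, Log: "peer id is blocked"}
		}
	}
	// A zero code keeps the peer.
	return abci.ResponseQuery{Code: 0}
}
```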
@@ -17,9 +17,6 @@ We will attempt to connect to the peer at IP:PORT, and verify,
 via authenticated encryption, that it is in possession of the private key
 corresponding to `<ID>`. This prevents man-in-the-middle attacks on the peer layer.

-If `auth_enc = false`, peers can use an arbitrary ID, but they must always use
-one. Authentication can then happen out-of-band of Tendermint, for instance via VPN.
-
 ## Connections

 All p2p connections use TCP.
@@ -122,9 +122,6 @@ like the file below, however, double check by inspecting the
 # Does not work if the peer-exchange reactor is disabled.
 seed_mode = false

-# Authenticated encryption
-auth_enc = true
-
 # Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
 private_peer_ids = ""

@@ -65,9 +65,7 @@ are connected to at least one validator.
 Config
 ------

-Authenticated encryption is enabled by default. If you wish to use another
-authentication scheme or your peers are connected via VPN, you can turn it off
-by setting ``auth_enc`` to ``false`` in the config file.
+Authenticated encryption is enabled by default.

 Additional Reading
 ------------------
@@ -4,6 +4,7 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
|
clist "github.com/tendermint/tmlibs/clist"
|
||||||
dbm "github.com/tendermint/tmlibs/db"
|
dbm "github.com/tendermint/tmlibs/db"
|
||||||
"github.com/tendermint/tmlibs/log"
|
"github.com/tendermint/tmlibs/log"
|
||||||
|
|
||||||
@@ -17,6 +18,7 @@ type EvidencePool struct {
|
|||||||
logger log.Logger
|
logger log.Logger
|
||||||
|
|
||||||
evidenceStore *EvidenceStore
|
evidenceStore *EvidenceStore
|
||||||
|
evidenceList *clist.CList // concurrent linked-list of evidence
|
||||||
|
|
||||||
// needed to load validators to verify evidence
|
// needed to load validators to verify evidence
|
||||||
stateDB dbm.DB
|
stateDB dbm.DB
|
||||||
@@ -24,9 +26,6 @@ type EvidencePool struct {
|
|||||||
// latest state
|
// latest state
|
||||||
mtx sync.Mutex
|
mtx sync.Mutex
|
||||||
state sm.State
|
state sm.State
|
||||||
|
|
||||||
// never close
|
|
||||||
evidenceChan chan types.Evidence
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewEvidencePool(stateDB dbm.DB, evidenceStore *EvidenceStore) *EvidencePool {
|
func NewEvidencePool(stateDB dbm.DB, evidenceStore *EvidenceStore) *EvidencePool {
|
||||||
@@ -35,21 +34,24 @@ func NewEvidencePool(stateDB dbm.DB, evidenceStore *EvidenceStore) *EvidencePool
|
|||||||
state: sm.LoadState(stateDB),
|
state: sm.LoadState(stateDB),
|
||||||
logger: log.NewNopLogger(),
|
logger: log.NewNopLogger(),
|
||||||
evidenceStore: evidenceStore,
|
evidenceStore: evidenceStore,
|
||||||
evidenceChan: make(chan types.Evidence),
|
evidenceList: clist.New(),
|
||||||
}
|
}
|
||||||
return evpool
|
return evpool
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (evpool *EvidencePool) EvidenceFront() *clist.CElement {
|
||||||
|
return evpool.evidenceList.Front()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (evpool *EvidencePool) EvidenceWaitChan() <-chan struct{} {
|
||||||
|
return evpool.evidenceList.WaitChan()
|
||||||
|
}
|
||||||
|
|
||||||
// SetLogger sets the Logger.
|
// SetLogger sets the Logger.
|
||||||
func (evpool *EvidencePool) SetLogger(l log.Logger) {
|
func (evpool *EvidencePool) SetLogger(l log.Logger) {
|
||||||
evpool.logger = l
|
evpool.logger = l
|
||||||
}
|
}
|
||||||
|
|
||||||
// EvidenceChan returns an unbuffered channel on which new evidence can be received.
|
|
||||||
func (evpool *EvidencePool) EvidenceChan() <-chan types.Evidence {
|
|
||||||
return evpool.evidenceChan
|
|
||||||
}
|
|
||||||
|
|
||||||
// PriorityEvidence returns the priority evidence.
|
// PriorityEvidence returns the priority evidence.
|
||||||
func (evpool *EvidencePool) PriorityEvidence() []types.Evidence {
|
func (evpool *EvidencePool) PriorityEvidence() []types.Evidence {
|
||||||
return evpool.evidenceStore.PriorityEvidence()
|
return evpool.evidenceStore.PriorityEvidence()
|
||||||
@@ -68,22 +70,23 @@ func (evpool *EvidencePool) State() sm.State {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Update loads the latest
|
// Update loads the latest
|
||||||
func (evpool *EvidencePool) Update(block *types.Block) {
|
func (evpool *EvidencePool) Update(block *types.Block, state sm.State) {
|
||||||
evpool.mtx.Lock()
|
|
||||||
defer evpool.mtx.Unlock()
|
|
||||||
|
|
||||||
state := sm.LoadState(evpool.stateDB)
|
// sanity check
|
||||||
if state.LastBlockHeight != block.Height {
|
if state.LastBlockHeight != block.Height {
|
||||||
panic(fmt.Sprintf("EvidencePool.Update: loaded state with height %d when block.Height=%d", state.LastBlockHeight, block.Height))
|
panic(fmt.Sprintf("Failed EvidencePool.Update sanity check: got state.Height=%d with block.Height=%d", state.LastBlockHeight, block.Height))
|
||||||
}
|
}
|
||||||
evpool.state = state
|
|
||||||
|
|
||||||
// NOTE: shouldn't need the mutex
|
// update the state
|
||||||
evpool.MarkEvidenceAsCommitted(block.Evidence.Evidence)
|
evpool.mtx.Lock()
|
||||||
|
evpool.state = state
|
||||||
|
evpool.mtx.Unlock()
|
||||||
|
|
||||||
|
// remove evidence from pending and mark committed
|
||||||
|
evpool.MarkEvidenceAsCommitted(block.Height, block.Evidence.Evidence)
|
||||||
}
|
}
|
||||||
|
|
||||||
// AddEvidence checks the evidence is valid and adds it to the pool.
|
// AddEvidence checks the evidence is valid and adds it to the pool.
|
||||||
// Blocks on the EvidenceChan.
|
|
||||||
func (evpool *EvidencePool) AddEvidence(evidence types.Evidence) (err error) {
|
func (evpool *EvidencePool) AddEvidence(evidence types.Evidence) (err error) {
|
||||||
|
|
||||||
// TODO: check if we already have evidence for this
|
// TODO: check if we already have evidence for this
|
||||||
@@ -107,14 +110,43 @@ func (evpool *EvidencePool) AddEvidence(evidence types.Evidence) (err error) {
|
|||||||
|
|
||||||
evpool.logger.Info("Verified new evidence of byzantine behaviour", "evidence", evidence)
|
evpool.logger.Info("Verified new evidence of byzantine behaviour", "evidence", evidence)
|
||||||
|
|
||||||
// never closes. always safe to send on
|
// add evidence to clist
|
||||||
evpool.evidenceChan <- evidence
|
evpool.evidenceList.PushBack(evidence)
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// MarkEvidenceAsCommitted marks all the evidence as committed.
|
// MarkEvidenceAsCommitted marks all the evidence as committed and removes it from the queue.
|
||||||
func (evpool *EvidencePool) MarkEvidenceAsCommitted(evidence []types.Evidence) {
|
func (evpool *EvidencePool) MarkEvidenceAsCommitted(height int64, evidence []types.Evidence) {
|
||||||
|
// make a map of committed evidence to remove from the clist
|
||||||
|
blockEvidenceMap := make(map[string]struct{})
|
||||||
for _, ev := range evidence {
|
for _, ev := range evidence {
|
||||||
evpool.evidenceStore.MarkEvidenceAsCommitted(ev)
|
evpool.evidenceStore.MarkEvidenceAsCommitted(ev)
|
||||||
|
blockEvidenceMap[evMapKey(ev)] = struct{}{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// remove committed evidence from the clist
|
||||||
|
maxAge := evpool.State().ConsensusParams.EvidenceParams.MaxAge
|
||||||
|
evpool.removeEvidence(height, maxAge, blockEvidenceMap)
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func (evpool *EvidencePool) removeEvidence(height, maxAge int64, blockEvidenceMap map[string]struct{}) {
|
||||||
|
for e := evpool.evidenceList.Front(); e != nil; e = e.Next() {
|
||||||
|
ev := e.Value.(types.Evidence)
|
||||||
|
|
||||||
|
// Remove the evidence if it's already in a block
|
||||||
|
// or if it's now too old.
|
||||||
|
if _, ok := blockEvidenceMap[evMapKey(ev)]; ok ||
|
||||||
|
ev.Height() < height-maxAge {
|
||||||
|
|
||||||
|
// remove from clist
|
||||||
|
evpool.evidenceList.Remove(e)
|
||||||
|
e.DetachPrev()
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func evMapKey(ev types.Evidence) string {
|
||||||
|
return string(ev.Hash())
|
||||||
|
}
|
||||||
|
@@ -45,7 +45,6 @@ func initializeValidatorState(valAddr []byte, height int64) dbm.DB {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestEvidencePool(t *testing.T) {
|
func TestEvidencePool(t *testing.T) {
|
||||||
assert := assert.New(t)
|
|
||||||
|
|
||||||
valAddr := []byte("val1")
|
valAddr := []byte("val1")
|
||||||
height := int64(5)
|
height := int64(5)
|
||||||
@@ -56,26 +55,25 @@ func TestEvidencePool(t *testing.T) {
|
|||||||
goodEvidence := types.NewMockGoodEvidence(height, 0, valAddr)
|
goodEvidence := types.NewMockGoodEvidence(height, 0, valAddr)
|
||||||
badEvidence := types.MockBadEvidence{goodEvidence}
|
badEvidence := types.MockBadEvidence{goodEvidence}
|
||||||
|
|
||||||
|
// bad evidence
|
||||||
err := pool.AddEvidence(badEvidence)
|
err := pool.AddEvidence(badEvidence)
|
||||||
assert.NotNil(err)
|
assert.NotNil(t, err)
|
||||||
|
|
||||||
var wg sync.WaitGroup
|
var wg sync.WaitGroup
|
||||||
wg.Add(1)
|
wg.Add(1)
|
||||||
go func() {
|
go func() {
|
||||||
<-pool.EvidenceChan()
|
<-pool.EvidenceWaitChan()
|
||||||
wg.Done()
|
wg.Done()
|
||||||
}()
|
}()
|
||||||
|
|
||||||
err = pool.AddEvidence(goodEvidence)
|
err = pool.AddEvidence(goodEvidence)
|
||||||
assert.Nil(err)
|
assert.Nil(t, err)
|
||||||
wg.Wait()
|
wg.Wait()
|
||||||
|
|
||||||
// if we send it again it wont fire on the chan
|
assert.Equal(t, 1, pool.evidenceList.Len())
|
||||||
|
|
||||||
|
// if we send it again, it shouldnt change the size
|
||||||
err = pool.AddEvidence(goodEvidence)
|
err = pool.AddEvidence(goodEvidence)
|
||||||
assert.Nil(err)
|
assert.Nil(t, err)
|
||||||
select {
|
assert.Equal(t, 1, pool.evidenceList.Len())
|
||||||
case <-pool.EvidenceChan():
|
|
||||||
t.Fatal("unexpected read on EvidenceChan")
|
|
||||||
default:
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
@@ -6,6 +6,7 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/tendermint/go-amino"
|
"github.com/tendermint/go-amino"
|
||||||
|
clist "github.com/tendermint/tmlibs/clist"
|
||||||
"github.com/tendermint/tmlibs/log"
|
"github.com/tendermint/tmlibs/log"
|
||||||
|
|
||||||
"github.com/tendermint/tendermint/p2p"
|
"github.com/tendermint/tendermint/p2p"
|
||||||
@@ -15,8 +16,10 @@ import (
|
|||||||
const (
|
const (
|
||||||
EvidenceChannel = byte(0x38)
|
EvidenceChannel = byte(0x38)
|
||||||
|
|
||||||
maxMsgSize = 1048576 // 1MB TODO make it configurable
|
maxMsgSize = 1048576 // 1MB TODO make it configurable
|
||||||
broadcastEvidenceIntervalS = 60 // broadcast uncommitted evidence this often
|
|
||||||
|
broadcastEvidenceIntervalS = 60 // broadcast uncommitted evidence this often
|
||||||
|
peerCatchupSleepIntervalMS = 100 // If peer is behind, sleep this amount
|
||||||
)
|
)
|
||||||
|
|
||||||
// EvidenceReactor handles evpool evidence broadcasting amongst peers.
|
// EvidenceReactor handles evpool evidence broadcasting amongst peers.
|
||||||
@@ -43,11 +46,7 @@ func (evR *EvidenceReactor) SetLogger(l log.Logger) {
|
|||||||
|
|
||||||
// OnStart implements cmn.Service
|
// OnStart implements cmn.Service
|
||||||
func (evR *EvidenceReactor) OnStart() error {
|
func (evR *EvidenceReactor) OnStart() error {
|
||||||
if err := evR.BaseReactor.OnStart(); err != nil {
|
return evR.BaseReactor.OnStart()
|
||||||
return err
|
|
||||||
}
|
|
||||||
go evR.broadcastRoutine()
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetChannels implements Reactor.
|
// GetChannels implements Reactor.
|
||||||
@@ -63,14 +62,7 @@ func (evR *EvidenceReactor) GetChannels() []*p2p.ChannelDescriptor {
|
|||||||
|
|
||||||
// AddPeer implements Reactor.
|
// AddPeer implements Reactor.
|
||||||
func (evR *EvidenceReactor) AddPeer(peer p2p.Peer) {
|
func (evR *EvidenceReactor) AddPeer(peer p2p.Peer) {
|
||||||
// send the peer our high-priority evidence.
|
go evR.broadcastEvidenceRoutine(peer)
|
||||||
// the rest will be sent by the broadcastRoutine
|
|
||||||
evidences := evR.evpool.PriorityEvidence()
|
|
||||||
msg := &EvidenceListMessage{evidences}
|
|
||||||
success := peer.Send(EvidenceChannel, cdc.MustMarshalBinaryBare(msg))
|
|
||||||
if !success {
|
|
||||||
// TODO: remove peer ?
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// RemovePeer implements Reactor.
|
// RemovePeer implements Reactor.
|
||||||
@@ -109,30 +101,97 @@ func (evR *EvidenceReactor) SetEventBus(b *types.EventBus) {
|
|||||||
evR.eventBus = b
|
evR.eventBus = b
|
||||||
}
|
}
|
||||||
|
|
||||||
// Broadcast new evidence to all peers.
|
// Modeled after the mempool routine.
|
||||||
// Broadcasts must be non-blocking so routine is always available to read off EvidenceChan.
|
// - Evidence accumulates in a clist.
|
||||||
func (evR *EvidenceReactor) broadcastRoutine() {
|
// - Each peer has a routien that iterates through the clist,
|
||||||
ticker := time.NewTicker(time.Second * broadcastEvidenceIntervalS)
|
// sending available evidence to the peer.
|
||||||
|
// - If we're waiting for new evidence and the list is not empty,
|
||||||
|
// start iterating from the beginning again.
|
||||||
|
func (evR *EvidenceReactor) broadcastEvidenceRoutine(peer p2p.Peer) {
|
||||||
|
var next *clist.CElement
|
||||||
for {
|
for {
|
||||||
select {
|
// This happens because the CElement we were looking at got garbage
|
||||||
case evidence := <-evR.evpool.EvidenceChan():
|
// collected (removed). That is, .NextWait() returned nil. Go ahead and
|
||||||
// broadcast some new evidence
|
// start from the beginning.
|
||||||
msg := &EvidenceListMessage{[]types.Evidence{evidence}}
|
if next == nil {
|
||||||
evR.Switch.Broadcast(EvidenceChannel, cdc.MustMarshalBinaryBare(msg))
|
select {
|
||||||
|
case <-evR.evpool.EvidenceWaitChan(): // Wait until evidence is available
|
||||||
|
if next = evR.evpool.EvidenceFront(); next == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
case <-peer.Quit():
|
||||||
|
return
|
||||||
|
case <-evR.Quit():
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// TODO: Broadcast runs asynchronously, so this should wait on the successChan
|
ev := next.Value.(types.Evidence)
|
||||||
// in another routine before marking to be proper.
|
msg, retry := evR.checkSendEvidenceMessage(peer, ev)
|
||||||
evR.evpool.evidenceStore.MarkEvidenceAsBroadcasted(evidence)
|
if msg != nil {
|
||||||
case <-ticker.C:
|
success := peer.Send(EvidenceChannel, cdc.MustMarshalBinaryBare(msg))
|
||||||
// broadcast all pending evidence
|
retry = !success
|
||||||
msg := &EvidenceListMessage{evR.evpool.PendingEvidence()}
|
}
|
||||||
evR.Switch.Broadcast(EvidenceChannel, cdc.MustMarshalBinaryBare(msg))
|
|
||||||
|
if retry {
|
||||||
|
time.Sleep(peerCatchupSleepIntervalMS * time.Millisecond)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
afterCh := time.After(time.Second * broadcastEvidenceIntervalS)
|
||||||
|
select {
|
||||||
|
case <-afterCh:
|
||||||
|
// start from the beginning every tick.
|
||||||
|
// TODO: only do this if we're at the end of the list!
|
||||||
|
next = nil
|
||||||
|
case <-next.NextWaitChan():
|
||||||
|
// see the start of the for loop for nil check
|
||||||
|
next = next.Next()
|
||||||
|
case <-peer.Quit():
|
||||||
|
return
|
||||||
case <-evR.Quit():
|
case <-evR.Quit():
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Returns the message to send the peer, or nil if the evidence is invalid for the peer.
|
||||||
|
// If message is nil, return true if we should sleep and try again.
|
||||||
|
func (evR EvidenceReactor) checkSendEvidenceMessage(peer p2p.Peer, ev types.Evidence) (msg EvidenceMessage, retry bool) {
|
||||||
|
|
||||||
|
// make sure the peer is up to date
|
||||||
|
evHeight := ev.Height()
|
||||||
|
peerState, ok := peer.Get(types.PeerStateKey).(PeerState)
|
||||||
|
if !ok {
|
||||||
|
evR.Logger.Info("Found peer without PeerState", "peer", peer)
|
||||||
|
return nil, true
|
||||||
|
}
|
||||||
|
|
||||||
|
// NOTE: We only send evidence to peers where
|
||||||
|
// peerHeight - maxAge < evidenceHeight < peerHeight
|
||||||
|
maxAge := evR.evpool.State().ConsensusParams.EvidenceParams.MaxAge
|
||||||
|
peerHeight := peerState.GetHeight()
|
||||||
|
if peerHeight < evHeight {
|
||||||
|
// peer is behind. sleep while he catches up
|
||||||
|
return nil, true
|
||||||
|
} else if peerHeight > evHeight+maxAge {
|
||||||
|
// evidence is too old, skip
|
||||||
|
// NOTE: if evidence is too old for an honest peer,
|
||||||
|
// then we're behind and either it already got committed or it never will!
|
||||||
|
evR.Logger.Info("Not sending peer old evidence", "peerHeight", peerHeight, "evHeight", evHeight, "maxAge", maxAge, "peer", peer)
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
|
||||||
|
// send evidence
|
||||||
|
msg = &EvidenceListMessage{[]types.Evidence{ev}}
|
||||||
|
return msg, false
|
||||||
|
}
|
||||||
|
|
||||||
|
// PeerState describes the state of a peer.
|
||||||
|
type PeerState interface {
|
||||||
|
GetHeight() int64
|
||||||
|
}
|
||||||
|
|
||||||
//-----------------------------------------------------------------------------
|
//-----------------------------------------------------------------------------
|
||||||
// Messages
|
// Messages
|
||||||
|
|
||||||
|
@@ -84,7 +84,7 @@ func _waitForEvidence(t *testing.T, wg *sync.WaitGroup, evs types.EvidenceList,
|
|||||||
}
|
}
|
||||||
|
|
||||||
reapedEv := evpool.PendingEvidence()
|
reapedEv := evpool.PendingEvidence()
|
||||||
// put the reaped evidence is a map so we can quickly check we got everything
|
// put the reaped evidence in a map so we can quickly check we got everything
|
||||||
evMap := make(map[string]types.Evidence)
|
evMap := make(map[string]types.Evidence)
|
||||||
for _, e := range reapedEv {
|
for _, e := range reapedEv {
|
||||||
evMap[string(e.Hash())] = e
|
evMap[string(e.Hash())] = e
|
||||||
@@ -95,6 +95,7 @@ func _waitForEvidence(t *testing.T, wg *sync.WaitGroup, evs types.EvidenceList,
|
|||||||
fmt.Sprintf("evidence at index %d on reactor %d don't match: %v vs %v",
|
fmt.Sprintf("evidence at index %d on reactor %d don't match: %v vs %v",
|
||||||
i, reactorIdx, expectedEv, gotEv))
|
i, reactorIdx, expectedEv, gotEv))
|
||||||
}
|
}
|
||||||
|
|
||||||
wg.Done()
|
wg.Done()
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -110,7 +111,7 @@ func sendEvidence(t *testing.T, evpool *EvidencePool, valAddr []byte, n int) typ
|
|||||||
}
|
}
|
||||||
|
|
||||||
var (
|
var (
|
||||||
NUM_EVIDENCE = 1
|
NUM_EVIDENCE = 10
|
||||||
TIMEOUT = 120 * time.Second // ridiculously high because CircleCI is slow
|
TIMEOUT = 120 * time.Second // ridiculously high because CircleCI is slow
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -130,8 +131,52 @@ func TestReactorBroadcastEvidence(t *testing.T) {
|
|||||||
// make reactors from statedb
|
// make reactors from statedb
|
||||||
reactors := makeAndConnectEvidenceReactors(config, stateDBs)
|
reactors := makeAndConnectEvidenceReactors(config, stateDBs)
|
||||||
|
|
||||||
|
// set the peer height on each reactor
|
||||||
|
for _, r := range reactors {
|
||||||
|
for _, peer := range r.Switch.Peers().List() {
|
||||||
|
ps := peerState{height}
|
||||||
|
peer.Set(types.PeerStateKey, ps)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// send a bunch of valid evidence to the first reactor's evpool
|
// send a bunch of valid evidence to the first reactor's evpool
|
||||||
// and wait for them all to be received in the others
|
// and wait for them all to be received in the others
|
||||||
evList := sendEvidence(t, reactors[0].evpool, valAddr, NUM_EVIDENCE)
|
evList := sendEvidence(t, reactors[0].evpool, valAddr, NUM_EVIDENCE)
|
||||||
waitForEvidence(t, evList, reactors)
|
waitForEvidence(t, evList, reactors)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type peerState struct {
|
||||||
|
height int64
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ps peerState) GetHeight() int64 {
|
||||||
|
return ps.height
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestReactorSelectiveBroadcast(t *testing.T) {
|
||||||
|
config := cfg.TestConfig()
|
||||||
|
|
||||||
|
valAddr := []byte("myval")
|
||||||
|
height1 := int64(NUM_EVIDENCE) + 10
|
||||||
|
height2 := int64(NUM_EVIDENCE) / 2
|
||||||
|
|
||||||
|
// DB1 is ahead of DB2
|
||||||
|
stateDB1 := initializeValidatorState(valAddr, height1)
|
||||||
|
stateDB2 := initializeValidatorState(valAddr, height2)
|
||||||
|
|
||||||
|
// make reactors from statedb
|
||||||
|
reactors := makeAndConnectEvidenceReactors(config, []dbm.DB{stateDB1, stateDB2})
|
||||||
|
peer := reactors[0].Switch.Peers().List()[0]
|
||||||
|
ps := peerState{height2}
|
||||||
|
peer.Set(types.PeerStateKey, ps)
|
||||||
|
|
||||||
|
// send a bunch of valid evidence to the first reactor's evpool
|
||||||
|
evList := sendEvidence(t, reactors[0].evpool, valAddr, NUM_EVIDENCE)
|
||||||
|
|
||||||
|
// only ones less than the peers height should make it through
|
||||||
|
waitForEvidence(t, evList[:NUM_EVIDENCE/2], reactors[1:2])
|
||||||
|
|
||||||
|
// peers should still be connected
|
||||||
|
peers := reactors[1].Switch.Peers().List()
|
||||||
|
assert.Equal(t, 1, len(peers))
|
||||||
|
}
|
||||||
|
@@ -17,10 +17,6 @@ Impl:
 	- First commit atomically in outqueue, pending, lookup.
 	- Once broadcast, remove from outqueue. No need to sync
 	- Once committed, atomically remove from pending and update lookup.
-		- TODO: If we crash after committed but before removing/updating,
-		  we'll be stuck broadcasting evidence we never know we committed.
-		  so either share the state db and atomically MarkCommitted
-		  with ApplyBlock, or check all outqueue/pending on Start to see if its committed

 Schema for indexing evidence (note you need both height and hash to find a piece of evidence):

|
|||||||
store.db.Delete(key)
|
store.db.Delete(key)
|
||||||
}
|
}
|
||||||
|
|
||||||
// MarkEvidenceAsPending removes evidence from pending and outqueue and sets the state to committed.
|
// MarkEvidenceAsCommitted removes evidence from pending and outqueue and sets the state to committed.
|
||||||
func (store *EvidenceStore) MarkEvidenceAsCommitted(evidence types.Evidence) {
|
func (store *EvidenceStore) MarkEvidenceAsCommitted(evidence types.Evidence) {
|
||||||
// if its committed, its been broadcast
|
// if its committed, its been broadcast
|
||||||
store.MarkEvidenceAsBroadcasted(evidence)
|
store.MarkEvidenceAsBroadcasted(evidence)
|
||||||
|
@@ -72,8 +72,8 @@ type Mempool struct {
|
|||||||
rechecking int32 // for re-checking filtered txs on Update()
|
rechecking int32 // for re-checking filtered txs on Update()
|
||||||
recheckCursor *clist.CElement // next expected response
|
recheckCursor *clist.CElement // next expected response
|
||||||
recheckEnd *clist.CElement // re-checking stops here
|
recheckEnd *clist.CElement // re-checking stops here
|
||||||
notifiedTxsAvailable bool // true if fired on txsAvailable for this height
|
notifiedTxsAvailable bool
|
||||||
txsAvailable chan int64 // fires the next height once for each height, when the mempool is not empty
|
txsAvailable chan int64 // fires the next height once for each height, when the mempool is not empty
|
||||||
|
|
||||||
// Keep a cache of already-seen txs.
|
// Keep a cache of already-seen txs.
|
||||||
// This reduces the pressure on the proxyApp.
|
// This reduces the pressure on the proxyApp.
|
||||||
@@ -328,8 +328,12 @@ func (mem *Mempool) notifyTxsAvailable() {
|
|||||||
panic("notified txs available but mempool is empty!")
|
panic("notified txs available but mempool is empty!")
|
||||||
}
|
}
|
||||||
if mem.txsAvailable != nil && !mem.notifiedTxsAvailable {
|
if mem.txsAvailable != nil && !mem.notifiedTxsAvailable {
|
||||||
|
// channel cap is 1, so this will send once
|
||||||
|
select {
|
||||||
|
case mem.txsAvailable <- mem.height + 1:
|
||||||
|
default:
|
||||||
|
}
|
||||||
mem.notifiedTxsAvailable = true
|
mem.notifiedTxsAvailable = true
|
||||||
mem.txsAvailable <- mem.height + 1
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -382,7 +386,7 @@ func (mem *Mempool) Update(height int64, txs types.Txs) error {
|
|||||||
// Recheck mempool txs if any txs were committed in the block
|
// Recheck mempool txs if any txs were committed in the block
|
||||||
// NOTE/XXX: in some apps a tx could be invalidated due to EndBlock,
|
// NOTE/XXX: in some apps a tx could be invalidated due to EndBlock,
|
||||||
// so we really still do need to recheck, but this is for debugging
|
// so we really still do need to recheck, but this is for debugging
|
||||||
if mem.config.Recheck && (mem.config.RecheckEmpty || len(txs) > 0) {
|
if mem.config.Recheck && (mem.config.RecheckEmpty || len(goodTxs) > 0) {
|
||||||
mem.logger.Info("Recheck txs", "numtxs", len(goodTxs), "height", height)
|
mem.logger.Info("Recheck txs", "numtxs", len(goodTxs), "height", height)
|
||||||
mem.recheckTxs(goodTxs)
|
mem.recheckTxs(goodTxs)
|
||||||
// At this point, mem.txs are being rechecked.
|
// At this point, mem.txs are being rechecked.
|
||||||
@@ -474,6 +478,11 @@ func (cache *txCache) Push(tx types.Tx) bool {
|
|||||||
cache.mtx.Lock()
|
cache.mtx.Lock()
|
||||||
defer cache.mtx.Unlock()
|
defer cache.mtx.Unlock()
|
||||||
|
|
||||||
|
// if cache size is 0, do nothing
|
||||||
|
if cache.size == 0 {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
if _, exists := cache.map_[string(tx)]; exists {
|
if _, exists := cache.map_[string(tx)]; exists {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
@@ -103,6 +103,7 @@ type PeerState interface {
|
|||||||
// Send new mempool txs to peer.
|
// Send new mempool txs to peer.
|
||||||
func (memR *MempoolReactor) broadcastTxRoutine(peer p2p.Peer) {
|
func (memR *MempoolReactor) broadcastTxRoutine(peer p2p.Peer) {
|
||||||
if !memR.config.Broadcast {
|
if !memR.config.Broadcast {
|
||||||
|
memR.Logger.Info("Tx broadcasting is disabled")
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -129,7 +130,8 @@ func (memR *MempoolReactor) broadcastTxRoutine(peer p2p.Peer) {
|
|||||||
height := memTx.Height()
|
height := memTx.Height()
|
||||||
if peerState_i := peer.Get(types.PeerStateKey); peerState_i != nil {
|
if peerState_i := peer.Get(types.PeerStateKey); peerState_i != nil {
|
||||||
peerState := peerState_i.(PeerState)
|
peerState := peerState_i.(PeerState)
|
||||||
if peerState.GetHeight() < height-1 { // Allow for a lag of 1 block
|
peerHeight := peerState.GetHeight()
|
||||||
|
if peerHeight < height-1 { // Allow for a lag of 1 block
|
||||||
time.Sleep(peerCatchupSleepIntervalMS * time.Millisecond)
|
time.Sleep(peerCatchupSleepIntervalMS * time.Millisecond)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
node/node.go
@@ -21,6 +21,7 @@ import (
|
|||||||
mempl "github.com/tendermint/tendermint/mempool"
|
mempl "github.com/tendermint/tendermint/mempool"
|
||||||
"github.com/tendermint/tendermint/p2p"
|
"github.com/tendermint/tendermint/p2p"
|
||||||
"github.com/tendermint/tendermint/p2p/pex"
|
"github.com/tendermint/tendermint/p2p/pex"
|
||||||
|
"github.com/tendermint/tendermint/privval"
|
||||||
"github.com/tendermint/tendermint/proxy"
|
"github.com/tendermint/tendermint/proxy"
|
||||||
rpccore "github.com/tendermint/tendermint/rpc/core"
|
rpccore "github.com/tendermint/tendermint/rpc/core"
|
||||||
ctypes "github.com/tendermint/tendermint/rpc/core/types"
|
ctypes "github.com/tendermint/tendermint/rpc/core/types"
|
||||||
@@ -32,7 +33,6 @@ import (
|
|||||||
"github.com/tendermint/tendermint/state/txindex/kv"
|
"github.com/tendermint/tendermint/state/txindex/kv"
|
||||||
"github.com/tendermint/tendermint/state/txindex/null"
|
"github.com/tendermint/tendermint/state/txindex/null"
|
||||||
"github.com/tendermint/tendermint/types"
|
"github.com/tendermint/tendermint/types"
|
||||||
pvm "github.com/tendermint/tendermint/types/priv_validator"
|
|
||||||
"github.com/tendermint/tendermint/version"
|
"github.com/tendermint/tendermint/version"
|
||||||
|
|
||||||
_ "net/http/pprof"
|
_ "net/http/pprof"
|
||||||
@@ -77,7 +77,7 @@ type NodeProvider func(*cfg.Config, log.Logger) (*Node, error)
|
|||||||
// It implements NodeProvider.
|
// It implements NodeProvider.
|
||||||
func DefaultNewNode(config *cfg.Config, logger log.Logger) (*Node, error) {
|
func DefaultNewNode(config *cfg.Config, logger log.Logger) (*Node, error) {
|
||||||
return NewNode(config,
|
return NewNode(config,
|
||||||
pvm.LoadOrGenFilePV(config.PrivValidatorFile()),
|
privval.LoadOrGenFilePV(config.PrivValidatorFile()),
|
||||||
proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()),
|
proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()),
|
||||||
DefaultGenesisDocProviderFunc(config),
|
DefaultGenesisDocProviderFunc(config),
|
||||||
DefaultDBProvider,
|
DefaultDBProvider,
|
||||||
@@ -159,7 +159,7 @@ func NewNode(config *cfg.Config,
|
|||||||
// and sync tendermint and the app by performing a handshake
|
// and sync tendermint and the app by performing a handshake
|
||||||
// and replaying any necessary blocks
|
// and replaying any necessary blocks
|
||||||
consensusLogger := logger.With("module", "consensus")
|
consensusLogger := logger.With("module", "consensus")
|
||||||
handshaker := cs.NewHandshaker(stateDB, state, blockStore, genDoc.AppState())
|
handshaker := cs.NewHandshaker(stateDB, state, blockStore, genDoc)
|
||||||
handshaker.SetLogger(consensusLogger)
|
handshaker.SetLogger(consensusLogger)
|
||||||
proxyApp := proxy.NewAppConns(clientCreator, handshaker)
|
proxyApp := proxy.NewAppConns(clientCreator, handshaker)
|
||||||
proxyApp.SetLogger(logger.With("module", "proxy"))
|
proxyApp.SetLogger(logger.With("module", "proxy"))
|
||||||
@@ -177,8 +177,8 @@ func NewNode(config *cfg.Config,
|
|||||||
// TODO: persist this key so external signer
|
// TODO: persist this key so external signer
|
||||||
// can actually authenticate us
|
// can actually authenticate us
|
||||||
privKey = crypto.GenPrivKeyEd25519()
|
privKey = crypto.GenPrivKeyEd25519()
|
||||||
pvsc = pvm.NewSocketPV(
|
pvsc = privval.NewSocketPV(
|
||||||
logger.With("module", "pvm"),
|
logger.With("module", "privval"),
|
||||||
config.PrivValidatorListenAddr,
|
config.PrivValidatorListenAddr,
|
||||||
privKey,
|
privKey,
|
||||||
)
|
)
|
||||||
@@ -269,9 +269,6 @@ func NewNode(config *cfg.Config,
|
|||||||
// but it would still be nice to have a clear list of the current "PersistentPeers"
|
// but it would still be nice to have a clear list of the current "PersistentPeers"
|
||||||
// somewhere that we can return with net_info.
|
// somewhere that we can return with net_info.
|
||||||
//
|
//
|
||||||
// Let's assume we always have IDs ... and we just dont authenticate them
|
|
||||||
// if auth_enc=false.
|
|
||||||
//
|
|
||||||
// If PEX is on, it should handle dialing the seeds. Otherwise the switch does it.
|
// If PEX is on, it should handle dialing the seeds. Otherwise the switch does it.
|
||||||
// Note we currently use the addrBook regardless at least for AddOurAddress
|
// Note we currently use the addrBook regardless at least for AddOurAddress
|
||||||
addrBook := pex.NewAddrBook(config.P2P.AddrBookFile(), config.P2P.AddrBookStrict)
|
addrBook := pex.NewAddrBook(config.P2P.AddrBookFile(), config.P2P.AddrBookStrict)
|
||||||
@@ -305,7 +302,7 @@ func NewNode(config *cfg.Config,
|
|||||||
return nil
|
return nil
|
||||||
})
|
})
|
||||||
sw.SetIDFilter(func(id p2p.ID) error {
|
sw.SetIDFilter(func(id p2p.ID) error {
|
||||||
resQuery, err := proxyApp.Query().QuerySync(abci.RequestQuery{Path: cmn.Fmt("/p2p/filter/pubkey/%s", id)})
|
resQuery, err := proxyApp.Query().QuerySync(abci.RequestQuery{Path: cmn.Fmt("/p2p/filter/id/%s", id)})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -450,7 +447,7 @@ func (n *Node) OnStop() {
|
|||||||
n.eventBus.Stop()
|
n.eventBus.Stop()
|
||||||
n.indexerService.Stop()
|
n.indexerService.Stop()
|
||||||
|
|
||||||
if pvsc, ok := n.privValidator.(*pvm.SocketPV); ok {
|
if pvsc, ok := n.privValidator.(*privval.SocketPV); ok {
|
||||||
if err := pvsc.Stop(); err != nil {
|
if err := pvsc.Stop(); err != nil {
|
||||||
n.Logger.Error("Error stopping priv validator socket client", "err", err)
|
n.Logger.Error("Error stopping priv validator socket client", "err", err)
|
||||||
}
|
}
|
||||||
|
@@ -83,7 +83,7 @@ type MConnection struct {
 	onReceive receiveCbFunc
 	onError   errorCbFunc
 	errored   uint32
-	config    *MConnConfig
+	config    MConnConfig

 	quit       chan struct{}
 	flushTimer *cmn.ThrottleTimer // flush writes as necessary but throttled.
@@ -121,8 +121,8 @@ func (cfg *MConnConfig) maxPacketMsgTotalSize() int {
 }

 // DefaultMConnConfig returns the default config.
-func DefaultMConnConfig() *MConnConfig {
-	return &MConnConfig{
+func DefaultMConnConfig() MConnConfig {
+	return MConnConfig{
 		SendRate:                defaultSendRate,
 		RecvRate:                defaultRecvRate,
 		MaxPacketMsgPayloadSize: maxPacketMsgPayloadSizeDefault,
@@ -143,7 +143,7 @@ func NewMConnection(conn net.Conn, chDescs []*ChannelDescriptor, onReceive recei
 }

 // NewMConnectionWithConfig wraps net.Conn and creates multiplex connection with a config
-func NewMConnectionWithConfig(conn net.Conn, chDescs []*ChannelDescriptor, onReceive receiveCbFunc, onError errorCbFunc, config *MConnConfig) *MConnection {
+func NewMConnectionWithConfig(conn net.Conn, chDescs []*ChannelDescriptor, onReceive receiveCbFunc, onError errorCbFunc, config MConnConfig) *MConnection {
 	if config.PongTimeout >= config.PingInterval {
 		panic("pongTimeout must be less than pingInterval (otherwise, next ping will reset pong timer)")
 	}
@@ -545,9 +545,7 @@ FOR_LOOP:
 // not goroutine-safe
 func (c *MConnection) stopPongTimer() {
 	if c.pongTimer != nil {
-		if !c.pongTimer.Stop() {
-			<-c.pongTimer.C
-		}
+		_ = c.pongTimer.Stop()
 		c.pongTimer = nil
 	}
 }
@@ -6,9 +6,11 @@ import (
 	"testing"
 	"time"

+	"github.com/fortytw2/leaktest"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
-	"github.com/tendermint/go-amino"
+
+	amino "github.com/tendermint/go-amino"
 	"github.com/tendermint/tmlibs/log"
 )

@@ -242,7 +244,11 @@ func TestMConnectionMultiplePings(t *testing.T) {
 }

 func TestMConnectionPingPongs(t *testing.T) {
+	// check that we are not leaking any go-routines
+	defer leaktest.CheckTimeout(t, 10*time.Second)()
+
 	server, client := net.Pipe()

 	defer server.Close()
 	defer client.Close()

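Note: after this change DefaultMConnConfig returns an MConnConfig value instead of a pointer, so each caller copies and tweaks its own config. A minimal sketch of constructing a multiplex connection under the new signature; the channel descriptor, callbacks, and the tweaked send rate are illustrative placeholders, not taken from this diff:

package example

import (
	"net"

	tmconn "github.com/tendermint/tendermint/p2p/conn"
)

func buildConn(c net.Conn) *tmconn.MConnection {
	cfg := tmconn.DefaultMConnConfig() // a value now, not *MConnConfig
	cfg.SendRate = 512000              // per-connection tweak; the package defaults are untouched

	chDescs := []*tmconn.ChannelDescriptor{{ID: 0x01, Priority: 1}}
	onReceive := func(chID byte, msgBytes []byte) {}
	onError := func(r interface{}) {}

	// the config is passed by value after this change
	return tmconn.NewMConnectionWithConfig(c, chDescs, onReceive, onError, cfg)
}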
p2p/fuzz.go
@@ -5,16 +5,10 @@ import (
 	"sync"
 	"time"

+	"github.com/tendermint/tendermint/config"
 	cmn "github.com/tendermint/tmlibs/common"
 )

-const (
-	// FuzzModeDrop is a mode in which we randomly drop reads/writes, connections or sleep
-	FuzzModeDrop = iota
-	// FuzzModeDelay is a mode in which we randomly sleep
-	FuzzModeDelay
-)
-
 // FuzzedConnection wraps any net.Conn and depending on the mode either delays
 // reads/writes or randomly drops reads/writes/connections.
 type FuzzedConnection struct {
@@ -24,37 +18,17 @@ type FuzzedConnection struct {
 	start  <-chan time.Time
 	active bool

-	config *FuzzConnConfig
-}
-
-// FuzzConnConfig is a FuzzedConnection configuration.
-type FuzzConnConfig struct {
-	Mode         int
-	MaxDelay     time.Duration
-	ProbDropRW   float64
-	ProbDropConn float64
-	ProbSleep    float64
-}
-
-// DefaultFuzzConnConfig returns the default config.
-func DefaultFuzzConnConfig() *FuzzConnConfig {
-	return &FuzzConnConfig{
-		Mode:         FuzzModeDrop,
-		MaxDelay:     3 * time.Second,
-		ProbDropRW:   0.2,
-		ProbDropConn: 0.00,
-		ProbSleep:    0.00,
-	}
+	config *config.FuzzConnConfig
 }

 // FuzzConn creates a new FuzzedConnection. Fuzzing starts immediately.
 func FuzzConn(conn net.Conn) net.Conn {
-	return FuzzConnFromConfig(conn, DefaultFuzzConnConfig())
+	return FuzzConnFromConfig(conn, config.DefaultFuzzConnConfig())
 }

 // FuzzConnFromConfig creates a new FuzzedConnection from a config. Fuzzing
 // starts immediately.
-func FuzzConnFromConfig(conn net.Conn, config *FuzzConnConfig) net.Conn {
+func FuzzConnFromConfig(conn net.Conn, config *config.FuzzConnConfig) net.Conn {
 	return &FuzzedConnection{
 		conn:  conn,
 		start: make(<-chan time.Time),
@@ -66,12 +40,16 @@ func FuzzConnFromConfig(conn net.Conn, config *FuzzConnConfig) net.Conn {
 // FuzzConnAfter creates a new FuzzedConnection. Fuzzing starts when the
 // duration elapses.
 func FuzzConnAfter(conn net.Conn, d time.Duration) net.Conn {
-	return FuzzConnAfterFromConfig(conn, d, DefaultFuzzConnConfig())
+	return FuzzConnAfterFromConfig(conn, d, config.DefaultFuzzConnConfig())
 }

 // FuzzConnAfterFromConfig creates a new FuzzedConnection from a config.
 // Fuzzing starts when the duration elapses.
-func FuzzConnAfterFromConfig(conn net.Conn, d time.Duration, config *FuzzConnConfig) net.Conn {
+func FuzzConnAfterFromConfig(
+	conn net.Conn,
+	d time.Duration,
+	config *config.FuzzConnConfig,
+) net.Conn {
 	return &FuzzedConnection{
 		conn:  conn,
 		start: time.After(d),
@@ -81,7 +59,7 @@ func FuzzConnAfterFromConfig(conn net.Conn, d time.Duration, config *FuzzConnCon
 }

 // Config returns the connection's config.
-func (fc *FuzzedConnection) Config() *FuzzConnConfig {
+func (fc *FuzzedConnection) Config() *config.FuzzConnConfig {
 	return fc.config
 }

@@ -136,7 +114,7 @@ func (fc *FuzzedConnection) fuzz() bool {
 	}

 	switch fc.config.Mode {
-	case FuzzModeDrop:
+	case config.FuzzModeDrop:
 		// randomly drop the r/w, drop the conn, or sleep
 		r := cmn.RandFloat64()
 		if r <= fc.config.ProbDropRW {
@@ -149,7 +127,7 @@ func (fc *FuzzedConnection) fuzz() bool {
 		} else if r < fc.config.ProbDropRW+fc.config.ProbDropConn+fc.config.ProbSleep {
 			time.Sleep(fc.randomDuration())
 		}
-	case FuzzModeDelay:
+	case config.FuzzModeDelay:
 		// sleep a bit
 		time.Sleep(fc.randomDuration())
 	}
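Note: the fuzz modes and FuzzConnConfig now live in the config package rather than in p2p. A rough sketch of wrapping a connection with a custom fuzz config under the relocated API; the delay values chosen here are illustrative:

package example

import (
	"net"
	"time"

	"github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/p2p"
)

func wrapWithFuzzing(c net.Conn) net.Conn {
	fz := config.DefaultFuzzConnConfig()
	fz.Mode = config.FuzzModeDelay // only delay reads/writes, never drop them
	fz.MaxDelay = 500 * time.Millisecond

	// fuzzing kicks in 10s after the connection is wrapped
	return p2p.FuzzConnAfterFromConfig(c, 10*time.Second, fz)
}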
p2p/peer.go
@@ -10,10 +10,11 @@ import (
 	cmn "github.com/tendermint/tmlibs/common"
 	"github.com/tendermint/tmlibs/log"

+	"github.com/tendermint/tendermint/config"
 	tmconn "github.com/tendermint/tendermint/p2p/conn"
 )

-var testIPSuffix uint32 = 0
+var testIPSuffix uint32

 // Peer is an interface representing a peer connected on a reactor.
 type Peer interface {
@@ -39,7 +40,7 @@ type Peer interface {
 type peerConn struct {
 	outbound   bool
 	persistent bool
-	config     *PeerConfig
+	config     *config.P2PConfig
 	conn       net.Conn // source connection
 	ip         net.IP
 }
@@ -99,110 +100,107 @@ type peer struct {
 	Data *cmn.CMap
 }

-func newPeer(pc peerConn, nodeInfo NodeInfo,
-	reactorsByCh map[byte]Reactor, chDescs []*tmconn.ChannelDescriptor,
-	onPeerError func(Peer, interface{})) *peer {
+func newPeer(
+	pc peerConn,
+	mConfig tmconn.MConnConfig,
+	nodeInfo NodeInfo,
+	reactorsByCh map[byte]Reactor,
+	chDescs []*tmconn.ChannelDescriptor,
+	onPeerError func(Peer, interface{}),
+) *peer {
 	p := &peer{
 		peerConn: pc,
 		nodeInfo: nodeInfo,
 		channels: nodeInfo.Channels,
 		Data:     cmn.NewCMap(),
 	}
-	p.mconn = createMConnection(pc.conn, p, reactorsByCh, chDescs, onPeerError, pc.config.MConfig)
+
+	p.mconn = createMConnection(
+		pc.conn,
+		p,
+		reactorsByCh,
+		chDescs,
+		onPeerError,
+		mConfig,
+	)
 	p.BaseService = *cmn.NewBaseService(nil, "Peer", p)

 	return p
 }

-// PeerConfig is a Peer configuration.
-type PeerConfig struct {
-	AuthEnc bool `mapstructure:"auth_enc"` // authenticated encryption
-
-	// times are in seconds
-	HandshakeTimeout time.Duration `mapstructure:"handshake_timeout"`
-	DialTimeout      time.Duration `mapstructure:"dial_timeout"`
-
-	MConfig *tmconn.MConnConfig `mapstructure:"connection"`
-
-	DialFail   bool            `mapstructure:"dial_fail"` // for testing
-	Fuzz       bool            `mapstructure:"fuzz"`      // fuzz connection (for testing)
-	FuzzConfig *FuzzConnConfig `mapstructure:"fuzz_config"`
-}
-
-// DefaultPeerConfig returns the default config.
-func DefaultPeerConfig() *PeerConfig {
-	return &PeerConfig{
-		AuthEnc:          true,
-		HandshakeTimeout: 20, // * time.Second,
-		DialTimeout:      3,  // * time.Second,
-		MConfig:          tmconn.DefaultMConnConfig(),
-		DialFail:         false,
-		Fuzz:             false,
-		FuzzConfig:       DefaultFuzzConnConfig(),
-	}
-}
-
-func newOutboundPeerConn(addr *NetAddress, config *PeerConfig, persistent bool, ourNodePrivKey crypto.PrivKey) (peerConn, error) {
-	var pc peerConn
-
+func newOutboundPeerConn(
+	addr *NetAddress,
+	config *config.P2PConfig,
+	persistent bool,
+	ourNodePrivKey crypto.PrivKey,
+) (peerConn, error) {
 	conn, err := dial(addr, config)
 	if err != nil {
-		return pc, cmn.ErrorWrap(err, "Error creating peer")
+		return peerConn{}, cmn.ErrorWrap(err, "Error creating peer")
 	}

-	pc, err = newPeerConn(conn, config, true, persistent, ourNodePrivKey)
+	pc, err := newPeerConn(conn, config, true, persistent, ourNodePrivKey)
 	if err != nil {
-		if err2 := conn.Close(); err2 != nil {
-			return pc, cmn.ErrorWrap(err, err2.Error())
+		if cerr := conn.Close(); cerr != nil {
+			return peerConn{}, cmn.ErrorWrap(err, cerr.Error())
 		}
-		return pc, err
+		return peerConn{}, err
 	}

 	// ensure dialed ID matches connection ID
-	if config.AuthEnc && addr.ID != pc.ID() {
-		if err2 := conn.Close(); err2 != nil {
-			return pc, cmn.ErrorWrap(err, err2.Error())
+	if addr.ID != pc.ID() {
+		if cerr := conn.Close(); cerr != nil {
+			return peerConn{}, cmn.ErrorWrap(err, cerr.Error())
 		}
-		return pc, ErrSwitchAuthenticationFailure{addr, pc.ID()}
+		return peerConn{}, ErrSwitchAuthenticationFailure{addr, pc.ID()}
 	}

 	return pc, nil
 }

-func newInboundPeerConn(conn net.Conn, config *PeerConfig, ourNodePrivKey crypto.PrivKey) (peerConn, error) {
+func newInboundPeerConn(
+	conn net.Conn,
+	config *config.P2PConfig,
+	ourNodePrivKey crypto.PrivKey,
+) (peerConn, error) {

 	// TODO: issue PoW challenge

 	return newPeerConn(conn, config, false, false, ourNodePrivKey)
 }

-func newPeerConn(rawConn net.Conn,
-	config *PeerConfig, outbound, persistent bool,
-	ourNodePrivKey crypto.PrivKey) (pc peerConn, err error) {
+func newPeerConn(
+	rawConn net.Conn,
+	cfg *config.P2PConfig,
+	outbound, persistent bool,
+	ourNodePrivKey crypto.PrivKey,
+) (pc peerConn, err error) {
 	conn := rawConn

 	// Fuzz connection
-	if config.Fuzz {
+	if cfg.TestFuzz {
 		// so we have time to do peer handshakes and get set up
-		conn = FuzzConnAfterFromConfig(conn, 10*time.Second, config.FuzzConfig)
+		conn = FuzzConnAfterFromConfig(conn, 10*time.Second, cfg.TestFuzzConfig)
 	}

-	if config.AuthEnc {
-		// Set deadline for secret handshake
-		if err := conn.SetDeadline(time.Now().Add(config.HandshakeTimeout * time.Second)); err != nil {
-			return pc, cmn.ErrorWrap(err, "Error setting deadline while encrypting connection")
-		}
+	// Set deadline for secret handshake
+	dl := time.Now().Add(cfg.HandshakeTimeout)
+	if err := conn.SetDeadline(dl); err != nil {
+		return pc, cmn.ErrorWrap(
+			err,
+			"Error setting deadline while encrypting connection",
+		)
+	}

 	// Encrypt connection
 	conn, err = tmconn.MakeSecretConnection(conn, ourNodePrivKey)
 	if err != nil {
 		return pc, cmn.ErrorWrap(err, "Error creating peer")
-	}
 	}

 	// Only the information we already have
 	return peerConn{
-		config:     config,
+		config:     cfg,
 		outbound:   outbound,
 		persistent: persistent,
 		conn:       conn,
@@ -305,22 +303,33 @@ func (p *peer) hasChannel(chID byte) bool {
 	}
 	// NOTE: probably will want to remove this
 	// but could be helpful while the feature is new
-	p.Logger.Debug("Unknown channel for peer", "channel", chID, "channels", p.channels)
+	p.Logger.Debug(
+		"Unknown channel for peer",
+		"channel",
+		chID,
+		"channels",
+		p.channels,
+	)
 	return false
 }

 //---------------------------------------------------
 // methods used by the Switch

-// CloseConn should be called by the Switch if the peer was created but never started.
+// CloseConn should be called by the Switch if the peer was created but never
+// started.
 func (pc *peerConn) CloseConn() {
 	pc.conn.Close() // nolint: errcheck
 }

-// HandshakeTimeout performs the Tendermint P2P handshake between a given node and the peer
-// by exchanging their NodeInfo. It sets the received nodeInfo on the peer.
+// HandshakeTimeout performs the Tendermint P2P handshake between a given node
+// and the peer by exchanging their NodeInfo. It sets the received nodeInfo on
+// the peer.
 // NOTE: blocking
-func (pc *peerConn) HandshakeTimeout(ourNodeInfo NodeInfo, timeout time.Duration) (peerNodeInfo NodeInfo, err error) {
+func (pc *peerConn) HandshakeTimeout(
+	ourNodeInfo NodeInfo,
+	timeout time.Duration,
+) (peerNodeInfo NodeInfo, err error) {
 	// Set deadline for handshake so we don't block forever on conn.ReadFull
 	if err := pc.conn.SetDeadline(time.Now().Add(timeout)); err != nil {
 		return peerNodeInfo, cmn.ErrorWrap(err, "Error setting deadline")
@@ -332,7 +341,11 @@ func (pc *peerConn) HandshakeTimeout(ourNodeInfo NodeInfo, timeout time.Duration
 			return
 		},
 		func(_ int) (val interface{}, err error, abort bool) {
-			_, err = cdc.UnmarshalBinaryReader(pc.conn, &peerNodeInfo, int64(MaxNodeInfoSize()))
+			_, err = cdc.UnmarshalBinaryReader(
+				pc.conn,
+				&peerNodeInfo,
+				int64(MaxNodeInfoSize()),
+			)
 			return
 		},
 	)
@@ -373,20 +386,26 @@ func (p *peer) String() string {
 //------------------------------------------------------------------
 // helper funcs

-func dial(addr *NetAddress, config *PeerConfig) (net.Conn, error) {
-	if config.DialFail {
+func dial(addr *NetAddress, cfg *config.P2PConfig) (net.Conn, error) {
+	if cfg.TestDialFail {
 		return nil, fmt.Errorf("dial err (peerConfig.DialFail == true)")
 	}

-	conn, err := addr.DialTimeout(config.DialTimeout * time.Second)
+	conn, err := addr.DialTimeout(cfg.DialTimeout)
 	if err != nil {
 		return nil, err
 	}
 	return conn, nil
 }

-func createMConnection(conn net.Conn, p *peer, reactorsByCh map[byte]Reactor, chDescs []*tmconn.ChannelDescriptor,
-	onPeerError func(Peer, interface{}), config *tmconn.MConnConfig) *tmconn.MConnection {
+func createMConnection(
+	conn net.Conn,
+	p *peer,
+	reactorsByCh map[byte]Reactor,
+	chDescs []*tmconn.ChannelDescriptor,
+	onPeerError func(Peer, interface{}),
+	config tmconn.MConnConfig,
+) *tmconn.MConnection {

 	onReceive := func(chID byte, msgBytes []byte) {
 		reactor := reactorsByCh[chID]
@@ -402,5 +421,11 @@ func createMConnection(conn net.Conn, p *peer, reactorsByCh map[byte]Reactor, ch
 		onPeerError(p, r)
 	}

-	return tmconn.NewMConnectionWithConfig(conn, chDescs, onReceive, onError, config)
+	return tmconn.NewMConnectionWithConfig(
+		conn,
+		chDescs,
+		onReceive,
+		onError,
+		config,
+	)
 }
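Note: the peer-level knobs (handshake timeout, dial timeout, test hooks) are now read from *config.P2PConfig, and the timeouts are full time.Duration values used directly instead of bare second counts multiplied at the call sites. A hedged sketch of tuning them; the concrete values simply mirror the defaults from the removed DefaultPeerConfig and are not prescribed by this diff:

package example

import (
	"time"

	"github.com/tendermint/tendermint/config"
)

func tunedP2PConfig() *config.P2PConfig {
	cfg := config.DefaultP2PConfig()

	// durations are used as-is now, no more "* time.Second" at the call sites
	cfg.HandshakeTimeout = 20 * time.Second
	cfg.DialTimeout = 3 * time.Second

	// the old PeerConfig test hooks survive as Test* fields
	cfg.TestDialFail = false
	cfg.TestFuzz = false
	cfg.TestFuzzConfig = config.DefaultFuzzConnConfig()

	return cfg
}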
@@ -10,9 +10,11 @@ import (
 	"github.com/stretchr/testify/require"

 	crypto "github.com/tendermint/go-crypto"
-	tmconn "github.com/tendermint/tendermint/p2p/conn"
 	cmn "github.com/tendermint/tmlibs/common"
 	"github.com/tendermint/tmlibs/log"
+
+	"github.com/tendermint/tendermint/config"
+	tmconn "github.com/tendermint/tendermint/p2p/conn"
 )

 const testCh = 0x01
@@ -21,11 +23,11 @@ func TestPeerBasic(t *testing.T) {
 	assert, require := assert.New(t), require.New(t)

 	// simulate remote peer
-	rp := &remotePeer{PrivKey: crypto.GenPrivKeyEd25519(), Config: DefaultPeerConfig()}
+	rp := &remotePeer{PrivKey: crypto.GenPrivKeyEd25519(), Config: cfg}
 	rp.Start()
 	defer rp.Stop()

-	p, err := createOutboundPeerAndPerformHandshake(rp.Addr(), DefaultPeerConfig())
+	p, err := createOutboundPeerAndPerformHandshake(rp.Addr(), cfg, tmconn.DefaultMConnConfig())
 	require.Nil(err)

 	err = p.Start()
@@ -41,39 +43,17 @@ func TestPeerBasic(t *testing.T) {
 	assert.Equal(rp.ID(), p.ID())
 }

-func TestPeerWithoutAuthEnc(t *testing.T) {
-	assert, require := assert.New(t), require.New(t)
-
-	config := DefaultPeerConfig()
-	config.AuthEnc = false
-
-	// simulate remote peer
-	rp := &remotePeer{PrivKey: crypto.GenPrivKeyEd25519(), Config: config}
-	rp.Start()
-	defer rp.Stop()
-
-	p, err := createOutboundPeerAndPerformHandshake(rp.Addr(), config)
-	require.Nil(err)
-
-	err = p.Start()
-	require.Nil(err)
-	defer p.Stop()
-
-	assert.True(p.IsRunning())
-}
-
 func TestPeerSend(t *testing.T) {
 	assert, require := assert.New(t), require.New(t)

-	config := DefaultPeerConfig()
-	config.AuthEnc = false
+	config := cfg

 	// simulate remote peer
 	rp := &remotePeer{PrivKey: crypto.GenPrivKeyEd25519(), Config: config}
 	rp.Start()
 	defer rp.Stop()

-	p, err := createOutboundPeerAndPerformHandshake(rp.Addr(), config)
+	p, err := createOutboundPeerAndPerformHandshake(rp.Addr(), config, tmconn.DefaultMConnConfig())
 	require.Nil(err)

 	err = p.Start()
@@ -85,7 +65,11 @@ func TestPeerSend(t *testing.T) {
 	assert.True(p.Send(testCh, []byte("Asylum")))
 }

-func createOutboundPeerAndPerformHandshake(addr *NetAddress, config *PeerConfig) (*peer, error) {
+func createOutboundPeerAndPerformHandshake(
+	addr *NetAddress,
+	config *config.P2PConfig,
+	mConfig tmconn.MConnConfig,
+) (*peer, error) {
 	chDescs := []*tmconn.ChannelDescriptor{
 		{ID: testCh, Priority: 1},
 	}
@@ -106,14 +90,14 @@ func createOutboundPeerAndPerformHandshake(addr *NetAddress, config *PeerConfig)
 		return nil, err
 	}

-	p := newPeer(pc, nodeInfo, reactorsByCh, chDescs, func(p Peer, r interface{}) {})
+	p := newPeer(pc, mConfig, nodeInfo, reactorsByCh, chDescs, func(p Peer, r interface{}) {})
 	p.SetLogger(log.TestingLogger().With("peer", addr))
 	return p, nil
 }

 type remotePeer struct {
 	PrivKey  crypto.PrivKey
-	Config   *PeerConfig
+	Config   *config.P2PConfig
 	addr     *NetAddress
 	quit     chan struct{}
 	channels cmn.HexBytes

@@ -267,7 +267,7 @@ func (r *PEXReactor) receiveRequest(src Peer) error {
 	now := time.Now()
 	minInterval := r.minReceiveRequestInterval()
 	if now.Sub(lastReceived) < minInterval {
-		return fmt.Errorf("Peer (%v) send next PEX request too soon. lastReceived: %v, now: %v, minInterval: %v. Disconnecting",
+		return fmt.Errorf("Peer (%v) sent next PEX request too soon. lastReceived: %v, now: %v, minInterval: %v. Disconnecting",
 			src.ID(),
 			lastReceived,
 			now,
@@ -13,21 +13,22 @@ import (
 	"github.com/stretchr/testify/require"

 	crypto "github.com/tendermint/go-crypto"
-	cfg "github.com/tendermint/tendermint/config"
-	"github.com/tendermint/tendermint/p2p"
-	"github.com/tendermint/tendermint/p2p/conn"
 	cmn "github.com/tendermint/tmlibs/common"
 	"github.com/tendermint/tmlibs/log"
+
+	"github.com/tendermint/tendermint/config"
+	"github.com/tendermint/tendermint/p2p"
+	"github.com/tendermint/tendermint/p2p/conn"
 )

 var (
-	config *cfg.P2PConfig
+	cfg *config.P2PConfig
 )

 func init() {
-	config = cfg.DefaultP2PConfig()
-	config.PexReactor = true
-	config.AllowDuplicateIP = true
+	cfg = config.DefaultP2PConfig()
+	cfg.PexReactor = true
+	cfg.AllowDuplicateIP = true
 }

 func TestPEXReactorBasic(t *testing.T) {
@@ -49,7 +50,6 @@ func TestPEXReactorAddRemovePeer(t *testing.T) {
 	assert.Equal(t, size+1, book.Size())

 	r.RemovePeer(peer, "peer not available")
-	assert.Equal(t, size+1, book.Size())

 	outboundPeer := p2p.CreateRandomPeer(true)

@@ -57,7 +57,6 @@ func TestPEXReactorAddRemovePeer(t *testing.T) {
 	assert.Equal(t, size+1, book.Size(), "outbound peers should not be added to the address book")

 	r.RemovePeer(outboundPeer, "peer not available")
-	assert.Equal(t, size+1, book.Size())
 }

 // --- FAIL: TestPEXReactorRunning (11.10s)
@@ -84,7 +83,7 @@ func TestPEXReactorRunning(t *testing.T) {

 	// create switches
 	for i := 0; i < N; i++ {
-		switches[i] = p2p.MakeSwitch(config, i, "testing", "123.123.123", func(i int, sw *p2p.Switch) *p2p.Switch {
+		switches[i] = p2p.MakeSwitch(cfg, i, "testing", "123.123.123", func(i int, sw *p2p.Switch) *p2p.Switch {
 			books[i] = NewAddrBook(filepath.Join(dir, fmt.Sprintf("addrbook%d.json", i)), false)
 			books[i].SetLogger(logger.With("pex", i))
 			sw.SetAddrBook(books[i])
@@ -212,7 +211,7 @@ func TestPEXReactorUsesSeedsIfNeeded(t *testing.T) {

 	// 1. create seed
 	seed := p2p.MakeSwitch(
-		config,
+		cfg,
 		0,
 		"127.0.0.1",
 		"123.123.123",
@@ -242,7 +241,7 @@ func TestPEXReactorUsesSeedsIfNeeded(t *testing.T) {

 	// 2. create usual peer with only seed configured.
 	peer := p2p.MakeSwitch(
-		config,
+		cfg,
 		1,
 		"127.0.0.1",
 		"123.123.123",
@@ -428,7 +427,7 @@ func assertPeersWithTimeout(
 	}
 }

-func createReactor(config *PEXReactorConfig) (r *PEXReactor, book *addrBook) {
+func createReactor(conf *PEXReactorConfig) (r *PEXReactor, book *addrBook) {
 	// directory to store address book
 	dir, err := ioutil.TempDir("", "pex_reactor")
 	if err != nil {
@@ -437,7 +436,7 @@ func createReactor(config *PEXReactorConfig) (r *PEXReactor, book *addrBook) {
 	book = NewAddrBook(filepath.Join(dir, "addrbook.json"), true)
 	book.SetLogger(log.TestingLogger())

-	r = NewPEXReactor(book, config)
+	r = NewPEXReactor(book, conf)
 	r.SetLogger(log.TestingLogger())
 	return
 }
@@ -450,7 +449,7 @@ func teardownReactor(book *addrBook) {
 }

 func createSwitchAndAddReactors(reactors ...p2p.Reactor) *p2p.Switch {
-	sw := p2p.MakeSwitch(config, 0, "127.0.0.1", "123.123.123", func(i int, sw *p2p.Switch) *p2p.Switch { return sw })
+	sw := p2p.MakeSwitch(cfg, 0, "127.0.0.1", "123.123.123", func(i int, sw *p2p.Switch) *p2p.Switch { return sw })
 	sw.SetLogger(log.TestingLogger())
 	for _, r := range reactors {
 		sw.AddReactor(r.String(), r)
@@ -7,7 +7,7 @@ import (
 	"sync"
 	"time"

-	cfg "github.com/tendermint/tendermint/config"
+	"github.com/tendermint/tendermint/config"
 	"github.com/tendermint/tendermint/p2p/conn"
 	cmn "github.com/tendermint/tmlibs/common"
 )
@@ -55,8 +55,7 @@ type AddrBook interface {
 type Switch struct {
 	cmn.BaseService

-	config       *cfg.P2PConfig
-	peerConfig   *PeerConfig
+	config       *config.P2PConfig
 	listeners    []Listener
 	reactors     map[string]Reactor
 	chDescs      []*conn.ChannelDescriptor
@@ -71,14 +70,15 @@ type Switch struct {
 	filterConnByAddr func(net.Addr) error
 	filterConnByID   func(ID) error

+	mConfig conn.MConnConfig
+
 	rng *cmn.Rand // seed for randomizing dial times and orders
 }

 // NewSwitch creates a new Switch with the given config.
-func NewSwitch(config *cfg.P2PConfig) *Switch {
+func NewSwitch(cfg *config.P2PConfig) *Switch {
 	sw := &Switch{
-		config:       config,
-		peerConfig:   DefaultPeerConfig(),
+		config:       cfg,
 		reactors:     make(map[string]Reactor),
 		chDescs:      make([]*conn.ChannelDescriptor, 0),
 		reactorsByCh: make(map[byte]Reactor),
@@ -90,12 +90,13 @@ func NewSwitch(config *cfg.P2PConfig) *Switch {
 	// Ensure we have a completely undeterministic PRNG.
 	sw.rng = cmn.NewRand()

-	// TODO: collapse the peerConfig into the config ?
-	sw.peerConfig.MConfig.FlushThrottle = time.Duration(config.FlushThrottleTimeout) * time.Millisecond
-	sw.peerConfig.MConfig.SendRate = config.SendRate
-	sw.peerConfig.MConfig.RecvRate = config.RecvRate
-	sw.peerConfig.MConfig.MaxPacketMsgPayloadSize = config.MaxPacketMsgPayloadSize
-	sw.peerConfig.AuthEnc = config.AuthEnc
+	mConfig := conn.DefaultMConnConfig()
+	mConfig.FlushThrottle = time.Duration(cfg.FlushThrottleTimeout) * time.Millisecond
+	mConfig.SendRate = cfg.SendRate
+	mConfig.RecvRate = cfg.RecvRate
+	mConfig.MaxPacketMsgPayloadSize = cfg.MaxPacketMsgPayloadSize
+
+	sw.mConfig = mConfig

 	sw.BaseService = *cmn.NewBaseService(nil, "P2P Switch", sw)
 	return sw
@@ -420,7 +421,7 @@ func (sw *Switch) DialPeersAsync(addrBook AddrBook, peers []string, persistent b
 func (sw *Switch) DialPeerWithAddress(addr *NetAddress, persistent bool) error {
 	sw.dialing.Set(string(addr.ID), addr)
 	defer sw.dialing.Delete(string(addr.ID))
-	return sw.addOutboundPeerWithConfig(addr, sw.peerConfig, persistent)
+	return sw.addOutboundPeerWithConfig(addr, sw.config, persistent)
 }

 // sleep for interval plus some random amount of ms on [0, dialRandomizerIntervalMilliseconds]
@@ -477,7 +478,7 @@ func (sw *Switch) listenerRoutine(l Listener) {
 		}

 		// New inbound connection!
-		err := sw.addInboundPeerWithConfig(inConn, sw.peerConfig)
+		err := sw.addInboundPeerWithConfig(inConn, sw.config)
 		if err != nil {
 			sw.Logger.Info("Ignoring inbound connection: error while adding peer", "address", inConn.RemoteAddr().String(), "err", err)
 			continue
@@ -487,7 +488,10 @@ func (sw *Switch) listenerRoutine(l Listener) {
 	// cleanup
 }

-func (sw *Switch) addInboundPeerWithConfig(conn net.Conn, config *PeerConfig) error {
+func (sw *Switch) addInboundPeerWithConfig(
+	conn net.Conn,
+	config *config.P2PConfig,
+) error {
 	peerConn, err := newInboundPeerConn(conn, config, sw.nodeKey.PrivKey)
 	if err != nil {
 		conn.Close() // peer is nil
@@ -504,10 +508,20 @@ func (sw *Switch) addInboundPeerWithConfig(conn net.Conn, config *PeerConfig) er
 // dial the peer; make secret connection; authenticate against the dialed ID;
 // add the peer.
 // if dialing fails, start the reconnect loop. If handhsake fails, its over.
-// If peer is started succesffuly, reconnectLoop will start when StopPeerForError is called
-func (sw *Switch) addOutboundPeerWithConfig(addr *NetAddress, config *PeerConfig, persistent bool) error {
+// If peer is started succesffuly, reconnectLoop will start when
+// StopPeerForError is called
+func (sw *Switch) addOutboundPeerWithConfig(
+	addr *NetAddress,
+	config *config.P2PConfig,
+	persistent bool,
+) error {
 	sw.Logger.Info("Dialing peer", "address", addr)
-	peerConn, err := newOutboundPeerConn(addr, config, persistent, sw.nodeKey.PrivKey)
+	peerConn, err := newOutboundPeerConn(
+		addr,
+		config,
+		persistent,
+		sw.nodeKey.PrivKey,
+	)
 	if err != nil {
 		if persistent {
 			go sw.reconnectToPeer(addr)
@@ -526,7 +540,8 @@ func (sw *Switch) addOutboundPeerWithConfig(addr *NetAddress, config *PeerConfig
 // that already has a SecretConnection. If all goes well,
 // it starts the peer and adds it to the switch.
 // NOTE: This performs a blocking handshake before the peer is added.
-// NOTE: If error is returned, caller is responsible for calling peer.CloseConn()
+// NOTE: If error is returned, caller is responsible for calling
+// peer.CloseConn()
 func (sw *Switch) addPeer(pc peerConn) error {

 	addr := pc.conn.RemoteAddr()
@@ -534,12 +549,8 @@ func (sw *Switch) addPeer(pc peerConn) error {
 		return err
 	}

-	// NOTE: if AuthEnc==false, we don't have a peerID until after the handshake.
-	// If AuthEnc==true then we already know the ID and could do the checks first before the handshake,
-	// but it's simple to just deal with both cases the same after the handshake.
-
 	// Exchange NodeInfo on the conn
-	peerNodeInfo, err := pc.HandshakeTimeout(sw.nodeInfo, time.Duration(sw.peerConfig.HandshakeTimeout*time.Second))
+	peerNodeInfo, err := pc.HandshakeTimeout(sw.nodeInfo, time.Duration(sw.config.HandshakeTimeout))
 	if err != nil {
 		return err
 	}
@@ -547,13 +558,14 @@ func (sw *Switch) addPeer(pc peerConn) error {
 	peerID := peerNodeInfo.ID

 	// ensure connection key matches self reported key
-	if pc.config.AuthEnc {
-		connID := pc.ID()
+	connID := pc.ID()

 	if peerID != connID {
-		return fmt.Errorf("nodeInfo.ID() (%v) doesn't match conn.ID() (%v)",
-			peerID, connID)
-	}
+		return fmt.Errorf(
+			"nodeInfo.ID() (%v) doesn't match conn.ID() (%v)",
+			peerID,
+			connID,
+		)
 	}

 	// Validate the peers nodeInfo
@@ -593,7 +605,7 @@ func (sw *Switch) addPeer(pc peerConn) error {
 		return err
 	}

-	peer := newPeer(pc, peerNodeInfo, sw.reactorsByCh, sw.chDescs, sw.StopPeerForError)
+	peer := newPeer(pc, sw.mConfig, peerNodeInfo, sw.reactorsByCh, sw.chDescs, sw.StopPeerForError)
 	peer.SetLogger(sw.Logger.With("peer", addr))

 	peer.Logger.Info("Successful handshake with peer", "peerNodeInfo", peerNodeInfo)
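Note: the switch now derives a single conn.MConnConfig from the P2P config at construction time instead of carrying a separate PeerConfig. A small sketch that mirrors the wiring shown above, useful if you need the same MConnConfig outside the switch (the helper name is illustrative):

package example

import (
	"time"

	"github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/p2p/conn"
)

// mConnConfigFromP2P mirrors what NewSwitch does internally after this change.
func mConnConfigFromP2P(cfg *config.P2PConfig) conn.MConnConfig {
	mConfig := conn.DefaultMConnConfig()
	mConfig.FlushThrottle = time.Duration(cfg.FlushThrottleTimeout) * time.Millisecond
	mConfig.SendRate = cfg.SendRate
	mConfig.RecvRate = cfg.RecvRate
	mConfig.MaxPacketMsgPayloadSize = cfg.MaxPacketMsgPayloadSize
	return mConfig
}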
@@ -14,18 +14,18 @@ import (
 	crypto "github.com/tendermint/go-crypto"
 	"github.com/tendermint/tmlibs/log"

-	cfg "github.com/tendermint/tendermint/config"
+	"github.com/tendermint/tendermint/config"
 	"github.com/tendermint/tendermint/p2p/conn"
 )

 var (
-	config *cfg.P2PConfig
+	cfg *config.P2PConfig
 )

 func init() {
-	config = cfg.DefaultP2PConfig()
-	config.PexReactor = true
-	config.AllowDuplicateIP = true
+	cfg = config.DefaultP2PConfig()
+	cfg.PexReactor = true
+	cfg.AllowDuplicateIP = true
 }

 type PeerMessage struct {
@@ -85,7 +85,7 @@ func (tr *TestReactor) getMsgs(chID byte) []PeerMessage {
 // XXX: note this uses net.Pipe and not a proper TCP conn
 func MakeSwitchPair(t testing.TB, initSwitch func(int, *Switch) *Switch) (*Switch, *Switch) {
 	// Create two switches that will be interconnected.
-	switches := MakeConnectedSwitches(config, 2, initSwitch, Connect2Switches)
+	switches := MakeConnectedSwitches(cfg, 2, initSwitch, Connect2Switches)
 	return switches[0], switches[1]
 }

@@ -152,8 +152,8 @@ func assertMsgReceivedWithTimeout(t *testing.T, msgBytes []byte, channel byte, r
 }

 func TestConnAddrFilter(t *testing.T) {
-	s1 := MakeSwitch(config, 1, "testing", "123.123.123", initSwitchFunc)
-	s2 := MakeSwitch(config, 1, "testing", "123.123.123", initSwitchFunc)
+	s1 := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc)
+	s2 := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc)
 	defer s1.Stop()
 	defer s2.Stop()

@@ -181,14 +181,14 @@ func TestConnAddrFilter(t *testing.T) {
 }

 func TestSwitchFiltersOutItself(t *testing.T) {
-	s1 := MakeSwitch(config, 1, "127.0.0.1", "123.123.123", initSwitchFunc)
+	s1 := MakeSwitch(cfg, 1, "127.0.0.1", "123.123.123", initSwitchFunc)
 	// addr := s1.NodeInfo().NetAddress()

 	// // add ourselves like we do in node.go#427
 	// s1.addrBook.AddOurAddress(addr)

 	// simulate s1 having a public IP by creating a remote peer with the same ID
-	rp := &remotePeer{PrivKey: s1.nodeKey.PrivKey, Config: DefaultPeerConfig()}
+	rp := &remotePeer{PrivKey: s1.nodeKey.PrivKey, Config: cfg}
 	rp.Start()

 	// addr should be rejected in addPeer based on the same ID
@@ -214,8 +214,8 @@ func assertNoPeersAfterTimeout(t *testing.T, sw *Switch, timeout time.Duration)
 }

 func TestConnIDFilter(t *testing.T) {
-	s1 := MakeSwitch(config, 1, "testing", "123.123.123", initSwitchFunc)
-	s2 := MakeSwitch(config, 1, "testing", "123.123.123", initSwitchFunc)
+	s1 := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc)
+	s2 := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc)
 	defer s1.Stop()
 	defer s2.Stop()

@@ -251,7 +251,7 @@ func TestConnIDFilter(t *testing.T) {
 func TestSwitchStopsNonPersistentPeerOnError(t *testing.T) {
 	assert, require := assert.New(t), require.New(t)

-	sw := MakeSwitch(config, 1, "testing", "123.123.123", initSwitchFunc)
+	sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc)
 	err := sw.Start()
 	if err != nil {
 		t.Error(err)
@@ -259,11 +259,11 @@ func TestSwitchStopsNonPersistentPeerOnError(t *testing.T) {
 	defer sw.Stop()

 	// simulate remote peer
-	rp := &remotePeer{PrivKey: crypto.GenPrivKeyEd25519(), Config: DefaultPeerConfig()}
+	rp := &remotePeer{PrivKey: crypto.GenPrivKeyEd25519(), Config: cfg}
 	rp.Start()
 	defer rp.Stop()

-	pc, err := newOutboundPeerConn(rp.Addr(), DefaultPeerConfig(), false, sw.nodeKey.PrivKey)
+	pc, err := newOutboundPeerConn(rp.Addr(), cfg, false, sw.nodeKey.PrivKey)
 	require.Nil(err)
 	err = sw.addPeer(pc)
 	require.Nil(err)
@@ -281,7 +281,7 @@ func TestSwitchStopsNonPersistentPeerOnError(t *testing.T) {
 func TestSwitchReconnectsToPersistentPeer(t *testing.T) {
 	assert, require := assert.New(t), require.New(t)

-	sw := MakeSwitch(config, 1, "testing", "123.123.123", initSwitchFunc)
+	sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc)
 	err := sw.Start()
 	if err != nil {
 		t.Error(err)
@@ -289,11 +289,11 @@ func TestSwitchReconnectsToPersistentPeer(t *testing.T) {
 	defer sw.Stop()

 	// simulate remote peer
-	rp := &remotePeer{PrivKey: crypto.GenPrivKeyEd25519(), Config: DefaultPeerConfig()}
+	rp := &remotePeer{PrivKey: crypto.GenPrivKeyEd25519(), Config: cfg}
 	rp.Start()
 	defer rp.Stop()

-	pc, err := newOutboundPeerConn(rp.Addr(), DefaultPeerConfig(), true, sw.nodeKey.PrivKey)
+	pc, err := newOutboundPeerConn(rp.Addr(), cfg, true, sw.nodeKey.PrivKey)
 	// sw.reactorsByCh, sw.chDescs, sw.StopPeerForError, sw.nodeKey.PrivKey,
 	require.Nil(err)

@@ -320,7 +320,7 @@ func TestSwitchReconnectsToPersistentPeer(t *testing.T) {
 	// simulate another remote peer
 	rp = &remotePeer{
 		PrivKey: crypto.GenPrivKeyEd25519(),
-		Config:  DefaultPeerConfig(),
+		Config:  cfg,
 		// Use different interface to prevent duplicate IP filter, this will break
 		// beyond two peers.
 		listenAddr: "127.0.0.1:0",
@@ -329,9 +329,9 @@ func TestSwitchReconnectsToPersistentPeer(t *testing.T) {
 	defer rp.Stop()

 	// simulate first time dial failure
-	peerConfig := DefaultPeerConfig()
-	peerConfig.DialFail = true
-	err = sw.addOutboundPeerWithConfig(rp.Addr(), peerConfig, true)
+	conf := config.DefaultP2PConfig()
+	conf.TestDialFail = true
+	err = sw.addOutboundPeerWithConfig(rp.Addr(), conf, true)
 	require.NotNil(err)

 	// DialPeerWithAddres - sw.peerConfig resets the dialer
@@ -348,7 +348,7 @@ func TestSwitchReconnectsToPersistentPeer(t *testing.T) {
 }

 func TestSwitchFullConnectivity(t *testing.T) {
-	switches := MakeConnectedSwitches(config, 3, initSwitchFunc, Connect2Switches)
+	switches := MakeConnectedSwitches(cfg, 3, initSwitchFunc, Connect2Switches)
 	defer func() {
 		for _, sw := range switches {
 			sw.Stop()
@@ -8,7 +8,7 @@ import (
 	cmn "github.com/tendermint/tmlibs/common"
 	"github.com/tendermint/tmlibs/log"

-	cfg "github.com/tendermint/tendermint/config"
+	"github.com/tendermint/tendermint/config"
 	"github.com/tendermint/tendermint/p2p/conn"
 )

@@ -56,7 +56,7 @@ const TEST_HOST = "localhost"
 // If connect==Connect2Switches, the switches will be fully connected.
 // initSwitch defines how the i'th switch should be initialized (ie. with what reactors).
 // NOTE: panics if any switch fails to start.
-func MakeConnectedSwitches(cfg *cfg.P2PConfig, n int, initSwitch func(int, *Switch) *Switch, connect func([]*Switch, int, int)) []*Switch {
+func MakeConnectedSwitches(cfg *config.P2PConfig, n int, initSwitch func(int, *Switch) *Switch, connect func([]*Switch, int, int)) []*Switch {
 	switches := make([]*Switch, n)
 	for i := 0; i < n; i++ {
 		switches[i] = MakeSwitch(cfg, i, TEST_HOST, "123.123.123", initSwitch)
@@ -104,7 +104,7 @@ func Connect2Switches(switches []*Switch, i, j int) {
 }

 func (sw *Switch) addPeerWithConnection(conn net.Conn) error {
-	pc, err := newInboundPeerConn(conn, sw.peerConfig, sw.nodeKey.PrivKey)
+	pc, err := newInboundPeerConn(conn, sw.config, sw.nodeKey.PrivKey)
 	if err != nil {
 		if err := conn.Close(); err != nil {
 			sw.Logger.Error("Error closing connection", "err", err)
@@ -131,7 +131,7 @@ func StartSwitches(switches []*Switch) error {
 	return nil
 }

-func MakeSwitch(cfg *cfg.P2PConfig, i int, network, version string, initSwitch func(int, *Switch) *Switch) *Switch {
+func MakeSwitch(cfg *config.P2PConfig, i int, network, version string, initSwitch func(int, *Switch) *Switch) *Switch {
 	// new switch, add reactors
 	// TODO: let the config be passed in?
 	nodeKey := &NodeKey{
@@ -51,9 +51,9 @@ var (

 	// interfaces defined in types and above
 	stateDB        dbm.DB
-	blockStore     types.BlockStore
-	mempool        types.Mempool
-	evidencePool   types.EvidencePool
+	blockStore     sm.BlockStore
+	mempool        sm.Mempool
+	evidencePool   sm.EvidencePool
 	consensusState Consensus
 	p2pSwitch      P2P

@@ -72,15 +72,15 @@ func SetStateDB(db dbm.DB) {
 	stateDB = db
 }

-func SetBlockStore(bs types.BlockStore) {
+func SetBlockStore(bs sm.BlockStore) {
 	blockStore = bs
 }

-func SetMempool(mem types.Mempool) {
+func SetMempool(mem sm.Mempool) {
 	mempool = mem
 }

-func SetEvidencePool(evpool types.EvidencePool) {
+func SetEvidencePool(evpool sm.EvidencePool) {
 	evidencePool = evpool
 }

@@ -125,6 +125,10 @@ func SetEventBus(b *types.EventBus) {
 }

 func validatePage(page, perPage, totalCount int) int {
+	if perPage < 1 {
+		return 1
+	}
+
 	pages := ((totalCount - 1) / perPage) + 1
 	if page < 1 {
 		page = 1
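Note: validatePage previously divided by perPage unconditionally, so a perPage of 0 meant a division by zero; the new guard returns page 1 instead. A hedged sketch that reproduces the guarded logic for illustration; the upper-bound clamp at the end is assumed from context and is not shown in the hunk above:

package example

// validatePageSketch mirrors the guarded pagination logic shown above.
func validatePageSketch(page, perPage, totalCount int) int {
	if perPage < 1 {
		return 1 // the old code divided by perPage below and failed on 0
	}
	pages := ((totalCount - 1) / perPage) + 1
	if page < 1 {
		page = 1
	}
	if page > pages { // assumed upper-bound clamp, not part of the hunk above
		page = pages
	}
	return page
}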
@@ -15,6 +15,8 @@ func TestPaginationPage(t *testing.T) {
 		page    int
 		newPage int
 	}{
+		{0, 0, 1, 1},
+
 		{0, 10, 0, 1},
 		{0, 10, 1, 1},
 		{0, 10, 2, 1},
@@ -189,8 +189,8 @@ func TxSearch(query string, prove bool, page, perPage int) (*ctypes.ResultTxSear
 	}

 	totalCount := len(results)
-	page = validatePage(page, perPage, totalCount)
 	perPage = validatePerPage(perPage)
+	page = validatePage(page, perPage, totalCount)
 	skipCount := (page - 1) * perPage

 	apiResults := make([]*ctypes.ResultTx, cmn.MinInt(perPage, totalCount-skipCount))
@@ -32,7 +32,7 @@ func RegisterRPCFuncs(mux *http.ServeMux, funcMap map[string]*RPCFunc, cdc *amin
 	}

 	// JSONRPC endpoints
-	mux.HandleFunc("/", makeJSONRPCHandler(funcMap, cdc, logger))
+	mux.HandleFunc("/", handleInvalidJSONRPCPaths(makeJSONRPCHandler(funcMap, cdc, logger)))
 }

 //-------------------------------------
@@ -153,6 +153,19 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, cdc *amino.Codec, logger lo
 	}
 }

+func handleInvalidJSONRPCPaths(next http.HandlerFunc) http.HandlerFunc {
+	return func(w http.ResponseWriter, r *http.Request) {
+		// Since the pattern "/" matches all paths not matched by other registered patterns we check whether the path is indeed
+		// "/", otherwise return a 404 error
+		if r.URL.Path != "/" {
+			http.NotFound(w, r)
+			return
+		}
+
+		next(w, r)
+	}
+}
+
 func mapParamsToArgs(rpcFunc *RPCFunc, cdc *amino.Codec, params map[string]json.RawMessage, argsOffset int) ([]reflect.Value, error) {
 	values := make([]reflect.Value, len(rpcFunc.argNames))
 	for i, argName := range rpcFunc.argNames {
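Note: because http.ServeMux treats "/" as a catch-all pattern, every unknown path used to reach the JSON-RPC handler; the wrapper above rejects anything that is not exactly "/". A minimal standard-library sketch of the same idea; the handler and function names here are placeholders, not taken from this diff:

package example

import "net/http"

// exactPath wraps a handler so it only serves the exact path it is mounted on,
// mirroring the handleInvalidJSONRPCPaths wrapper above.
func exactPath(path string, next http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Path != path {
			http.NotFound(w, r)
			return
		}
		next(w, r)
	}
}

func newMux(rpcHandler http.HandlerFunc) *http.ServeMux {
	mux := http.NewServeMux()
	// "/" matches every path not claimed by another pattern, hence the wrapper
	mux.HandleFunc("/", exactPath("/", rpcHandler))
	return mux
}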
@@ -97,3 +97,14 @@ func TestRPCNotification(t *testing.T) {
 	require.Nil(t, err, "reading from the body should not give back an error")
 	require.Equal(t, len(blob), 0, "a notification SHOULD NOT be responded to by the server")
 }
+
+func TestUnknownRPCPath(t *testing.T) {
+	mux := testMux()
+	req, _ := http.NewRequest("GET", "http://localhost/unknownrpcpath", nil)
+	rec := httptest.NewRecorder()
+	mux.ServeHTTP(rec, req)
+	res := rec.Result()
+
+	// Always expecting back a 404 error
+	require.Equal(t, http.StatusNotFound, res.StatusCode, "should always return 404")
+}
@@ -15,11 +15,11 @@ import (

 	cfg "github.com/tendermint/tendermint/config"
 	nm "github.com/tendermint/tendermint/node"
+	"github.com/tendermint/tendermint/privval"
 	"github.com/tendermint/tendermint/proxy"
 	ctypes "github.com/tendermint/tendermint/rpc/core/types"
 	core_grpc "github.com/tendermint/tendermint/rpc/grpc"
 	rpcclient "github.com/tendermint/tendermint/rpc/lib/client"
-	pvm "github.com/tendermint/tendermint/types/priv_validator"
 )

 var globalConfig *cfg.Config
@@ -118,7 +118,7 @@ func NewTendermint(app abci.Application) *nm.Node {
 	logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
 	logger = log.NewFilter(logger, log.AllowError())
 	pvFile := config.PrivValidatorFile()
-	pv := pvm.LoadOrGenFilePV(pvFile)
+	pv := privval.LoadOrGenFilePV(pvFile)
 	papp := proxy.NewLocalClientCreator(app)
 	node, err := nm.NewNode(config, pv, papp,
 		nm.DefaultGenesisDocProviderFunc(config),
scripts/install/install_tendermint_bsd.sh (new file, 54 lines)
@@ -0,0 +1,54 @@
+#!/usr/bin/tcsh
+
+# XXX: this script is intended to be run from
+# a fresh Digital Ocean droplet with FreeBSD
+# Just run tcsh install_tendermint_bsd.sh
+
+# upon its completion, you must either reset
+# your terminal or run `source ~/.tcshrc`
+
+# This assumes you're installing it through tcsh as root.
+# Change the relevant lines from tcsh to csh if you're
+# installing as a different user, along with changing the
+# gopath.
+
+# change this to a specific release or branch
+set BRANCH=master
+
+sudo pkg update
+
+sudo pkg upgrade -y
+sudo pkg install -y gmake
+sudo pkg install -y git
+
+# get and unpack golang
+curl -O https://storage.googleapis.com/golang/go1.10.freebsd-amd64.tar.gz
+tar -xvf go1.10.freebsd-amd64.tar.gz
+
+# move go binary and add to path
+mv go /usr/local
+set path=($path /usr/local/go/bin)
+
+
+# create the go directory, set GOPATH, and put it on PATH
+mkdir go
+echo "setenv GOPATH /root/go" >> ~/.tcshrc
+setenv GOPATH /root/go
+echo "set path=($path $GOPATH/bin)" >> ~/.tcshrc
+
+source ~/.tcshrc
+
+# get the code and move into repo
+set REPO=github.com/tendermint/tendermint
+go get $REPO
+cd $GOPATH/src/$REPO
+
+# build & install master
+git checkout $BRANCH
+gmake get_tools
+gmake get_vendor_deps
+gmake install
+
+# the binary is located in $GOPATH/bin
+# run `source ~/.tcshrc` or reset your terminal
+# to persist the changes
scripts/install/install_tendermint_ubuntu.sh (new file, 49 lines)
@@ -0,0 +1,49 @@
+#!/usr/bin/env bash
+
+# XXX: this script is intended to be run from
+# a fresh Digital Ocean droplet with Ubuntu
+
+# upon its completion, you must either reset
+# your terminal or run `source ~/.profile`
+
+# as written, this script will install
+# tendermint core from master branch
+REPO=github.com/tendermint/tendermint
+
+# change this to a specific release or branch
+BRANCH=master
+
+sudo apt-get update -y
+sudo apt-get upgrade -y
+sudo apt-get install -y make
+
+# get and unpack golang
+curl -O https://storage.googleapis.com/golang/go1.10.linux-amd64.tar.gz
+tar -xvf go1.10.linux-amd64.tar.gz
+
+# move go binary and add to path
+mv go /usr/local
+echo "export PATH=\$PATH:/usr/local/go/bin" >> ~/.profile
+
+# create the goApps directory, set GOPATH, and put it on PATH
+mkdir goApps
+echo "export GOPATH=/root/goApps" >> ~/.profile
+echo "export PATH=\$PATH:\$GOPATH/bin" >> ~/.profile
+
+source ~/.profile
+
+# get the code and move into repo
+go get $REPO
+cd $GOPATH/src/$REPO
+
+# build & install
+git checkout $BRANCH
+# XXX: uncomment if branch isn't master
+# git fetch origin $BRANCH
+make get_tools
+make get_vendor_deps
+make install
+
+# the binary is located in $GOPATH/bin
+# run `source ~/.profile` or reset your terminal
+# to persist the changes
@@ -13,8 +13,8 @@ import (
     cmn "github.com/tendermint/tmlibs/common"

     "github.com/tendermint/tendermint/p2p"
+    "github.com/tendermint/tendermint/privval"
     "github.com/tendermint/tendermint/types"
-    priv_val "github.com/tendermint/tendermint/types/priv_validator"
 )

 type GenesisValidator struct {
@@ -84,7 +84,7 @@ func convertPrivVal(cdc *amino.Codec, jsonBytes []byte) ([]byte, error) {
     var pubKey crypto.PubKeyEd25519
     copy(pubKey[:], privVal.PubKey.Data)

-    privValNew := priv_val.FilePV{
+    privValNew := privval.FilePV{
         Address:    pubKey.Address(),
         PubKey:     pubKey,
         LastHeight: privVal.LastHeight,
@@ -5,7 +5,6 @@ import (

     fail "github.com/ebuchman/fail-test"
     abci "github.com/tendermint/abci/types"
-    crypto "github.com/tendermint/go-crypto"
     "github.com/tendermint/tendermint/proxy"
     "github.com/tendermint/tendermint/types"
     dbm "github.com/tendermint/tmlibs/db"
@@ -29,8 +28,8 @@ type BlockExecutor struct {
     eventBus types.BlockEventPublisher

     // update these with block results after commit
-    mempool types.Mempool
+    mempool Mempool
-    evpool  types.EvidencePool
+    evpool  EvidencePool

     logger log.Logger
 }
@@ -38,7 +37,7 @@ type BlockExecutor struct {
 // NewBlockExecutor returns a new BlockExecutor with a NopEventBus.
 // Call SetEventBus to provide one.
 func NewBlockExecutor(db dbm.DB, logger log.Logger, proxyApp proxy.AppConnConsensus,
-    mempool types.Mempool, evpool types.EvidencePool) *BlockExecutor {
+    mempool Mempool, evpool EvidencePool) *BlockExecutor {
     return &BlockExecutor{
         db:       db,
         proxyApp: proxyApp,
@@ -59,8 +58,8 @@ func (blockExec *BlockExecutor) SetEventBus(eventBus types.BlockEventPublisher)
 // If the block is invalid, it returns an error.
 // Validation does not mutate state, but does require historical information from the stateDB,
 // ie. to verify evidence from a validator at an old height.
-func (blockExec *BlockExecutor) ValidateBlock(s State, block *types.Block) error {
+func (blockExec *BlockExecutor) ValidateBlock(state State, block *types.Block) error {
-    return validateBlock(blockExec.db, s, block)
+    return validateBlock(blockExec.db, state, block)
 }

 // ApplyBlock validates the block against the state, executes it against the app,
@@ -68,15 +67,15 @@ func (blockExec *BlockExecutor) ValidateBlock(s State, block *types.Block) error
 // It's the only function that needs to be called
 // from outside this package to process and commit an entire block.
 // It takes a blockID to avoid recomputing the parts hash.
-func (blockExec *BlockExecutor) ApplyBlock(s State, blockID types.BlockID, block *types.Block) (State, error) {
+func (blockExec *BlockExecutor) ApplyBlock(state State, blockID types.BlockID, block *types.Block) (State, error) {

-    if err := blockExec.ValidateBlock(s, block); err != nil {
+    if err := blockExec.ValidateBlock(state, block); err != nil {
-        return s, ErrInvalidBlock(err)
+        return state, ErrInvalidBlock(err)
     }

-    abciResponses, err := execBlockOnProxyApp(blockExec.logger, blockExec.proxyApp, block)
+    abciResponses, err := execBlockOnProxyApp(blockExec.logger, blockExec.proxyApp, block, state.LastValidators, blockExec.db)
     if err != nil {
-        return s, ErrProxyAppConn(err)
+        return state, ErrProxyAppConn(err)
     }

     fail.Fail() // XXX
@@ -87,35 +86,33 @@ func (blockExec *BlockExecutor) ApplyBlock(s State, blockID types.BlockID, block
     fail.Fail() // XXX

     // update the state with the block and responses
-    s, err = updateState(s, blockID, block.Header, abciResponses)
+    state, err = updateState(state, blockID, block.Header, abciResponses)
     if err != nil {
-        return s, fmt.Errorf("Commit failed for application: %v", err)
+        return state, fmt.Errorf("Commit failed for application: %v", err)
     }

-    // lock mempool, commit state, update mempoool
+    // lock mempool, commit app state, update mempoool
     appHash, err := blockExec.Commit(block)
     if err != nil {
-        return s, fmt.Errorf("Commit failed for application: %v", err)
+        return state, fmt.Errorf("Commit failed for application: %v", err)
     }

+    // Update evpool with the block and state.
+    blockExec.evpool.Update(block, state)
+
     fail.Fail() // XXX

     // update the app hash and save the state
-    s.AppHash = appHash
+    state.AppHash = appHash
-    SaveState(blockExec.db, s)
+    SaveState(blockExec.db, state)

     fail.Fail() // XXX

-    // Update evpool now that state is saved
-    // TODO: handle the crash/recover scenario
-    // ie. (may need to call Update for last block)
-    blockExec.evpool.Update(block)
-
     // events are fired after everything else
     // NOTE: if we crash between Commit and Save, events wont be fired during replay
     fireEvents(blockExec.logger, blockExec.eventBus, block, abciResponses)

-    return s, nil
+    return state, nil
 }

 // Commit locks the mempool, runs the ABCI Commit message, and updates the mempool.
@@ -160,7 +157,8 @@ func (blockExec *BlockExecutor) Commit(block *types.Block) ([]byte, error) {

 // Executes block's transactions on proxyAppConn.
 // Returns a list of transaction results and updates to the validator set
-func execBlockOnProxyApp(logger log.Logger, proxyAppConn proxy.AppConnConsensus, block *types.Block) (*ABCIResponses, error) {
+func execBlockOnProxyApp(logger log.Logger, proxyAppConn proxy.AppConnConsensus,
+    block *types.Block, lastValSet *types.ValidatorSet, stateDB dbm.DB) (*ABCIResponses, error) {
     var validTxs, invalidTxs = 0, 0

     txIndex := 0
@@ -186,29 +184,14 @@ func execBlockOnProxyApp(logger log.Logger, proxyAppConn proxy.AppConnConsensus,
     }
     proxyAppConn.SetResponseCallback(proxyCb)

-    // determine which validators did not sign last block
-    absentVals := make([]int32, 0)
-    for valI, vote := range block.LastCommit.Precommits {
-        if vote == nil {
-            absentVals = append(absentVals, int32(valI))
-        }
-    }
-
-    // TODO: determine which validators were byzantine
-    byzantineVals := make([]abci.Evidence, len(block.Evidence.Evidence))
-    for i, ev := range block.Evidence.Evidence {
-        byzantineVals[i] = abci.Evidence{
-            PubKey: ev.Address(), // XXX
-            Height: ev.Height(),
-        }
-    }
+    signVals, byzVals := getBeginBlockValidatorInfo(block, lastValSet, stateDB)

     // Begin block
     _, err := proxyAppConn.BeginBlockSync(abci.RequestBeginBlock{
         Hash:   block.Hash(),
         Header: types.TM2PB.Header(block.Header),
-        AbsentValidators:    absentVals,
+        Validators:          signVals,
-        ByzantineValidators: byzantineVals,
+        ByzantineValidators: byzVals,
     })
     if err != nil {
         logger.Error("Error in proxyAppConn.BeginBlock", "err", err)
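For context on the reshaped BeginBlock request: the application now receives every validator of the previous block with a SignedLastBlock flag instead of an index list of absentees. A hypothetical ABCI application consuming it might look like this sketch; only the fields visible in this diff (Validators, SignedLastBlock, ByzantineValidators) are assumed, everything else is made up for illustration.

package example

import (
    abci "github.com/tendermint/abci/types"
)

// counterApp tallies how many validators signed or missed the last block.
type counterApp struct {
    abci.BaseApplication

    lastSigned, lastMissed int
}

func (app *counterApp) BeginBlock(req abci.RequestBeginBlock) abci.ResponseBeginBlock {
    signed, missed := 0, 0
    for _, sv := range req.Validators {
        if sv.SignedLastBlock {
            signed++
        } else {
            missed++
        }
    }
    app.lastSigned, app.lastMissed = signed, missed

    // Equivocation now arrives as req.ByzantineValidators, already resolved
    // against the historical validator set by the node.
    _ = req.ByzantineValidators

    return abci.ResponseBeginBlock{}
}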
@@ -240,31 +223,70 @@ func execBlockOnProxyApp(logger log.Logger, proxyAppConn proxy.AppConnConsensus,
     return abciResponses, nil
 }

+func getBeginBlockValidatorInfo(block *types.Block, lastValSet *types.ValidatorSet, stateDB dbm.DB) ([]abci.SigningValidator, []abci.Evidence) {
+
+    // Sanity check that commit length matches validator set size -
+    // only applies after first block
+    if block.Height > 1 {
+        precommitLen := len(block.LastCommit.Precommits)
+        valSetLen := len(lastValSet.Validators)
+        if precommitLen != valSetLen {
+            // sanity check
+            panic(fmt.Sprintf("precommit length (%d) doesn't match valset length (%d) at height %d\n\n%v\n\n%v",
+                precommitLen, valSetLen, block.Height, block.LastCommit.Precommits, lastValSet.Validators))
+        }
+    }
+
+    // determine which validators did not sign last block.
+    signVals := make([]abci.SigningValidator, len(lastValSet.Validators))
+    for i, val := range lastValSet.Validators {
+        var vote *types.Vote
+        if i < len(block.LastCommit.Precommits) {
+            vote = block.LastCommit.Precommits[i]
+        }
+        val := abci.SigningValidator{
+            Validator:       types.TM2PB.Validator(val),
+            SignedLastBlock: vote != nil,
+        }
+        signVals[i] = val
+    }
+
+    byzVals := make([]abci.Evidence, len(block.Evidence.Evidence))
+    for i, ev := range block.Evidence.Evidence {
+        // We need the validator set. We already did this in validateBlock.
+        // TODO: Should we instead cache the valset in the evidence itself and add
+        // `SetValidatorSet()` and `ToABCI` methods ?
+        valset, err := LoadValidators(stateDB, ev.Height())
+        if err != nil {
+            panic(err) // shouldn't happen
+        }
+        byzVals[i] = types.TM2PB.Evidence(ev, valset, block.Time)
+    }
+
+    return signVals, byzVals
+
+}
+
 // If more or equal than 1/3 of total voting power changed in one block, then
 // a light client could never prove the transition externally. See
 // ./lite/doc.go for details on how a light client tracks validators.
-func updateValidators(currentSet *types.ValidatorSet, updates []abci.Validator) error {
+func updateValidators(currentSet *types.ValidatorSet, abciUpdates []abci.Validator) error {
-    for _, v := range updates {
-        pubkey, err := crypto.PubKeyFromBytes(v.PubKey) // NOTE: expects go-amino encoded pubkey
-        if err != nil {
-            return err
-        }
-
-        address := pubkey.Address()
-        power := int64(v.Power)
-        // mind the overflow from int64
-        if power < 0 {
-            return fmt.Errorf("Power (%d) overflows int64", v.Power)
-        }
+    updates, err := types.PB2TM.Validators(abciUpdates)
+    if err != nil {
+        return err
+    }
+
+    // these are tendermint types now
+    for _, valUpdate := range updates {
+        address := valUpdate.Address
         _, val := currentSet.GetByAddress(address)
         if val == nil {
             // add val
-            added := currentSet.Add(types.NewValidator(pubkey, power))
+            added := currentSet.Add(valUpdate)
             if !added {
-                return fmt.Errorf("Failed to add new validator %X with voting power %d", address, power)
+                return fmt.Errorf("Failed to add new validator %v", valUpdate)
             }
-        } else if v.Power == 0 {
+        } else if valUpdate.VotingPower == 0 {
             // remove val
             _, removed := currentSet.Remove(address)
             if !removed {
@@ -272,10 +294,9 @@ func updateValidators(currentSet *types.ValidatorSet, updates []abci.Validator)
             }
         } else {
             // update val
-            val.VotingPower = power
-            updated := currentSet.Update(val)
+            updated := currentSet.Update(valUpdate)
             if !updated {
-                return fmt.Errorf("Failed to update validator %X with voting power %d", address, power)
+                return fmt.Errorf("Failed to update validator %X to %v", address, valUpdate)
             }
         }
     }
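For a sense of where these updates come from: the application's EndBlock response carries abci.Validator entries, which updateValidators now converts with PB2TM before applying (a power of 0 removes the validator, an unknown key adds one, any other power re-weights it). A hedged sketch built only from the TM2PB helper that appears in the test changes further down this diff; endBlockUpdates is a hypothetical application-side helper.

package example

import (
    abci "github.com/tendermint/abci/types"
    crypto "github.com/tendermint/go-crypto"
    "github.com/tendermint/tendermint/types"
)

// endBlockUpdates builds a validator-update response: promote is added or
// re-weighted to power 10, demote is removed by sending power 0.
func endBlockUpdates(promote, demote crypto.PubKey) abci.ResponseEndBlock {
    return abci.ResponseEndBlock{
        ValidatorUpdates: []abci.Validator{
            types.TM2PB.ValidatorFromPubKeyAndPower(promote, 10), // add or update
            types.TM2PB.ValidatorFromPubKeyAndPower(demote, 0),   // remove
        },
    }
}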
@@ -283,20 +304,20 @@ func updateValidators(currentSet *types.ValidatorSet, updates []abci.Validator)
 }

 // updateState returns a new State updated according to the header and responses.
-func updateState(s State, blockID types.BlockID, header *types.Header,
+func updateState(state State, blockID types.BlockID, header *types.Header,
     abciResponses *ABCIResponses) (State, error) {

     // copy the valset so we can apply changes from EndBlock
     // and update s.LastValidators and s.Validators
-    prevValSet := s.Validators.Copy()
+    prevValSet := state.Validators.Copy()
     nextValSet := prevValSet.Copy()

     // update the validator set with the latest abciResponses
-    lastHeightValsChanged := s.LastHeightValidatorsChanged
+    lastHeightValsChanged := state.LastHeightValidatorsChanged
     if len(abciResponses.EndBlock.ValidatorUpdates) > 0 {
         err := updateValidators(nextValSet, abciResponses.EndBlock.ValidatorUpdates)
         if err != nil {
-            return s, fmt.Errorf("Error changing validator set: %v", err)
+            return state, fmt.Errorf("Error changing validator set: %v", err)
         }
         // change results from this height but only applies to the next height
         lastHeightValsChanged = header.Height + 1
@@ -306,14 +327,14 @@ func updateState(s State, blockID types.BlockID, header *types.Header,
     nextValSet.IncrementAccum(1)

     // update the params with the latest abciResponses
-    nextParams := s.ConsensusParams
+    nextParams := state.ConsensusParams
-    lastHeightParamsChanged := s.LastHeightConsensusParamsChanged
+    lastHeightParamsChanged := state.LastHeightConsensusParamsChanged
     if abciResponses.EndBlock.ConsensusParamUpdates != nil {
         // NOTE: must not mutate s.ConsensusParams
-        nextParams = s.ConsensusParams.Update(abciResponses.EndBlock.ConsensusParamUpdates)
+        nextParams = state.ConsensusParams.Update(abciResponses.EndBlock.ConsensusParamUpdates)
         err := nextParams.Validate()
         if err != nil {
-            return s, fmt.Errorf("Error updating consensus params: %v", err)
+            return state, fmt.Errorf("Error updating consensus params: %v", err)
         }
         // change results from this height but only applies to the next height
         lastHeightParamsChanged = header.Height + 1
@@ -322,13 +343,13 @@ func updateState(s State, blockID types.BlockID, header *types.Header,
     // NOTE: the AppHash has not been populated.
     // It will be filled on state.Save.
     return State{
-        ChainID: s.ChainID,
+        ChainID: state.ChainID,
         LastBlockHeight: header.Height,
-        LastBlockTotalTx: s.LastBlockTotalTx + header.NumTxs,
+        LastBlockTotalTx: state.LastBlockTotalTx + header.NumTxs,
         LastBlockID: blockID,
         LastBlockTime: header.Time,
         Validators: nextValSet,
-        LastValidators: s.Validators.Copy(),
+        LastValidators: state.Validators.Copy(),
         LastHeightValidatorsChanged: lastHeightValsChanged,
         ConsensusParams: nextParams,
         LastHeightConsensusParamsChanged: lastHeightParamsChanged,
@@ -359,8 +380,9 @@ func fireEvents(logger log.Logger, eventBus types.BlockEventPublisher, block *ty

 // ExecCommitBlock executes and commits a block on the proxyApp without validating or mutating the state.
 // It returns the application root hash (result of abci.Commit).
-func ExecCommitBlock(appConnConsensus proxy.AppConnConsensus, block *types.Block, logger log.Logger) ([]byte, error) {
-    _, err := execBlockOnProxyApp(logger, appConnConsensus, block)
+func ExecCommitBlock(appConnConsensus proxy.AppConnConsensus, block *types.Block,
+    logger log.Logger, lastValSet *types.ValidatorSet, stateDB dbm.DB) ([]byte, error) {
+    _, err := execBlockOnProxyApp(logger, appConnConsensus, block, lastValSet, stateDB)
     if err != nil {
         logger.Error("Error executing block on proxy app", "height", block.Height, "err", err)
         return nil, err
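A hypothetical caller-side sketch of the reworked execution wiring (applyOne is not a function in the repo); it assumes only the signatures shown above and the state-package mocks introduced further down in this diff: the mempool and evidence-pool arguments are now the state package's own interfaces, and ApplyBlock threads state.LastValidators plus the state DB into BeginBlock internally, so the caller no longer supplies them per block.

package example

import (
    "github.com/tendermint/tendermint/proxy"
    "github.com/tendermint/tendermint/state"
    "github.com/tendermint/tendermint/types"
    dbm "github.com/tendermint/tmlibs/db"
    "github.com/tendermint/tmlibs/log"
)

// applyOne validates and commits a single block through a BlockExecutor
// built with the state package's mock mempool and evidence pool.
func applyOne(stateDB dbm.DB, st state.State, conn proxy.AppConnConsensus,
    block *types.Block, blockID types.BlockID) (state.State, error) {

    blockExec := state.NewBlockExecutor(stateDB, log.TestingLogger(), conn,
        state.MockMempool{}, state.MockEvidencePool{})

    return blockExec.ApplyBlock(st, blockID, block)
}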
@@ -1,6 +1,7 @@
 package state

 import (
+    "fmt"
     "testing"
     "time"

@@ -10,15 +11,15 @@ import (
     "github.com/tendermint/abci/example/kvstore"
     abci "github.com/tendermint/abci/types"
     crypto "github.com/tendermint/go-crypto"
-    "github.com/tendermint/tendermint/proxy"
-    "github.com/tendermint/tendermint/types"
     cmn "github.com/tendermint/tmlibs/common"
     dbm "github.com/tendermint/tmlibs/db"
     "github.com/tendermint/tmlibs/log"

+    "github.com/tendermint/tendermint/proxy"
+    "github.com/tendermint/tendermint/types"
 )

 var (
-    privKey      = crypto.GenPrivKeyEd25519FromSecret([]byte("execution_test"))
     chainID      = "execution_chain"
     testPartSize = 65536
     nTxsPerBlock = 10
@@ -31,10 +32,10 @@ func TestApplyBlock(t *testing.T) {
     require.Nil(t, err)
     defer proxyApp.Stop()

-    state, stateDB := state(), dbm.NewMemDB()
+    state, stateDB := state(1, 1)

     blockExec := NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(),
-        types.MockMempool{}, types.MockEvidencePool{})
+        MockMempool{}, MockEvidencePool{})

     block := makeBlock(state, 1)
     blockID := types.BlockID{block.Hash(), block.MakePartSet(testPartSize).Header()}
@@ -45,8 +46,8 @@ func TestApplyBlock(t *testing.T) {
     // TODO check state and mempool
 }

-// TestBeginBlockAbsentValidators ensures we send absent validators list.
+// TestBeginBlockValidators ensures we send absent validators list.
-func TestBeginBlockAbsentValidators(t *testing.T) {
+func TestBeginBlockValidators(t *testing.T) {
     app := &testApp{}
     cc := proxy.NewLocalClientCreator(app)
     proxyApp := proxy.NewAppConns(cc, nil)
@@ -54,32 +55,46 @@ func TestBeginBlockAbsentValidators(t *testing.T) {
     require.Nil(t, err)
     defer proxyApp.Stop()

-    state := state()
+    state, stateDB := state(2, 2)

     prevHash := state.LastBlockID.Hash
     prevParts := types.PartSetHeader{}
     prevBlockID := types.BlockID{prevHash, prevParts}

     now := time.Now().UTC()
+    vote0 := &types.Vote{ValidatorIndex: 0, Timestamp: now, Type: types.VoteTypePrecommit}
+    vote1 := &types.Vote{ValidatorIndex: 1, Timestamp: now}
+
     testCases := []struct {
         desc                     string
         lastCommitPrecommits     []*types.Vote
-        expectedAbsentValidators []int32
+        expectedAbsentValidators []int
     }{
-        {"none absent", []*types.Vote{{ValidatorIndex: 0, Timestamp: now, Type: types.VoteTypePrecommit}, {ValidatorIndex: 1, Timestamp: now}}, []int32{}},
+        {"none absent", []*types.Vote{vote0, vote1}, []int{}},
-        {"one absent", []*types.Vote{{ValidatorIndex: 0, Timestamp: now, Type: types.VoteTypePrecommit}, nil}, []int32{1}},
+        {"one absent", []*types.Vote{vote0, nil}, []int{1}},
-        {"multiple absent", []*types.Vote{nil, nil}, []int32{0, 1}},
+        {"multiple absent", []*types.Vote{nil, nil}, []int{0, 1}},
     }

     for _, tc := range testCases {
         lastCommit := &types.Commit{BlockID: prevBlockID, Precommits: tc.lastCommitPrecommits}

+        // block for height 2
         block, _ := state.MakeBlock(2, makeTxs(2), lastCommit)
-        _, err = ExecCommitBlock(proxyApp.Consensus(), block, log.TestingLogger())
+        _, err = ExecCommitBlock(proxyApp.Consensus(), block, log.TestingLogger(), state.Validators, stateDB)
         require.Nil(t, err, tc.desc)

-        // -> app must receive an index of the absent validator
+        // -> app receives a list of validators with a bool indicating if they signed
-        assert.Equal(t, tc.expectedAbsentValidators, app.AbsentValidators, tc.desc)
+        ctr := 0
+        for i, v := range app.Validators {
+            if ctr < len(tc.expectedAbsentValidators) &&
+                tc.expectedAbsentValidators[ctr] == i {
+
+                assert.False(t, v.SignedLastBlock)
+                ctr++
+            } else {
+                assert.True(t, v.SignedLastBlock)
+            }
+        }
     }
 }

@@ -92,35 +107,41 @@ func TestBeginBlockByzantineValidators(t *testing.T) {
     require.Nil(t, err)
     defer proxyApp.Stop()

-    state := state()
+    state, stateDB := state(2, 12)

     prevHash := state.LastBlockID.Hash
     prevParts := types.PartSetHeader{}
     prevBlockID := types.BlockID{prevHash, prevParts}

-    height1, idx1, val1 := int64(8), 0, []byte("val1")
+    height1, idx1, val1 := int64(8), 0, state.Validators.Validators[0].Address
-    height2, idx2, val2 := int64(3), 1, []byte("val2")
+    height2, idx2, val2 := int64(3), 1, state.Validators.Validators[1].Address
     ev1 := types.NewMockGoodEvidence(height1, idx1, val1)
     ev2 := types.NewMockGoodEvidence(height2, idx2, val2)

+    now := time.Now()
+    valSet := state.Validators
     testCases := []struct {
         desc                        string
         evidence                    []types.Evidence
         expectedByzantineValidators []abci.Evidence
     }{
         {"none byzantine", []types.Evidence{}, []abci.Evidence{}},
-        {"one byzantine", []types.Evidence{ev1}, []abci.Evidence{{ev1.Address(), ev1.Height()}}},
+        {"one byzantine", []types.Evidence{ev1}, []abci.Evidence{types.TM2PB.Evidence(ev1, valSet, now)}},
         {"multiple byzantine", []types.Evidence{ev1, ev2}, []abci.Evidence{
-            {ev1.Address(), ev1.Height()},
+            types.TM2PB.Evidence(ev1, valSet, now),
-            {ev2.Address(), ev2.Height()}}},
+            types.TM2PB.Evidence(ev2, valSet, now)}},
     }

+    vote0 := &types.Vote{ValidatorIndex: 0, Timestamp: now, Type: types.VoteTypePrecommit}
+    vote1 := &types.Vote{ValidatorIndex: 1, Timestamp: now}
+    votes := []*types.Vote{vote0, vote1}
+    lastCommit := &types.Commit{BlockID: prevBlockID, Precommits: votes}
     for _, tc := range testCases {
-        lastCommit := &types.Commit{BlockID: prevBlockID}

         block, _ := state.MakeBlock(10, makeTxs(2), lastCommit)
+        block.Time = now
         block.Evidence.Evidence = tc.evidence
-        _, err = ExecCommitBlock(proxyApp.Consensus(), block, log.TestingLogger())
+        _, err = ExecCommitBlock(proxyApp.Consensus(), block, log.TestingLogger(), state.Validators, stateDB)
         require.Nil(t, err, tc.desc)

         // -> app must receive an index of the byzantine validator
@@ -138,15 +159,30 @@ func makeTxs(height int64) (txs []types.Tx) {
     return txs
 }

-func state() State {
+func state(nVals, height int) (State, dbm.DB) {
+    vals := make([]types.GenesisValidator, nVals)
+    for i := 0; i < nVals; i++ {
+        secret := []byte(fmt.Sprintf("test%d", i))
+        pk := crypto.GenPrivKeyEd25519FromSecret(secret)
+        vals[i] = types.GenesisValidator{
+            pk.PubKey(), 1000, fmt.Sprintf("test%d", i),
+        }
+    }
     s, _ := MakeGenesisState(&types.GenesisDoc{
         ChainID:    chainID,
-        Validators: []types.GenesisValidator{
+        Validators: vals,
-            {privKey.PubKey(), 10000, "test"},
+        AppHash:    nil,
-        },
-        AppHash: nil,
     })
-    return s
+
+    // save validators to db for 2 heights
+    stateDB := dbm.NewMemDB()
+    SaveState(stateDB, s)
+
+    for i := 1; i < height; i++ {
+        s.LastBlockHeight += 1
+        SaveState(stateDB, s)
+    }
+    return s, stateDB
 }

 func makeBlock(state State, height int64) *types.Block {
@@ -161,7 +197,7 @@ var _ abci.Application = (*testApp)(nil)
 type testApp struct {
     abci.BaseApplication

-    AbsentValidators    []int32
+    Validators          []abci.SigningValidator
     ByzantineValidators []abci.Evidence
 }

@@ -174,7 +210,7 @@ func (app *testApp) Info(req abci.RequestInfo) (resInfo abci.ResponseInfo) {
 }

 func (app *testApp) BeginBlock(req abci.RequestBeginBlock) abci.ResponseBeginBlock {
-    app.AbsentValidators = req.AbsentValidators
+    app.Validators = req.Validators
     app.ByzantineValidators = req.ByzantineValidators
     return abci.ResponseBeginBlock{}
 }
@@ -1,11 +1,10 @@
-package types
+package state

 import (
     abci "github.com/tendermint/abci/types"
+    "github.com/tendermint/tendermint/types"
 )

-// NOTE/XXX: all type definitions in this file are considered UNSTABLE
-
 //------------------------------------------------------
 // blockchain services types
 // NOTE: Interfaces used by RPC must be thread safe!
@@ -17,15 +16,14 @@ import (
 // Mempool defines the mempool interface as used by the ConsensusState.
 // Updates to the mempool need to be synchronized with committing a block
 // so apps can reset their transient state on Commit
-// UNSTABLE
 type Mempool interface {
     Lock()
     Unlock()

     Size() int
-    CheckTx(Tx, func(*abci.Response)) error
+    CheckTx(types.Tx, func(*abci.Response)) error
-    Reap(int) Txs
+    Reap(int) types.Txs
-    Update(height int64, txs Txs) error
+    Update(height int64, txs types.Txs) error
     Flush()
     FlushAppConn() error

@@ -34,60 +32,55 @@ type Mempool interface {
 }

 // MockMempool is an empty implementation of a Mempool, useful for testing.
-// UNSTABLE
 type MockMempool struct {
 }

 func (m MockMempool) Lock()     {}
 func (m MockMempool) Unlock()   {}
 func (m MockMempool) Size() int { return 0 }
-func (m MockMempool) CheckTx(tx Tx, cb func(*abci.Response)) error { return nil }
+func (m MockMempool) CheckTx(tx types.Tx, cb func(*abci.Response)) error { return nil }
-func (m MockMempool) Reap(n int) Txs { return Txs{} }
+func (m MockMempool) Reap(n int) types.Txs { return types.Txs{} }
-func (m MockMempool) Update(height int64, txs Txs) error { return nil }
+func (m MockMempool) Update(height int64, txs types.Txs) error { return nil }
 func (m MockMempool) Flush()                     {}
 func (m MockMempool) FlushAppConn() error        { return nil }
 func (m MockMempool) TxsAvailable() <-chan int64 { return make(chan int64) }
 func (m MockMempool) EnableTxsAvailable()        {}

 //------------------------------------------------------
 // blockstore

 // BlockStoreRPC is the block store interface used by the RPC.
-// UNSTABLE
 type BlockStoreRPC interface {
     Height() int64

-    LoadBlockMeta(height int64) *BlockMeta
+    LoadBlockMeta(height int64) *types.BlockMeta
-    LoadBlock(height int64) *Block
+    LoadBlock(height int64) *types.Block
-    LoadBlockPart(height int64, index int) *Part
+    LoadBlockPart(height int64, index int) *types.Part

-    LoadBlockCommit(height int64) *Commit
+    LoadBlockCommit(height int64) *types.Commit
-    LoadSeenCommit(height int64) *Commit
+    LoadSeenCommit(height int64) *types.Commit
 }

 // BlockStore defines the BlockStore interface used by the ConsensusState.
-// UNSTABLE
 type BlockStore interface {
     BlockStoreRPC
-    SaveBlock(block *Block, blockParts *PartSet, seenCommit *Commit)
+    SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit)
 }

-//------------------------------------------------------
+//-----------------------------------------------------------------------------------------------------
 // evidence pool

 // EvidencePool defines the EvidencePool interface used by the ConsensusState.
-// UNSTABLE
 type EvidencePool interface {
-    PendingEvidence() []Evidence
+    PendingEvidence() []types.Evidence
-    AddEvidence(Evidence) error
+    AddEvidence(types.Evidence) error
-    Update(*Block)
+    Update(*types.Block, State)
 }

 // MockMempool is an empty implementation of a Mempool, useful for testing.
-// UNSTABLE
 type MockEvidencePool struct {
 }

-func (m MockEvidencePool) PendingEvidence() []Evidence { return nil }
+func (m MockEvidencePool) PendingEvidence() []types.Evidence { return nil }
-func (m MockEvidencePool) AddEvidence(Evidence) error { return nil }
+func (m MockEvidencePool) AddEvidence(types.Evidence) error { return nil }
-func (m MockEvidencePool) Update(*Block) {}
+func (m MockEvidencePool) Update(*types.Block, State) {}
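Since these interfaces now live in the state package and reference types.* explicitly, an external implementation would be compiled roughly as in the following sketch (noopEvidencePool is hypothetical, assuming only the interface shown above): callers import state for the interface and State type, and types for Block/Evidence, with Update taking the committed State as its second argument.

package example

import (
    "github.com/tendermint/tendermint/state"
    "github.com/tendermint/tendermint/types"
)

// noopEvidencePool does nothing but satisfies the relocated interface.
type noopEvidencePool struct{}

var _ state.EvidencePool = noopEvidencePool{}

func (noopEvidencePool) PendingEvidence() []types.Evidence { return nil }
func (noopEvidencePool) AddEvidence(types.Evidence) error  { return nil }
func (noopEvidencePool) Update(*types.Block, state.State)  {}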
@@ -55,67 +55,67 @@ type State struct {
 }

 // Copy makes a copy of the State for mutating.
-func (s State) Copy() State {
+func (state State) Copy() State {
     return State{
-        ChainID: s.ChainID,
+        ChainID: state.ChainID,

-        LastBlockHeight:  s.LastBlockHeight,
+        LastBlockHeight:  state.LastBlockHeight,
-        LastBlockTotalTx: s.LastBlockTotalTx,
+        LastBlockTotalTx: state.LastBlockTotalTx,
-        LastBlockID:      s.LastBlockID,
+        LastBlockID:      state.LastBlockID,
-        LastBlockTime:    s.LastBlockTime,
+        LastBlockTime:    state.LastBlockTime,

-        Validators:                  s.Validators.Copy(),
+        Validators:                  state.Validators.Copy(),
-        LastValidators:              s.LastValidators.Copy(),
+        LastValidators:              state.LastValidators.Copy(),
-        LastHeightValidatorsChanged: s.LastHeightValidatorsChanged,
+        LastHeightValidatorsChanged: state.LastHeightValidatorsChanged,

-        ConsensusParams:                  s.ConsensusParams,
+        ConsensusParams:                  state.ConsensusParams,
-        LastHeightConsensusParamsChanged: s.LastHeightConsensusParamsChanged,
+        LastHeightConsensusParamsChanged: state.LastHeightConsensusParamsChanged,

-        AppHash: s.AppHash,
+        AppHash: state.AppHash,

-        LastResultsHash: s.LastResultsHash,
+        LastResultsHash: state.LastResultsHash,
     }
 }

 // Equals returns true if the States are identical.
-func (s State) Equals(s2 State) bool {
+func (state State) Equals(state2 State) bool {
-    sbz, s2bz := s.Bytes(), s2.Bytes()
+    sbz, s2bz := state.Bytes(), state2.Bytes()
     return bytes.Equal(sbz, s2bz)
 }

 // Bytes serializes the State using go-amino.
-func (s State) Bytes() []byte {
+func (state State) Bytes() []byte {
-    return cdc.MustMarshalBinaryBare(s)
+    return cdc.MustMarshalBinaryBare(state)
 }

 // IsEmpty returns true if the State is equal to the empty State.
-func (s State) IsEmpty() bool {
+func (state State) IsEmpty() bool {
-    return s.Validators == nil // XXX can't compare to Empty
+    return state.Validators == nil // XXX can't compare to Empty
 }

 // GetValidators returns the last and current validator sets.
-func (s State) GetValidators() (last *types.ValidatorSet, current *types.ValidatorSet) {
+func (state State) GetValidators() (last *types.ValidatorSet, current *types.ValidatorSet) {
-    return s.LastValidators, s.Validators
+    return state.LastValidators, state.Validators
 }

 //------------------------------------------------------------------------
 // Create a block from the latest state

 // MakeBlock builds a block with the given txs and commit from the current state.
-func (s State) MakeBlock(height int64, txs []types.Tx, commit *types.Commit) (*types.Block, *types.PartSet) {
+func (state State) MakeBlock(height int64, txs []types.Tx, commit *types.Commit) (*types.Block, *types.PartSet) {
     // build base block
     block := types.MakeBlock(height, txs, commit)

     // fill header with state data
-    block.ChainID = s.ChainID
+    block.ChainID = state.ChainID
-    block.TotalTxs = s.LastBlockTotalTx + block.NumTxs
+    block.TotalTxs = state.LastBlockTotalTx + block.NumTxs
-    block.LastBlockID = s.LastBlockID
+    block.LastBlockID = state.LastBlockID
-    block.ValidatorsHash = s.Validators.Hash()
+    block.ValidatorsHash = state.Validators.Hash()
-    block.AppHash = s.AppHash
+    block.AppHash = state.AppHash
-    block.ConsensusHash = s.ConsensusParams.Hash()
+    block.ConsensusHash = state.ConsensusParams.Hash()
-    block.LastResultsHash = s.LastResultsHash
+    block.LastResultsHash = state.LastResultsHash

-    return block, block.MakePartSet(s.ConsensusParams.BlockGossip.BlockPartSizeBytes)
+    return block, block.MakePartSet(state.ConsensusParams.BlockGossip.BlockPartSizeBytes)
 }

 //------------------------------------------------------------------------
@@ -78,10 +78,7 @@ func TestABCIResponsesSaveLoad1(t *testing.T) {
     abciResponses.DeliverTx[0] = &abci.ResponseDeliverTx{Data: []byte("foo"), Tags: nil}
     abciResponses.DeliverTx[1] = &abci.ResponseDeliverTx{Data: []byte("bar"), Log: "ok", Tags: nil}
     abciResponses.EndBlock = &abci.ResponseEndBlock{ValidatorUpdates: []abci.Validator{
-        {
-            PubKey: crypto.GenPrivKeyEd25519().PubKey().Bytes(),
-            Power:  10,
-        },
+        types.TM2PB.ValidatorFromPubKeyAndPower(crypto.GenPrivKeyEd25519().PubKey(), 10),
     }}

     saveABCIResponses(stateDB, block.Height, abciResponses)
@@ -435,8 +432,8 @@ func makeHeaderPartsResponsesValPubKeyChange(state State, height int64,
     if !bytes.Equal(pubkey.Bytes(), val.PubKey.Bytes()) {
         abciResponses.EndBlock = &abci.ResponseEndBlock{
             ValidatorUpdates: []abci.Validator{
-                {val.PubKey.Bytes(), 0},
+                types.TM2PB.ValidatorFromPubKeyAndPower(val.PubKey, 0),
-                {pubkey.Bytes(), 10},
+                types.TM2PB.ValidatorFromPubKeyAndPower(pubkey, 10),
             },
         }
     }
@@ -457,7 +454,7 @@ func makeHeaderPartsResponsesValPowerChange(state State, height int64,
     if val.VotingPower != power {
         abciResponses.EndBlock = &abci.ResponseEndBlock{
             ValidatorUpdates: []abci.Validator{
-                {val.PubKey.Bytes(), power},
+                types.TM2PB.ValidatorFromPubKeyAndPower(val.PubKey, power),
             },
         }
     }
@@ -80,15 +80,15 @@ func loadState(db dbm.DB, key []byte) (state State) {
 }

 // SaveState persists the State, the ValidatorsInfo, and the ConsensusParamsInfo to the database.
-func SaveState(db dbm.DB, s State) {
+func SaveState(db dbm.DB, state State) {
-    saveState(db, s, stateKey)
+    saveState(db, state, stateKey)
 }

-func saveState(db dbm.DB, s State, key []byte) {
+func saveState(db dbm.DB, state State, key []byte) {
-    nextHeight := s.LastBlockHeight + 1
+    nextHeight := state.LastBlockHeight + 1
-    saveValidatorsInfo(db, nextHeight, s.LastHeightValidatorsChanged, s.Validators)
+    saveValidatorsInfo(db, nextHeight, state.LastHeightValidatorsChanged, state.Validators)
-    saveConsensusParamsInfo(db, nextHeight, s.LastHeightConsensusParamsChanged, s.ConsensusParams)
+    saveConsensusParamsInfo(db, nextHeight, state.LastHeightConsensusParamsChanged, state.ConsensusParams)
-    db.SetSync(stateKey, s.Bytes())
+    db.SetSync(stateKey, state.Bytes())
 }

 //------------------------------------------------------------------------
@@ -173,11 +173,12 @@ func LoadValidators(db dbm.DB, height int64) (*types.ValidatorSet, error) {
     }

     if valInfo.ValidatorSet == nil {
-        valInfo = loadValidatorsInfo(db, valInfo.LastHeightChanged)
+        valInfo2 := loadValidatorsInfo(db, valInfo.LastHeightChanged)
-        if valInfo == nil {
+        if valInfo2 == nil {
             cmn.PanicSanity(fmt.Sprintf(`Couldn't find validators at height %d as
 last changed from height %d`, valInfo.LastHeightChanged, height))
         }
+        valInfo = valInfo2
     }

     return valInfo.ValidatorSet, nil
@@ -12,69 +12,72 @@ import (
 //-----------------------------------------------------
 // Validate block

-func validateBlock(stateDB dbm.DB, s State, b *types.Block) error {
+func validateBlock(stateDB dbm.DB, state State, block *types.Block) error {
     // validate internal consistency
-    if err := b.ValidateBasic(); err != nil {
+    if err := block.ValidateBasic(); err != nil {
         return err
     }

     // validate basic info
-    if b.ChainID != s.ChainID {
+    if block.ChainID != state.ChainID {
-        return fmt.Errorf("Wrong Block.Header.ChainID. Expected %v, got %v", s.ChainID, b.ChainID)
+        return fmt.Errorf("Wrong Block.Header.ChainID. Expected %v, got %v", state.ChainID, block.ChainID)
     }
-    if b.Height != s.LastBlockHeight+1 {
+    if block.Height != state.LastBlockHeight+1 {
-        return fmt.Errorf("Wrong Block.Header.Height. Expected %v, got %v", s.LastBlockHeight+1, b.Height)
+        return fmt.Errorf("Wrong Block.Header.Height. Expected %v, got %v", state.LastBlockHeight+1, block.Height)
     }
     /* TODO: Determine bounds for Time
         See blockchain/reactor "stopSyncingDurationMinutes"

-        if !b.Time.After(lastBlockTime) {
+        if !block.Time.After(lastBlockTime) {
             return errors.New("Invalid Block.Header.Time")
         }
     */

     // validate prev block info
-    if !b.LastBlockID.Equals(s.LastBlockID) {
+    if !block.LastBlockID.Equals(state.LastBlockID) {
-        return fmt.Errorf("Wrong Block.Header.LastBlockID. Expected %v, got %v", s.LastBlockID, b.LastBlockID)
+        return fmt.Errorf("Wrong Block.Header.LastBlockID. Expected %v, got %v", state.LastBlockID, block.LastBlockID)
     }
-    newTxs := int64(len(b.Data.Txs))
+    newTxs := int64(len(block.Data.Txs))
-    if b.TotalTxs != s.LastBlockTotalTx+newTxs {
+    if block.TotalTxs != state.LastBlockTotalTx+newTxs {
-        return fmt.Errorf("Wrong Block.Header.TotalTxs. Expected %v, got %v", s.LastBlockTotalTx+newTxs, b.TotalTxs)
+        return fmt.Errorf("Wrong Block.Header.TotalTxs. Expected %v, got %v", state.LastBlockTotalTx+newTxs, block.TotalTxs)
     }

     // validate app info
-    if !bytes.Equal(b.AppHash, s.AppHash) {
+    if !bytes.Equal(block.AppHash, state.AppHash) {
-        return fmt.Errorf("Wrong Block.Header.AppHash. Expected %X, got %v", s.AppHash, b.AppHash)
+        return fmt.Errorf("Wrong Block.Header.AppHash. Expected %X, got %v", state.AppHash, block.AppHash)
     }
-    if !bytes.Equal(b.ConsensusHash, s.ConsensusParams.Hash()) {
+    if !bytes.Equal(block.ConsensusHash, state.ConsensusParams.Hash()) {
-        return fmt.Errorf("Wrong Block.Header.ConsensusHash. Expected %X, got %v", s.ConsensusParams.Hash(), b.ConsensusHash)
+        return fmt.Errorf("Wrong Block.Header.ConsensusHash. Expected %X, got %v", state.ConsensusParams.Hash(), block.ConsensusHash)
     }
-    if !bytes.Equal(b.LastResultsHash, s.LastResultsHash) {
+    if !bytes.Equal(block.LastResultsHash, state.LastResultsHash) {
-        return fmt.Errorf("Wrong Block.Header.LastResultsHash. Expected %X, got %v", s.LastResultsHash, b.LastResultsHash)
+        return fmt.Errorf("Wrong Block.Header.LastResultsHash. Expected %X, got %v", state.LastResultsHash, block.LastResultsHash)
     }
-    if !bytes.Equal(b.ValidatorsHash, s.Validators.Hash()) {
+    if !bytes.Equal(block.ValidatorsHash, state.Validators.Hash()) {
-        return fmt.Errorf("Wrong Block.Header.ValidatorsHash. Expected %X, got %v", s.Validators.Hash(), b.ValidatorsHash)
+        return fmt.Errorf("Wrong Block.Header.ValidatorsHash. Expected %X, got %v", state.Validators.Hash(), block.ValidatorsHash)
     }

     // Validate block LastCommit.
-    if b.Height == 1 {
+    if block.Height == 1 {
-        if len(b.LastCommit.Precommits) != 0 {
+        if len(block.LastCommit.Precommits) != 0 {
             return errors.New("Block at height 1 (first block) should have no LastCommit precommits")
         }
     } else {
-        if len(b.LastCommit.Precommits) != s.LastValidators.Size() {
+        if len(block.LastCommit.Precommits) != state.LastValidators.Size() {
             return fmt.Errorf("Invalid block commit size. Expected %v, got %v",
-                s.LastValidators.Size(), len(b.LastCommit.Precommits))
+                state.LastValidators.Size(), len(block.LastCommit.Precommits))
         }
-        err := s.LastValidators.VerifyCommit(
+        err := state.LastValidators.VerifyCommit(
-            s.ChainID, s.LastBlockID, b.Height-1, b.LastCommit)
+            state.ChainID, state.LastBlockID, block.Height-1, block.LastCommit)
         if err != nil {
             return err
         }
     }

-    for _, ev := range b.Evidence.Evidence {
-        if err := VerifyEvidence(stateDB, s, ev); err != nil {
+    // TODO: Each check requires loading an old validator set.
+    // We should cap the amount of evidence per block
+    // to prevent potential proposer DoS.
+    for _, ev := range block.Evidence.Evidence {
+        if err := VerifyEvidence(stateDB, state, ev); err != nil {
             return types.NewEvidenceInvalidErr(ev, err)
         }
     }
@@ -82,25 +85,21 @@ func validateBlock(stateDB dbm.DB, s State, b *types.Block) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// XXX: What's cheaper (ie. what should be checked first):
|
// VerifyEvidence verifies the evidence fully by checking:
|
||||||
// evidence internal validity (ie. sig checks) or validator existed (fetch historical val set from db)
|
// - it is sufficiently recent (MaxAge)
|
||||||
|
// - it is from a key who was a validator at the given height
|
||||||
// VerifyEvidence verifies the evidence fully by checking it is internally
|
// - it is internally consistent
|
||||||
// consistent and sufficiently recent.
|
// - it was properly signed by the alleged equivocator
|
||||||
func VerifyEvidence(stateDB dbm.DB, s State, evidence types.Evidence) error {
|
func VerifyEvidence(stateDB dbm.DB, state State, evidence types.Evidence) error {
|
||||||
height := s.LastBlockHeight
|
height := state.LastBlockHeight
|
||||||
|
|
||||||
evidenceAge := height - evidence.Height()
|
evidenceAge := height - evidence.Height()
|
||||||
maxAge := s.ConsensusParams.EvidenceParams.MaxAge
|
maxAge := state.ConsensusParams.EvidenceParams.MaxAge
|
||||||
if evidenceAge > maxAge {
|
if evidenceAge > maxAge {
|
||||||
return fmt.Errorf("Evidence from height %d is too old. Min height is %d",
|
return fmt.Errorf("Evidence from height %d is too old. Min height is %d",
|
||||||
evidence.Height(), height-maxAge)
|
evidence.Height(), height-maxAge)
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := evidence.Verify(s.ChainID); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
valset, err := LoadValidators(stateDB, evidence.Height())
|
valset, err := LoadValidators(stateDB, evidence.Height())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// TODO: if err is just that we cant find it cuz we pruned, ignore.
|
// TODO: if err is just that we cant find it cuz we pruned, ignore.
|
||||||
@@ -108,14 +107,18 @@ func VerifyEvidence(stateDB dbm.DB, s State, evidence types.Evidence) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// The address must have been an active validator at the height
|
// The address must have been an active validator at the height.
|
||||||
|
// NOTE: we will ignore evidence from H if the key was not a validator
|
||||||
|
// at H, even if it is a validator at some nearby H'
|
||||||
ev := evidence
|
ev := evidence
|
||||||
height, addr, idx := ev.Height(), ev.Address(), ev.Index()
|
height, addr := ev.Height(), ev.Address()
|
||||||
valIdx, val := valset.GetByAddress(addr)
|
_, val := valset.GetByAddress(addr)
|
||||||
if val == nil {
|
if val == nil {
|
||||||
return fmt.Errorf("Address %X was not a validator at height %d", addr, height)
|
return fmt.Errorf("Address %X was not a validator at height %d", addr, height)
|
||||||
} else if idx != valIdx {
|
}
|
||||||
return fmt.Errorf("Address %X was validator %d at height %d, not %d", addr, valIdx, height, idx)
|
|
||||||
|
if err := evidence.Verify(state.ChainID, val.PubKey); err != nil {
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
|
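The reworked VerifyEvidence above first checks the evidence age against ConsensusParams.EvidenceParams.MaxAge, then loads the validator set stored for the evidence height and only verifies the evidence with the public key found there. Below is a minimal, self-contained Go sketch of that ordering; the types and names (state, evidence, validatorsAt) are illustrative stand-ins, not the real tendermint APIs.

package main

import (
    "errors"
    "fmt"
)

// Illustrative stand-ins for the real State and Evidence types.
type evidence struct {
    height  int64
    address string
}

type state struct {
    lastBlockHeight int64
    maxEvidenceAge  int64
    // validatorsAt maps a height to the addresses that were validators there.
    validatorsAt map[int64]map[string]bool
}

// verifyEvidence mirrors the order of checks in the diff above:
// reject evidence that is too old, then require that the accused address
// was a validator at the evidence height before any signature check.
func verifyEvidence(s state, ev evidence) error {
    age := s.lastBlockHeight - ev.height
    if age > s.maxEvidenceAge {
        return fmt.Errorf("evidence from height %d is too old", ev.height)
    }
    vals, ok := s.validatorsAt[ev.height]
    if !ok {
        return errors.New("no validator set stored for that height")
    }
    if !vals[ev.address] {
        return fmt.Errorf("address %s was not a validator at height %d", ev.address, ev.height)
    }
    // The real code now calls evidence.Verify(chainID, val.PubKey) here,
    // using the pubkey recovered from the historical validator set.
    return nil
}

func main() {
    s := state{
        lastBlockHeight: 100,
        maxEvidenceAge:  50,
        validatorsAt:    map[int64]map[string]bool{90: {"val1": true}},
    }
    fmt.Println(verifyEvidence(s, evidence{height: 90, address: "val1"})) // <nil>
    fmt.Println(verifyEvidence(s, evidence{height: 10, address: "val1"})) // too old
}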
@@ -9,7 +9,7 @@ import (
)

func TestValidateBlock(t *testing.T) {
-state := state()
+state, _ := state(1, 1)

blockExec := NewBlockExecutor(dbm.NewMemDB(), log.TestingLogger(), nil, nil, nil)
test/app/grpc_client.go (10 changes; Executable file → Normal file)

@@ -2,12 +2,12 @@ package main

import (
"encoding/hex"
+"encoding/json"
"fmt"
"os"

"context"

-"github.com/tendermint/go-wire"
"github.com/tendermint/tendermint/rpc/grpc"
)

@@ -32,5 +32,11 @@ func main() {
fmt.Println(err)
os.Exit(1)
}
-fmt.Println(string(wire.JSONBytes(res)))
+
+bz, err := json.Marshal(res)
+if err != nil {
+fmt.Println(err)
+os.Exit(1)
+}
+fmt.Println(string(bz))
}
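The client now serializes the gRPC response with the standard library's encoding/json instead of go-wire. A minimal sketch of that output path, using a hypothetical placeholder response struct in place of the real broadcast-tx result:

package main

import (
    "encoding/json"
    "fmt"
    "os"
)

// response is a placeholder for the gRPC broadcast-tx result; the real
// type comes from the rpc/grpc package.
type response struct {
    Code uint32 `json:"code"`
    Log  string `json:"log"`
}

func main() {
    res := response{Code: 0, Log: "ok"}
    bz, err := json.Marshal(res)
    if err != nil {
        fmt.Println(err)
        os.Exit(1)
    }
    fmt.Println(string(bz)) // {"code":0,"log":"ok"}
}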
@@ -28,12 +28,11 @@ func (err *ErrEvidenceInvalid) Error() string {

// Evidence represents any provable malicious activity by a validator
type Evidence interface {
Height() int64 // height of the equivocation
Address() []byte // address of the equivocating validator
-Index() int // index of the validator in the validator set
Hash() []byte // hash of the evidence
-Verify(chainID string) error // verify the evidence
+Verify(chainID string, pubKey crypto.PubKey) error // verify the evidence
Equal(Evidence) bool // check equality of evidence

String() string
}

@@ -68,11 +67,6 @@ func (dve *DuplicateVoteEvidence) Address() []byte {
return dve.PubKey.Address()
}

-// Index returns the index of the validator.
-func (dve *DuplicateVoteEvidence) Index() int {
-return dve.VoteA.ValidatorIndex
-}
-
// Hash returns the hash of the evidence.
func (dve *DuplicateVoteEvidence) Hash() []byte {
return aminoHasher(dve).Hash()

@@ -80,7 +74,7 @@ func (dve *DuplicateVoteEvidence) Hash() []byte {

// Verify returns an error if the two votes aren't conflicting.
// To be conflicting, they must be from the same validator, for the same H/R/S, but for different blocks.
-func (dve *DuplicateVoteEvidence) Verify(chainID string) error {
+func (dve *DuplicateVoteEvidence) Verify(chainID string, pubKey crypto.PubKey) error {
// H/R/S must be the same
if dve.VoteA.Height != dve.VoteB.Height ||
dve.VoteA.Round != dve.VoteB.Round ||

@@ -92,7 +86,8 @@ func (dve *DuplicateVoteEvidence) Verify(chainID string) error {
if !bytes.Equal(dve.VoteA.ValidatorAddress, dve.VoteB.ValidatorAddress) {
return fmt.Errorf("DuplicateVoteEvidence Error: Validator addresses do not match. Got %X and %X", dve.VoteA.ValidatorAddress, dve.VoteB.ValidatorAddress)
}
-// XXX: Should we enforce index is the same ?
+// Index must be the same
if dve.VoteA.ValidatorIndex != dve.VoteB.ValidatorIndex {
return fmt.Errorf("DuplicateVoteEvidence Error: Validator indices do not match. Got %d and %d", dve.VoteA.ValidatorIndex, dve.VoteB.ValidatorIndex)
}

@@ -102,11 +97,18 @@ func (dve *DuplicateVoteEvidence) Verify(chainID string) error {
return fmt.Errorf("DuplicateVoteEvidence Error: BlockIDs are the same (%v) - not a real duplicate vote", dve.VoteA.BlockID)
}

+// pubkey must match address (this should already be true, sanity check)
+addr := dve.VoteA.ValidatorAddress
+if !bytes.Equal(pubKey.Address(), addr) {
+return fmt.Errorf("DuplicateVoteEvidence FAILED SANITY CHECK - address (%X) doesn't match pubkey (%v - %X)",
+addr, pubKey, pubKey.Address())
+}
+
// Signatures must be valid
-if !dve.PubKey.VerifyBytes(dve.VoteA.SignBytes(chainID), dve.VoteA.Signature) {
+if !pubKey.VerifyBytes(dve.VoteA.SignBytes(chainID), dve.VoteA.Signature) {
return fmt.Errorf("DuplicateVoteEvidence Error verifying VoteA: %v", ErrVoteInvalidSignature)
}
-if !dve.PubKey.VerifyBytes(dve.VoteB.SignBytes(chainID), dve.VoteB.Signature) {
+if !pubKey.VerifyBytes(dve.VoteB.SignBytes(chainID), dve.VoteB.Signature) {
return fmt.Errorf("DuplicateVoteEvidence Error verifying VoteB: %v", ErrVoteInvalidSignature)
}

@@ -131,29 +133,26 @@ func (dve *DuplicateVoteEvidence) Equal(ev Evidence) bool {
type MockGoodEvidence struct {
Height_ int64
Address_ []byte
-Index_ int
}

// UNSTABLE
-func NewMockGoodEvidence(height int64, index int, address []byte) MockGoodEvidence {
+func NewMockGoodEvidence(height int64, idx int, address []byte) MockGoodEvidence {
-return MockGoodEvidence{height, address, index}
+return MockGoodEvidence{height, address}
}

func (e MockGoodEvidence) Height() int64 { return e.Height_ }
func (e MockGoodEvidence) Address() []byte { return e.Address_ }
-func (e MockGoodEvidence) Index() int { return e.Index_ }
func (e MockGoodEvidence) Hash() []byte {
-return []byte(fmt.Sprintf("%d-%d", e.Height_, e.Index_))
+return []byte(fmt.Sprintf("%d-%x", e.Height_, e.Address_))
}
-func (e MockGoodEvidence) Verify(chainID string) error { return nil }
+func (e MockGoodEvidence) Verify(chainID string, pubKey crypto.PubKey) error { return nil }
func (e MockGoodEvidence) Equal(ev Evidence) bool {
e2 := ev.(MockGoodEvidence)
return e.Height_ == e2.Height_ &&
-bytes.Equal(e.Address_, e2.Address_) &&
-e.Index_ == e2.Index_
+bytes.Equal(e.Address_, e2.Address_)
}
func (e MockGoodEvidence) String() string {
-return fmt.Sprintf("GoodEvidence: %d/%s/%d", e.Height_, e.Address_, e.Index_)
+return fmt.Sprintf("GoodEvidence: %d/%s", e.Height_, e.Address_)
}

// UNSTABLE
@@ -161,15 +160,16 @@ type MockBadEvidence struct {
MockGoodEvidence
}

-func (e MockBadEvidence) Verify(chainID string) error { return fmt.Errorf("MockBadEvidence") }
+func (e MockBadEvidence) Verify(chainID string, pubKey crypto.PubKey) error {
+return fmt.Errorf("MockBadEvidence")
+}
func (e MockBadEvidence) Equal(ev Evidence) bool {
e2 := ev.(MockBadEvidence)
return e.Height_ == e2.Height_ &&
-bytes.Equal(e.Address_, e2.Address_) &&
-e.Index_ == e2.Index_
+bytes.Equal(e.Address_, e2.Address_)
}
func (e MockBadEvidence) String() string {
-return fmt.Sprintf("BadEvidence: %d/%s/%d", e.Height_, e.Address_, e.Index_)
+return fmt.Sprintf("BadEvidence: %d/%s", e.Height_, e.Address_)
}

//-------------------------------------------
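With this change the Evidence interface drops Index() and Verify receives the equivocator's public key, which the caller looks up from the historical validator set. A minimal, self-contained sketch of a type satisfying an interface shaped like this one; pubKey, evidenceI and mockEvidence are illustrative stand-ins for crypto.PubKey and the real types:

package main

import (
    "bytes"
    "fmt"
)

// pubKey stands in for crypto.PubKey in this sketch.
type pubKey []byte

// evidenceI mirrors the reduced interface from the diff: Index() is gone and
// Verify now receives the public key looked up by the caller.
type evidenceI interface {
    Height() int64
    Address() []byte
    Hash() []byte
    Verify(chainID string, pk pubKey) error
    Equal(evidenceI) bool
    String() string
}

type mockEvidence struct {
    height  int64
    address []byte
}

func (e mockEvidence) Height() int64   { return e.height }
func (e mockEvidence) Address() []byte { return e.address }
func (e mockEvidence) Hash() []byte {
    return []byte(fmt.Sprintf("%d-%x", e.height, e.address))
}

// Verify pretends the address is derived directly from the key; a real
// implementation checks that both conflicting votes were signed by pk.
func (e mockEvidence) Verify(chainID string, pk pubKey) error {
    if !bytes.Equal(pk, e.address) {
        return fmt.Errorf("pubkey does not match address %x", e.address)
    }
    return nil
}

func (e mockEvidence) Equal(other evidenceI) bool {
    o, ok := other.(mockEvidence)
    return ok && o.height == e.height && bytes.Equal(o.address, e.address)
}

func (e mockEvidence) String() string {
    return fmt.Sprintf("Evidence{%d/%x}", e.height, e.address)
}

func main() {
    var ev evidenceI = mockEvidence{height: 5, address: []byte{0x01}}
    fmt.Println(ev, ev.Verify("test-chain", pubKey{0x01})) // Evidence{5/01} <nil>
}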
@@ -59,17 +59,16 @@ func TestEvidence(t *testing.T) {
{vote1, badVote, false}, // signed by wrong key
}

+pubKey := val.GetPubKey()
for _, c := range cases {
ev := &DuplicateVoteEvidence{
-PubKey: val.GetPubKey(),
VoteA: c.vote1,
VoteB: c.vote2,
}
if c.valid {
-assert.Nil(t, ev.Verify(chainID), "evidence should be valid")
+assert.Nil(t, ev.Verify(chainID, pubKey), "evidence should be valid")
} else {
-assert.NotNil(t, ev.Verify(chainID), "evidence should be invalid")
+assert.NotNil(t, ev.Verify(chainID, pubKey), "evidence should be invalid")
}
}

}
@@ -2,6 +2,5 @@ package types

// UNSTABLE
var (
PeerStateKey = "ConsensusReactor.peerState"
-PeerMempoolChKey = "MempoolReactor.peerMempoolCh"
)
@@ -1,71 +1,221 @@
package types

import (
-"github.com/tendermint/abci/types"
+"bytes"
+"fmt"
+"reflect"
+"time"
+
+abci "github.com/tendermint/abci/types"
+crypto "github.com/tendermint/go-crypto"
)

-// TM2PB is used for converting Tendermint types to protobuf types.
+//-------------------------------------------------------
+// Use strings to distinguish types in ABCI messages
+
+const (
+ABCIEvidenceTypeDuplicateVote = "duplicate/vote"
+ABCIEvidenceTypeMockGood = "mock/good"
+)
+
+const (
+ABCIPubKeyTypeEd25519 = "ed25519"
+ABCIPubKeyTypeSecp256k1 = "secp256k1"
+)
+
+//-------------------------------------------------------
+
+// TM2PB is used for converting Tendermint ABCI to protobuf ABCI.
// UNSTABLE
var TM2PB = tm2pb{}

type tm2pb struct{}

-func (tm2pb) Header(header *Header) types.Header {
+func (tm2pb) Header(header *Header) abci.Header {
-return types.Header{
+return abci.Header{
ChainID: header.ChainID,
Height: header.Height,
-Time: header.Time.Unix(),
-NumTxs: int32(header.NumTxs), // XXX: overflow
-LastBlockID: TM2PB.BlockID(header.LastBlockID),
-LastCommitHash: header.LastCommitHash,
-DataHash: header.DataHash,
+Time: header.Time.Unix(),
+NumTxs: int32(header.NumTxs), // XXX: overflow
+TotalTxs: header.TotalTxs,
+LastBlockHash: header.LastBlockID.Hash,
+ValidatorsHash: header.ValidatorsHash,
AppHash: header.AppHash,
+
+// Proposer: TODO
}
}

-func (tm2pb) BlockID(blockID BlockID) types.BlockID {
-return types.BlockID{
-Hash: blockID.Hash,
-Parts: TM2PB.PartSetHeader(blockID.PartsHeader),
+// XXX: panics on unknown pubkey type
+func (tm2pb) Validator(val *Validator) abci.Validator {
+return abci.Validator{
+Address: val.PubKey.Address(),
+PubKey: TM2PB.PubKey(val.PubKey),
+Power: val.VotingPower,
}
}

-func (tm2pb) PartSetHeader(partSetHeader PartSetHeader) types.PartSetHeader {
-return types.PartSetHeader{
-Total: int32(partSetHeader.Total), // XXX: overflow
-Hash: partSetHeader.Hash,
+// XXX: panics on nil or unknown pubkey type
+// TODO: add cases when new pubkey types are added to go-crypto
+func (tm2pb) PubKey(pubKey crypto.PubKey) abci.PubKey {
+switch pk := pubKey.(type) {
+case crypto.PubKeyEd25519:
+return abci.PubKey{
+Type: ABCIPubKeyTypeEd25519,
+Data: pk[:],
+}
+case crypto.PubKeySecp256k1:
+return abci.PubKey{
+Type: ABCIPubKeyTypeSecp256k1,
+Data: pk[:],
+}
+default:
+panic(fmt.Sprintf("unknown pubkey type: %v %v", pubKey, reflect.TypeOf(pubKey)))
}
}

-func (tm2pb) Validator(val *Validator) types.Validator {
-return types.Validator{
-PubKey: val.PubKey.Bytes(),
-Power: val.VotingPower,
-}
-}
-
-func (tm2pb) Validators(vals *ValidatorSet) []types.Validator {
-validators := make([]types.Validator, len(vals.Validators))
+// XXX: panics on nil or unknown pubkey type
+func (tm2pb) Validators(vals *ValidatorSet) []abci.Validator {
+validators := make([]abci.Validator, len(vals.Validators))
for i, val := range vals.Validators {
validators[i] = TM2PB.Validator(val)
}
return validators
}

-func (tm2pb) ConsensusParams(params *ConsensusParams) *types.ConsensusParams {
+func (tm2pb) ConsensusParams(params *ConsensusParams) *abci.ConsensusParams {
-return &types.ConsensusParams{
+return &abci.ConsensusParams{
-BlockSize: &types.BlockSize{
+BlockSize: &abci.BlockSize{
MaxBytes: int32(params.BlockSize.MaxBytes),
MaxTxs: int32(params.BlockSize.MaxTxs),
MaxGas: params.BlockSize.MaxGas,
},
-TxSize: &types.TxSize{
+TxSize: &abci.TxSize{
MaxBytes: int32(params.TxSize.MaxBytes),
MaxGas: params.TxSize.MaxGas,
},
-BlockGossip: &types.BlockGossip{
+BlockGossip: &abci.BlockGossip{
BlockPartSizeBytes: int32(params.BlockGossip.BlockPartSizeBytes),
},
}
}

+// ABCI Evidence includes information from the past that's not included in the evidence itself
+// so Evidence types stays compact.
+// XXX: panics on nil or unknown pubkey type
+func (tm2pb) Evidence(ev Evidence, valSet *ValidatorSet, evTime time.Time) abci.Evidence {
+_, val := valSet.GetByAddress(ev.Address())
+if val == nil {
+// should already have checked this
+panic(val)
+}
+
+// set type
+var evType string
+switch ev.(type) {
+case *DuplicateVoteEvidence:
+evType = ABCIEvidenceTypeDuplicateVote
+case MockGoodEvidence:
+// XXX: not great to have test types in production paths ...
+evType = ABCIEvidenceTypeMockGood
+default:
+panic(fmt.Sprintf("Unknown evidence type: %v %v", ev, reflect.TypeOf(ev)))
+}
+
+return abci.Evidence{
+Type: evType,
+Validator: TM2PB.Validator(val),
+Height: ev.Height(),
+Time: evTime.Unix(),
+TotalVotingPower: valSet.TotalVotingPower(),
+}
+}
+
+// XXX: panics on nil or unknown pubkey type
+func (tm2pb) ValidatorFromPubKeyAndPower(pubkey crypto.PubKey, power int64) abci.Validator {
+pubkeyABCI := TM2PB.PubKey(pubkey)
+return abci.Validator{
+Address: pubkey.Address(),
+PubKey: pubkeyABCI,
+Power: power,
+}
+}
+
+//----------------------------------------------------------------------------
+
+// PB2TM is used for converting protobuf ABCI to Tendermint ABCI.
+// UNSTABLE
+var PB2TM = pb2tm{}
+
+type pb2tm struct{}
+
+func (pb2tm) PubKey(pubKey abci.PubKey) (crypto.PubKey, error) {
+// TODO: define these in go-crypto and use them
+sizeEd := 32
+sizeSecp := 33
+switch pubKey.Type {
+case ABCIPubKeyTypeEd25519:
+if len(pubKey.Data) != sizeEd {
+return nil, fmt.Errorf("Invalid size for PubKeyEd25519. Got %d, expected %d", len(pubKey.Data), sizeEd)
+}
+var pk crypto.PubKeyEd25519
+copy(pk[:], pubKey.Data)
+return pk, nil
+case ABCIPubKeyTypeSecp256k1:
+if len(pubKey.Data) != sizeSecp {
+return nil, fmt.Errorf("Invalid size for PubKeyEd25519. Got %d, expected %d", len(pubKey.Data), sizeSecp)
+}
+var pk crypto.PubKeySecp256k1
+copy(pk[:], pubKey.Data)
+return pk, nil
+default:
+return nil, fmt.Errorf("Unknown pubkey type %v", pubKey.Type)
+}
+}
+
+func (pb2tm) Validators(vals []abci.Validator) ([]*Validator, error) {
+tmVals := make([]*Validator, len(vals))
+for i, v := range vals {
+pub, err := PB2TM.PubKey(v.PubKey)
+if err != nil {
+return nil, err
+}
+// If the app provided an address too, it must match.
+// This is just a sanity check.
+if len(v.Address) > 0 {
+if !bytes.Equal(pub.Address(), v.Address) {
+return nil, fmt.Errorf("Validator.Address (%X) does not match PubKey.Address (%X)",
+v.Address, pub.Address())
+}
+}
+tmVals[i] = &Validator{
+Address: pub.Address(),
+PubKey: pub,
+VotingPower: v.Power,
+}
+}
+return tmVals, nil
+}
+
+func (pb2tm) ConsensusParams(csp *abci.ConsensusParams) ConsensusParams {
+return ConsensusParams{
+BlockSize: BlockSize{
+MaxBytes: int(csp.BlockSize.MaxBytes), // XXX
+MaxTxs: int(csp.BlockSize.MaxTxs), // XXX
+MaxGas: csp.BlockSize.MaxGas,
+},
+TxSize: TxSize{
+MaxBytes: int(csp.TxSize.MaxBytes), // XXX
+MaxGas: csp.TxSize.MaxGas,
+},
+BlockGossip: BlockGossip{
+BlockPartSizeBytes: int(csp.BlockGossip.BlockPartSizeBytes), // XXX
+},
+// TODO: EvidenceParams: EvidenceParams{
+// MaxAge: int(csp.Evidence.MaxAge), // XXX
+// },
+}
+}
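TM2PB.PubKey tags the raw key bytes with a type string ("ed25519" or "secp256k1") for the ABCI message, and PB2TM.PubKey reverses the conversion after checking the declared type and expected size. A minimal sketch of that round trip; abciPubKey, toABCI and fromABCI are simplified stand-ins, not the real API:

package main

import "fmt"

// Simplified stand-ins for the ABCI pubkey message used in the diff.
const (
    typeEd25519   = "ed25519"
    typeSecp256k1 = "secp256k1"
)

type abciPubKey struct {
    Type string
    Data []byte
}

// toABCI mirrors the idea behind TM2PB.PubKey: tag raw key bytes with a type string.
func toABCI(keyType string, raw []byte) abciPubKey {
    return abciPubKey{Type: keyType, Data: raw}
}

// fromABCI mirrors the idea behind PB2TM.PubKey: check the declared type and
// expected size before reconstructing the concrete key.
func fromABCI(pk abciPubKey) ([]byte, error) {
    sizes := map[string]int{typeEd25519: 32, typeSecp256k1: 33}
    want, ok := sizes[pk.Type]
    if !ok {
        return nil, fmt.Errorf("unknown pubkey type %q", pk.Type)
    }
    if len(pk.Data) != want {
        return nil, fmt.Errorf("invalid size for %s key: got %d, want %d", pk.Type, len(pk.Data), want)
    }
    return pk.Data, nil
}

func main() {
    raw := make([]byte, 32)
    back, err := fromABCI(toABCI(typeEd25519, raw))
    fmt.Println(len(back), err) // 32 <nil>
}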
types/protobuf_test.go (new file, 69 lines)

@@ -0,0 +1,69 @@
+package types
+
+import (
+"testing"
+
+"github.com/stretchr/testify/assert"
+abci "github.com/tendermint/abci/types"
+crypto "github.com/tendermint/go-crypto"
+)
+
+func TestABCIPubKey(t *testing.T) {
+pkEd := crypto.GenPrivKeyEd25519().PubKey()
+pkSecp := crypto.GenPrivKeySecp256k1().PubKey()
+testABCIPubKey(t, pkEd, ABCIPubKeyTypeEd25519)
+testABCIPubKey(t, pkSecp, ABCIPubKeyTypeSecp256k1)
+}
+
+func testABCIPubKey(t *testing.T, pk crypto.PubKey, typeStr string) {
+abciPubKey := TM2PB.PubKey(pk)
+pk2, err := PB2TM.PubKey(abciPubKey)
+assert.Nil(t, err)
+assert.Equal(t, pk, pk2)
+}
+
+func TestABCIValidators(t *testing.T) {
+pkEd := crypto.GenPrivKeyEd25519().PubKey()
+
+// correct validator
+tmValExpected := &Validator{
+Address: pkEd.Address(),
+PubKey: pkEd,
+VotingPower: 10,
+}
+
+tmVal := &Validator{
+Address: pkEd.Address(),
+PubKey: pkEd,
+VotingPower: 10,
+}
+
+abciVal := TM2PB.Validator(tmVal)
+tmVals, err := PB2TM.Validators([]abci.Validator{abciVal})
+assert.Nil(t, err)
+assert.Equal(t, tmValExpected, tmVals[0])
+
+// val with address
+tmVal.Address = pkEd.Address()
+
+abciVal = TM2PB.Validator(tmVal)
+tmVals, err = PB2TM.Validators([]abci.Validator{abciVal})
+assert.Nil(t, err)
+assert.Equal(t, tmValExpected, tmVals[0])
+
+// val with incorrect address
+abciVal = TM2PB.Validator(tmVal)
+abciVal.Address = []byte("incorrect!")
+tmVals, err = PB2TM.Validators([]abci.Validator{abciVal})
+assert.NotNil(t, err)
+assert.Nil(t, tmVals)
+}
+
+func TestABCIConsensusParams(t *testing.T) {
+cp := DefaultConsensusParams()
+cp.EvidenceParams.MaxAge = 0 // TODO add this to ABCI
+abciCP := TM2PB.ConsensusParams(cp)
+cp2 := PB2TM.ConsensusParams(abciCP)
+
+assert.Equal(t, *cp, cp2)
+}
@@ -3,14 +3,14 @@ package version
// Version components
const (
Maj = "0"
-Min = "19"
+Min = "20"
-Fix = "7"
+Fix = "1"
)

var (
// Version is the current version of Tendermint
// Must be a string because scripts like dist.sh read this file.
-Version = "0.19.7"
+Version = "0.20.1"

// GitCommit is the current HEAD set using ldflags.
GitCommit string