Compare commits

..

369 Commits

Author SHA1 Message Date
Ethan Buchman
ca632c9e90 Merge pull request #527 from tendermint/release-v0.10.0
Release v0.10.0
2017-06-02 23:59:46 -04:00
Ethan Buchman
c94a92b30d update changelog and readme 2017-06-02 23:33:03 -04:00
Ethan Buchman
84fea82043 dist: dont mkdir in container 2017-06-02 23:20:02 -04:00
Ethan Buchman
d608e2b7ad bump version to 0.10.0 2017-06-02 23:19:30 -04:00
Ethan Buchman
d19d8e7914 update readme and changelog 2017-06-02 23:18:10 -04:00
Ethan Buchman
2b5b017253 Merge pull request #516 from tendermint/release-v0.10.0-rc2
fixes to changelog, config, default logging
2017-05-29 08:58:20 -04:00
Ethan Buchman
630c6ef7b5 bump version 2017-05-29 02:47:09 -04:00
Ethan Buchman
ee88272216 enable unsafe rpc routes in tests via flag 2017-05-26 14:20:23 -04:00
Ethan Buchman
bd7ec18c19 fix tests 2017-05-26 12:17:32 -04:00
Ethan Buchman
42626d9e16 [types] overwrite pubkey/addr in LoadPrivValidator. closes #500 2017-05-25 13:40:13 -04:00
Ethan Buchman
fc6611b2d9 [config] RPCConfig 2017-05-24 13:56:12 -04:00
Ethan Buchman
4f27752468 [rpc] dont enable unsafe by default; limit /blockchain_info to 20 blocks 2017-05-24 11:31:31 -04:00
Ethan Buchman
3fbe286e5a small fixes to changelog, config, default logging 2017-05-22 08:16:25 -04:00
Ethan Buchman
267f134d44 Merge pull request #508 from tendermint/release-v0.10.0-rc1
Release v0.10.0 rc1
2017-05-18 13:45:17 +02:00
Ethan Buchman
d4fa98de68 update version for rc1 2017-05-18 07:20:37 -04:00
Ethan Buchman
790e04ed3e add link to list of breaking funcs/methods 2017-05-18 12:32:34 +02:00
Ethan Buchman
772306cac8 update changelog and glide 2017-05-18 06:16:08 -04:00
Ethan Buchman
30a19fc899 [consensus] Info->Debug for is a validator log msg 2017-05-18 11:26:15 +02:00
Ethan Buchman
6a30a902c9 [types] more []byte->data.Bytes and some %X->%v 2017-05-17 01:08:41 +02:00
Ethan Buchman
883b71ca70 update CHANGELOG 2017-05-17 00:27:03 +02:00
Ethan Buchman
11b5d11e9e Merge pull request #505 from tendermint/dont-hash-accum
[types] dont hash validator.Accum
2017-05-17 00:19:28 +02:00
Ethan Buchman
6d83c60c40 [types] dont hash validator.Accum 2017-05-17 00:16:38 +02:00
Ethan Buchman
cc2b430e68 update glide and changelog 2017-05-17 00:12:56 +02:00
Ethan Buchman
9d7d8075d1 Merge remote-tracking branch 'origin/update-for-new-abci-api' into develop 2017-05-16 23:58:48 +02:00
Ethan Buchman
fe87623674 Merge pull request #501 from tendermint/feature/493-per-module-log-levels
Feature/493 per module log levels
2017-05-16 23:33:21 +02:00
Anton Kaliaev
91dc87e7c4 update for a new ABCI API 2017-05-16 19:06:35 +02:00
Anton Kaliaev
fb0df75de0 changes per Frey comments (Refs #493) 2017-05-16 15:16:50 +02:00
Ethan Buchman
e1792c1ea5 fix tx string format take 2 2017-05-16 14:12:48 +02:00
Ethan Buchman
d5113377e2 fix tx string format 2017-05-16 14:01:52 +02:00
Adrian Brink
eb9ca23250 log whether node is a validator in each round 2017-05-16 14:01:52 +02:00
Anton Kaliaev
05a8204508 per module log levels (Refs #493) 2017-05-16 13:57:28 +02:00
Adrian Brink
e041b2eee6 Merge pull request #502 from tendermint/adrianbrink-patch-1
Update README.md
2017-05-16 10:13:31 +02:00
Adrian Brink
0a4ab7e38f Update README.md 2017-05-16 09:50:52 +02:00
Ethan Buchman
c4ad0f76e6 Merge pull request #494 from tendermint/json-naming
Clean up json output
2017-05-15 19:03:25 +02:00
Ethan Buchman
0f1edcd57d Merge pull request #497 from tendermint/feature/color-code-different-consensus-instances
Color code different consensus instances in consensus tests
2017-05-15 19:02:02 +02:00
Adrian Brink
118d565534 Merge pull request #472 from tendermint/string_reprs
Add Tx String representation. Got the ok from Anton.
2017-05-15 09:49:40 +02:00
Anton Kaliaev
1dfb95f719 [consensus] color code different consensus instances in consensus tests
(Refs #492)
2017-05-15 09:35:29 +02:00
Ethan Buchman
33f807d9a1 Merge pull request #489 from tendermint/feature/adrian-#469
Use ld flags to set git hash instead of "revision_file"
2017-05-14 21:52:29 +02:00
Ethan Frey
926fb83e33 Re-added comment 2017-05-14 19:10:58 +02:00
Ethan Frey
2b324b7eb9 RPC returns pretty formated json 2017-05-14 19:06:34 +02:00
Ethan Frey
157ec8af2d Add json tags to validator set 2017-05-14 19:06:33 +02:00
Ethan Buchman
f14f167297 Merge pull request #491 from tendermint/feature/new-logging
New logging
2017-05-14 00:45:21 +02:00
Anton Kaliaev
4fe67652ff move SetLogger down 2017-05-14 00:24:58 +02:00
Anton Kaliaev
c5bccc5474 set missing logger on switch
```
panic: runtime error: invalid memory address or nil pointer dereference
[signal SIGSEGV: segmentation violation code=0x1 addr=0x0 pc=0x882cec]

goroutine 328 [running]:
github.com/tendermint/tendermint/p2p.(*Switch).DialPeerWithAddress(0xc42000a500, 0xc4202088d0, 0xc420403500, 0x0, 0x0, 0x0)
        /home/vagrant/go/src/github.com/tendermint/tendermint/p2p/switch.go:324 +0x2fc
github.com/tendermint/tendermint/p2p.(*PEXReactor).ensurePeers.func1(0xc4201663f0, 0xc4202088d0)
        /home/vagrant/go/src/github.com/tendermint/tendermint/p2p/pex_reactor.go:280 +0x3e
created by github.com/tendermint/tendermint/p2p.(*PEXReactor).ensurePeers
        /home/vagrant/go/src/github.com/tendermint/tendermint/p2p/pex_reactor.go:284 +0x5d4
```
2017-05-13 17:05:44 +02:00
Anton Kaliaev
f8fdbe3dbc changes as per Bucky's review 2017-05-13 16:22:51 +02:00
Anton Kaliaev
c9cd8de9c6 set logger 2017-05-13 10:25:00 +02:00
Anton Kaliaev
3e1343dc6b has as a base16 string 2017-05-13 10:24:59 +02:00
Anton Kaliaev
bc4e6566e7 [p2p] refactor upnp to use new logger 2017-05-13 10:24:59 +02:00
Ethan Buchman
16509ac3db p2p: fix race by peer.Start() before peers.Add() 2017-05-13 10:24:59 +02:00
Anton Kaliaev
f803544195 new logging 2017-05-13 10:24:58 +02:00
Anton Kaliaev
8eb07800b3 Merge pull request #473 from tendermint/logging-arch-proposal
Logging architecture proposal
2017-05-12 20:40:28 +02:00
Anton Kaliaev
538b325150 update arch proposal [ci skip] [circleci skip] 2017-05-12 20:32:57 +02:00
Adrian Brink
cd3c3c3bad Modify makefile 2017-05-09 11:39:46 +02:00
Adrian Brink
8c91014cd8 Add git commit hash to version. 2017-05-09 11:37:59 +02:00
Adrian Brink
6312eb91be Change "make build" to set GitCommit variable
As described above.
2017-05-06 12:37:48 +02:00
Ethan Frey
d2ae7e164a Make testify a testImport for consistency 2017-05-05 19:46:32 +02:00
Ethan Buchman
627a686f90 Merge pull request #487 from tendermint/fixp2ptests
One silly tests passes on osx, fails on linux...
2017-05-05 12:53:39 -04:00
Ethan Frey
57527f9f67 One silly tests passes on osx, fails on linux... comment out so i can develop 2017-05-05 18:48:39 +02:00
Ethan Buchman
14d0d395f2 Merge pull request #484 from tendermint/config
Config
2017-05-05 12:46:39 -04:00
Ethan Buchman
edd7263f06 fixes from review 2017-05-05 12:25:53 -04:00
Ethan Frey
2a9e89b34f Add basic tests on config, mainly to raise test coverage 2017-05-05 15:50:23 +02:00
Ethan Frey
dd1f5a2268 Test config parsing in root command 2017-05-05 15:30:39 +02:00
Ethan Buchman
8a0466d81d config: pex_reactor -> pex 2017-05-05 02:04:24 -04:00
Ethan Buchman
fb9d3842e4 test: p2p.seeds and p2p.pex 2017-05-05 01:28:43 -04:00
Ethan Buchman
8c3823545d update glide 2017-05-05 00:51:03 -04:00
Ethan Buchman
75989342b0 fixes from rebase 2017-05-04 23:03:42 -04:00
Ethan Buchman
46151720f8 fix tests 2017-05-04 22:46:41 -04:00
Ethan Buchman
9109b20852 SetRoot 2017-05-04 22:46:41 -04:00
Ethan Frey
6b059e0063 Accept relative paths in all configs, TODO: must SetRoot 2017-05-04 22:46:40 -04:00
Ethan Frey
92dee7ea3c Commands compile (mostly) with new config reading 2017-05-04 22:46:40 -04:00
Ethan Frey
604bf03f3a Pulled out all config structs (except p2p.PeerConfig) into config package 2017-05-04 22:46:40 -04:00
Ethan Buchman
f217f2b2c5 cleanup run_node flags 2017-05-04 22:46:13 -04:00
Ethan Buchman
92bafa7ecd consensus: fix tests 2017-05-04 22:46:13 -04:00
Ethan Buchman
6afee8f117 rpc: fix tests 2017-05-04 22:45:13 -04:00
Ethan Buchman
1ef7c1d25b cmd: fixes for new config 2017-05-04 22:43:55 -04:00
Ethan Buchman
7db7bbe464 node: ConfigFromViper 2017-05-04 22:43:55 -04:00
Ethan Buchman
57151d6043 p2p: use cmn instead of . 2017-05-04 22:43:55 -04:00
Ethan Buchman
5d660e073a remove viper from p2p 2017-05-04 22:43:55 -04:00
Ethan Buchman
4982cb4d1f fix tests for state and mempool 2017-05-04 22:43:55 -04:00
Ethan Buchman
24ce90fc72 fix up config defaults 2017-05-04 22:43:55 -04:00
Ethan Buchman
75b6c5215f fewer structs. remove viper from consensus 2017-05-04 22:43:55 -04:00
Ethan Buchman
d8fb226ec4 new config 2017-05-04 22:43:55 -04:00
Ethan Buchman
95c74b2ccd remove some more viper 2017-05-04 22:43:55 -04:00
Ethan Buchman
f0e7f0acf8 remove viper from rpc except test 2017-05-04 22:43:55 -04:00
Ethan Buchman
1fcc9dc654 remove viper from proxy 2017-05-04 22:39:22 -04:00
Ethan Buchman
7c0f51e24b remove viper from mempool 2017-05-04 22:39:22 -04:00
Ethan Buchman
29c0e6e4f4 remove viper from blockchain and state 2017-05-04 22:39:21 -04:00
Ethan Buchman
56a1a2d917 remove logrotate.config 2017-05-04 22:39:03 -04:00
Anton Kaliaev
4871e64b03 update docker README [ci skip] [circleci skip] 2017-05-04 13:36:27 +04:00
Anton Kaliaev
809e0e8c59 update Dockerfile [ci skip] [circleci skip] 2017-05-04 13:32:46 +04:00
Ethan Buchman
4305d054d6 Merge pull request #465 from tendermint/bytes
RPC Serialization Overhaul
2017-05-03 14:37:21 -04:00
Ethan Buchman
9860c8fee1 rpc: cleanup some comments [ci skip] 2017-05-03 14:33:07 -04:00
Ethan Frey
7ebf011fcd Fixed rpctypes.Request creation to new format 2017-05-03 16:58:21 +02:00
Ethan Frey
4a1b714ca4 All tests pass without go-wire json ptr madness 2017-05-03 16:45:00 +02:00
Ethan Frey
4c1d41c12e Test json rpc parsing 2017-05-03 16:26:18 +02:00
Ethan Frey
6ba799132c json.RawMessage in RPCRequest to defer parsing 2017-05-03 16:13:58 +02:00
Adrian Brink
e5d8fcd198 Merge pull request #474 from faddat/patch-4
Fix Mintnet Kubernetes link
2017-05-03 13:56:45 +02:00
Adrian Brink
f31d6ffb8c Fix Mintnet kubernetes url 2017-05-03 13:55:42 +02:00
Ethan Buchman
2608b95b4b Merge pull request #463 from tendermint/feature/adrian-initchain
InitChain message gets send at genesis.
2017-05-01 12:00:29 -04:00
Adrian Brink
2bf8c40cff Add extra memory to virtual machine and add coverage report to gitignore. 2017-05-01 17:29:44 +02:00
Jacob Gadikian
b5ab74fd38 Update README.md
updated mintnet to mintnet-kubernetes
2017-05-01 15:50:11 +07:00
Anton Kaliaev
556fbf0c4c logging arch proposal [ci skip] [circleci skip] 2017-05-01 11:58:43 +04:00
Jae Kwon
4c7a2be06a Add Tx String representation 2017-04-30 16:06:49 -07:00
Ethan Buchman
6dbcfb32d2 comment on copied wire file 2017-04-28 23:22:54 -04:00
Ethan Buchman
efeadcc0f4 some cleanup from review 2017-04-28 23:18:38 -04:00
Ethan Buchman
297772e009 Merge pull request #467 from tendermint/nowire
Nowire
2017-04-28 22:58:39 -04:00
Ethan Buchman
aa9e673ed7 test: jq .result[1] -> jq .result 2017-04-28 22:31:30 -04:00
Ethan Buchman
4e781961e9 remove TMResult. ::drinks champagne:: 2017-04-28 22:26:23 -04:00
Ethan Buchman
884060eb9b rpc/lib: no Result wrapper 2017-04-28 22:04:14 -04:00
Ethan Buchman
07e59e63f9 TMEventDataInner 2017-04-28 17:57:06 -04:00
Ethan Buchman
ac28b12fa8 add readReflectJSON from wire 2017-04-28 17:56:44 -04:00
Ethan Frey
257f45b768 ebuchman: added some demos on how to parse unknown types 2017-04-28 22:01:46 +02:00
Ethan Buchman
acfbea6d49 rpc: decode args without wire 2017-04-28 14:36:38 -04:00
Ethan Frey
6c60c07f16 BROKEN: attempt to replace go-wire.JSON with json.Unmarshall in rpc 2017-04-28 16:24:06 +02:00
Ethan Frey
bff8402fe8 Fix json for TMResult to not include "TMResultInner" 2017-04-28 15:26:06 +02:00
Ethan Frey
f6f1f1992c Prepare rpc responses for go-data compatibility, still use go-wire 2017-04-28 14:46:04 +02:00
Ethan Frey
194f345470 Use non-standard port so tests don't die when I am running basecoin 2017-04-28 14:45:34 +02:00
Ethan Buchman
2bf7e9c968 update glide 2017-04-27 19:58:12 -04:00
Ethan Buchman
c930f43cbe rpc: fix tests 2017-04-27 19:56:14 -04:00
Ethan Buchman
a518d08839 rpc: response types use Result instead of pb Response 2017-04-27 19:34:25 -04:00
Ethan Buchman
cdf650fba9 rpc: repsonse types use data.Bytes 2017-04-27 19:06:07 -04:00
Ethan Buchman
bdb34f9f4e types: []byte -> data.Bytes 2017-04-27 19:01:18 -04:00
Ethan Buchman
0be3480729 consensus: comment about test_data [ci skip] 2017-04-27 18:34:57 -04:00
Ethan Buchman
495283e2d4 fix replay tests and update test wals for InitChain 2017-04-27 18:30:43 -04:00
Adrian Brink
842609ddcb Send InitChain message from ABCI to Core on Genesis
InitChain is send from the ABCI to the Core node when the ABCI
app has no blocks stored.
2017-04-27 20:22:11 +02:00
Ethan Buchman
1310c72647 version bump and changelog 2017-04-26 20:06:45 -04:00
Ethan Buchman
cc6dde96c1 rpc -> rpc/lib and rpc/tendermint -> rpc 2017-04-26 19:57:33 -04:00
Ethan Buchman
cc7b2d26e5 Merge branch 'master' into develop 2017-04-26 19:21:06 -04:00
Ethan Buchman
1781a52147 Merge pull request #459 from tendermint/http_codes
rpc: use HTTP error codes
2017-04-26 19:05:43 -04:00
Ethan Buchman
0ba449c8ba Merge pull request #455 from tendermint/unstable
Unstable
2017-04-26 19:04:52 -04:00
Ethan Buchman
9851265d4f rpc: use HTTP error codes 2017-04-25 23:09:47 -04:00
Ethan Buchman
098646c5ff test: test_libs all use Makefile 2017-04-25 18:35:22 -04:00
Ethan Buchman
0e5cd6dc2f Merge pull request #442 from tendermint/viper
go-config -> viper, commands: Run -> RunE
2017-04-25 17:59:17 -04:00
Ethan Buchman
2fcb2b9232 remove unsafe_set_config 2017-04-25 17:58:26 -04:00
Ethan Buchman
fcf78a5da7 cleanup go-config/viper and some unnamed imports 2017-04-25 14:54:56 -04:00
rigel rozanski
72c4be35e8 tiny fix 2017-04-25 13:44:13 -04:00
Rigel Rozanski
5d0c2a1414 commands: Run -> RunE 2017-04-25 13:44:13 -04:00
Rigel Rozanski
7bb638e3b8 fix test_integrations error 2017-04-25 13:44:13 -04:00
Rigel Rozanski
7448753257 fixing tests 2017-04-25 13:43:57 -04:00
Rigel Rozanski
6e662337ff dont export resetPrivValidator 2017-04-25 13:43:57 -04:00
Rigel Rozanski
270b68a893 glide lock updates 2017-04-25 13:43:22 -04:00
Rigel Rozanski
cefb2bede0 adding viper
int

int
2017-04-25 13:42:22 -04:00
rigelrozanski
47852122d0 changed reset commands 2017-04-25 13:34:46 -04:00
Ethan Buchman
00055fa2e8 Merge pull request #456 from tendermint/repo-merge
Repo merge
2017-04-25 13:13:32 -04:00
Ethan Frey
bd93f76950 Improve rpc to properly format any alias for []byte in URIClient 2017-04-25 17:17:51 +02:00
Ethan Frey
803b1f2115 Improve client test cases 2017-04-25 16:49:01 +02:00
Ethan Buchman
3cdd2daf08 fix tests 2017-04-21 18:44:37 -04:00
Ethan Buchman
4e0afc55e6 glide update 2017-04-21 18:35:48 -04:00
Ethan Buchman
e160318eef glide update 2017-04-21 18:21:59 -04:00
Ethan Buchman
56c60fba23 go-p2p -> tendermint/p2p 2017-04-21 18:19:41 -04:00
Ethan Buchman
9e82d132ce go-rpc -> tendermint/rpc 2017-04-21 18:19:29 -04:00
Ethan Buchman
a70c95b79e tmlibs/common/test -> tmlibs/test 2017-04-21 18:18:22 -04:00
Ethan Buchman
d5b524e309 go-merkle -> merkleeyes/iavl and tmlibs/merkle 2017-04-21 18:16:05 -04:00
Ethan Buchman
e6fe6b5b76 go-data -> go-wire/data 2017-04-21 18:13:25 -04:00
Ethan Buchman
d1926bcad1 use tmlibs 2017-04-21 18:12:54 -04:00
Ethan Buchman
fa451fc55c tendermint/rpc -> tendermint/rpc/tendermint 2017-04-21 18:10:41 -04:00
Ethan Buchman
5da9b3a803 postmerge 2017-04-21 18:09:47 -04:00
Ethan Buchman
93c58d0b24 remove glide and license from rpc and p2p 2017-04-21 18:08:25 -04:00
Ethan Buchman
23a6a6f8fc move into p2p package 2017-04-21 18:07:52 -04:00
Ethan Buchman
63f546497b Merge remote-tracking branch 'p2p/develop' into repo-merge 2017-04-21 18:06:57 -04:00
Ethan Buchman
c55d83281a move into rpc package 2017-04-21 18:05:39 -04:00
Ethan Buchman
35f1db09a9 Merge remote-tracking branch 'rpc/develop' into repo-merge 2017-04-21 18:04:42 -04:00
Ethan Buchman
34965f610d crypto Wrap/Unwrap 2017-04-21 18:02:25 -04:00
Ethan Buchman
eaeb547938 use tmlibs 2017-04-21 17:53:22 -04:00
Ethan Buchman
15d5b2ac49 use tmlibs 2017-04-21 17:51:11 -04:00
Ethan Buchman
992b11c450 premerge2: rpc -> rpc/tendermint 2017-04-21 17:39:56 -04:00
Ethan Buchman
0017fb7ffe premerge 2017-04-21 17:38:40 -04:00
Ethan Buchman
3240ce21b8 update glide 2017-04-21 17:28:13 -04:00
Ethan Frey
543eea4f4e update deps to unstable 2017-04-21 16:56:10 -04:00
Ethan Frey
6d223d5526 Update to latest go-crypto 2017-04-21 16:55:58 -04:00
Ethan Frey
3d9ca32e95 Update all config for p2p integration tests 2017-04-21 16:55:38 -04:00
Ethan Frey
90abc61c56 Improve go-data json support in rpc 2017-04-21 16:55:37 -04:00
Ethan Frey
6a0217688f Ensure private validator addresses are hex 2017-04-21 16:51:17 -04:00
Ethan Frey
b798169c6e Update go-crypto to read/write properly with go-wire in wal files 2017-04-21 16:51:17 -04:00
Anton Kaliaev
7e56aad51a [consensus/test_data/build.sh] install tendermint if absent 2017-04-21 16:51:17 -04:00
Ethan Frey
e325ffc681 Lots of updates to use new go-crypto / json style 2017-04-21 16:51:17 -04:00
Ethan Frey
516e78ea54 Fix types to use updated go-crypto 2017-04-21 16:50:27 -04:00
Ethan Buchman
2c8df0ee6b Merge pull request #17 from tendermint/develop
v0.7.0
2017-04-21 13:08:14 -04:00
Ethan Buchman
e8f33a4784 Merge pull request #25 from tendermint/develop
v0.5.0
2017-04-21 13:06:44 -04:00
Ethan Buchman
58ccefa407 update changelog 2017-04-21 13:06:26 -04:00
Ethan Buchman
559613689d Merge pull request #18 from tendermint/bugfix/fix-backward-compatibility-for-ws
fix backward compatibility for WS
2017-04-21 12:20:34 -04:00
Ethan Buchman
a01cff9ce6 jsonParamsToArgsRPC func 2017-04-21 12:18:21 -04:00
Anton Kaliaev
d6fd0c4ca0 fix backward compatibility for WS 2017-04-21 18:30:22 +03:00
Ethan Buchman
17124989a9 Merge pull request #10 from tendermint/pex-reactor-fixes-#9
Pex reactor fixes #9
2017-04-20 17:32:38 -04:00
Ethan Buchman
75bad132fc msgCountByPeer is a CMap 2017-04-20 17:29:43 -04:00
Ethan Buchman
391c738959 update comment about outbound peers and addrbook 2017-04-20 12:21:45 -04:00
Anton Kaliaev
8655e2456e it is non-deterministic (could fail sometimes) 2017-04-20 13:37:06 +04:00
Anton Kaliaev
17ec70fc09 revert 2710873 2017-04-20 13:36:40 +04:00
Anton Kaliaev
9ce71013df revert e448199 2017-04-20 13:36:40 +04:00
Anton Kaliaev
5ab8ca0868 fix race 2017-04-20 13:36:40 +04:00
Anton Kaliaev
4c0d1d3ad2 return wg to addrbook 2017-04-20 13:36:39 +04:00
Anton Kaliaev
0277e52bd5 fix merge 2017-04-20 13:36:39 +04:00
Anton Kaliaev
cf18bf2966 add public RemoveAddress API
after discussion with @ebuchman (https://github.com/tendermint/go-p2p/pull/10#discussion_r96471729)
2017-04-20 13:36:39 +04:00
Anton Kaliaev
324293f4cb note on preventing abuse [ci skip] 2017-04-20 13:36:39 +04:00
Anton Kaliaev
52d9cf080e make GoLint happy 2017-04-20 13:36:39 +04:00
Anton Kaliaev
590efc1040 call saveToFile OnStop
This is better than waiting because while we wait, anything could happen
(crash, timeout of the code who's using addrbook, ...). If we save
immediately, we have much greater chances of success.
2017-04-20 13:36:38 +04:00
Anton Kaliaev
5eeaffd38e do not create file, just temp dir 2017-04-20 13:36:38 +04:00
Anton Kalyaev
07e7b98c70 improve ensurePeers routine
optimizations:

- if we move peer to the old bucket as soon as connected and pick only
  from new group, we can skip alreadyConnected check
2017-04-20 13:36:38 +04:00
Anton Kalyaev
873d34157d prevent abuse from peers 2017-04-20 13:36:38 +04:00
Anton Kalyaev
47df1fb7d4 test PEXReactor#Receive 2017-04-20 13:36:38 +04:00
Anton Kalyaev
1a59b6a3b4 replace repeate timer with simple ticker
no need for repeate timer here (no need for goroutine safety)
2017-04-20 13:36:38 +04:00
Anton Kalyaev
0109f1e524 test ensurePeers goroutine 2017-04-20 13:36:37 +04:00
Anton Kalyaev
37d5a2cf3e implement RemovePeer for PEXReactor 2017-04-20 13:36:37 +04:00
Anton Kalyaev
3af7c67757 add Dockerfile 2017-04-20 13:36:37 +04:00
Anton Kalyaev
26f661a5dd prefer short names 2017-04-20 13:36:37 +04:00
Anton Kalyaev
057cfb30f1 remove unused error 2017-04-20 13:36:37 +04:00
Ethan Buchman
1a42f946dc version bump 2017-04-19 00:05:18 -04:00
Ethan Buchman
e05052b079 update glide 2017-04-19 00:01:55 -04:00
Ethan Buchman
7d5b62b61f CHANGELOG and version bump 2017-04-18 23:58:24 -04:00
Ethan Buchman
e6b7e66bbe Merge pull request #23 from tendermint/more-tests
More tests
2017-04-18 20:26:54 -04:00
Anton Kaliaev
2ac69176e1 add a comment for MConnection#CanSend
also add a note to TestMConnectionSend
2017-04-18 12:11:48 +04:00
Anton Kaliaev
fbedb426ce tests for NetAddress 2017-04-14 16:37:07 +04:00
Anton Kaliaev
6dc113aa80 [netaddress] panic only when normal run 2017-04-14 14:56:02 +04:00
Anton Kaliaev
ebe23f1379 refactor MConnection#sendBytes 2017-04-14 14:21:58 +04:00
Anton Kaliaev
06d219db8e test peer with no auth enc 2017-04-14 12:43:28 +04:00
Anton Kaliaev
1d01f6af98 2 kinds of peers: outbound and inbound 2017-04-13 12:36:16 +04:00
Anton Kaliaev
715b8c629f use the peer struct to simulate remote peer 2017-04-13 12:09:43 +04:00
Anton Kaliaev
a63e1bb2dc fix possible panic 2017-04-13 12:08:57 +04:00
Anton Kaliaev
5965578c56 [fuzz] only one way to set config variables 2017-04-13 11:55:14 +04:00
Ethan Buchman
4671c44b2d Merge pull request #13 from tendermint/allow-for-multiple-restarts
[WSClient] allow for multiple restarts
2017-04-12 19:32:24 -04:00
Ethan Buchman
052c2c1575 Merge pull request #11 from tendermint/feature/refactor-tests
WSClient failing to echo bytes
2017-04-12 19:32:14 -04:00
Ethan Buchman
4b30cb3083 test: check err on cmd.Wait 2017-04-12 19:30:05 -04:00
Ethan Buchman
8c38543357 fix error msg 2017-04-12 18:15:51 -04:00
Ethan Buchman
c3295f4878 RPCRequest.Params can be map[string]interface{} or []interface{} 2017-04-12 13:42:19 -04:00
Anton Kaliaev
7dcc3dbcd1 test peer 2017-04-12 16:55:17 +04:00
Ethan Buchman
c39e001a95 Merge pull request #22 from tendermint/persistent
fix closing conn
2017-04-11 13:40:06 -04:00
Ethan Buchman
8067cdb5f2 fix closing conn 2017-04-11 12:42:11 -04:00
Ethan Frey
9d18cbe74e Remove race condition between read go-routine and stop 2017-04-11 13:38:15 +02:00
Ethan Buchman
ebd3929c0d Merge pull request #18 from tendermint/13-reconnect-to-seeds
persistent peers (Refs 13)
2017-04-10 16:21:02 -04:00
Ethan Buchman
9a1a6c56b4 dont expose makePersistent 2017-04-10 16:05:00 -04:00
Ethan Buchman
b6f744c732 fix AddPeerWithConnection 2017-04-10 16:03:14 -04:00
Ethan Buchman
a9bb6734e7 SetDeadline for authEnc. Stop peer if Add fails 2017-04-10 16:02:01 -04:00
Anton Kaliaev
8bb3a2e1d7 persistent peers (Refs #13) 2017-04-10 22:47:05 +04:00
Anton Kaliaev
f88d56b2f8 add glide 2017-04-10 22:47:04 +04:00
Anton Kaliaev
5b0489cdb4 use plain struct instead of go-config 2017-04-10 22:46:49 +04:00
Anton Kaliaev
b8a939a894 test non persistent mconnection 2017-04-10 22:46:48 +04:00
Anton Kaliaev
2b02843453 remove unused const 2017-04-10 22:46:48 +04:00
Anton Kaliaev
5be72672fe use golang time datatype instead of time units in name 2017-04-10 22:46:48 +04:00
Anton Kaliaev
549d3bd09a tests for MConnection 2017-04-10 22:46:48 +04:00
Anton Kaliaev
868017cf1a import go-common as cmn 2017-04-10 22:46:48 +04:00
Anton Kaliaev
ba5382b70e open result&error channels on start 2017-03-28 14:17:40 +04:00
Anton Kaliaev
b0d2032488 use BaseService.OnReset method to recreate channels 2017-03-28 14:01:22 +04:00
Anton Kaliaev
a416c37ebd Merge pull request #12 from tendermint/close-ws-connection
close ws connection on Stop
2017-03-27 20:50:18 +04:00
Anton Kaliaev
d6587be7bc [WSClient] allow for multiple restarts
needed for 3044f66ba9
See https://github.com/tendermint/tools/issues/6
2017-03-21 22:08:08 +04:00
Anton Kaliaev
afc39febed close ws connection on Stop 2017-03-21 22:02:25 +04:00
Anton Kaliaev
b54b9b4ecc update url to network monitor [ci skip] [circleci skip] 2017-03-13 14:25:57 +04:00
Anton Kaliaev
5d19a008ce add Call method to WSClient, which does proper encoding of params 2017-03-10 15:33:45 +04:00
Anton Kaliaev
3233c9c003 WSClient failed to "echo_bytes"
Error:
```
Expected nil, but got: encoding/hex: invalid byte: U+0078 'x'
```
2017-03-10 14:56:04 +04:00
Anton Kaliaev
c88257b038 rename rpc function status to echo
echo means we're returning the input, which is exactly what this
function does.
2017-03-10 12:57:14 +04:00
Anton Kaliaev
0874c72819 refactor tests 2017-03-10 12:52:40 +04:00
Anton Kaliaev
d66ebbd904 use testify package 2017-03-10 12:03:16 +04:00
Anton Kaliaev
1a3573bf17 Merge pull request #10 from tendermint/feature/4-rename-http-clients
rename ClientURI -> URIClient, ClientJSONRPC -> JSONRPCClient
2017-03-10 10:42:22 +04:00
Anton Kaliaev
e6c083f589 rename ClientURI -> URIClient, ClientJSONRPC -> JSONRPCClient (Refs #4) 2017-03-10 10:41:10 +04:00
Anton Kaliaev
759060f47e Merge pull request #9 from tendermint/feature/8-http-interface-and-1-key-value-params-json-rpc
support key-value params in JSONRPC
2017-03-10 10:29:00 +04:00
Ethan Frey
715f78e26a Properly encode json.RawMessage 2017-03-10 10:20:38 +04:00
Anton Kaliaev
db69845ded introduce errors pkg 2017-03-09 19:01:37 +04:00
Anton Kaliaev
ff90224ba8 fix "Expected map but got type string" error
Error from tendermint:

```
panic: Expected map but got type string [recovered]
        panic: Expected map but got type string

goroutine 82 [running]:
testing.tRunner.func1(0xc420464000)
        /usr/local/go/src/testing/testing.go:622 +0x29d
panic(0xa1fda0, 0xc4201eecd0)
        /usr/local/go/src/runtime/panic.go:489 +0x2cf
github.com/tendermint/tendermint/rpc/test.waitForEvent(0xc420464000, 0xc420064000, 0xae6fae, 0x8, 0xae6f01, 0xc2e998, 0xc2e9a0)
        /home/vagrant/go/src/github.com/tendermint/tendermint/rpc/test/helpers.go:179 +0x53a
github.com/tendermint/tendermint/rpc/test.TestWSNewBlock(0xc420464000)
        /home/vagrant/go/src/github.com/tendermint/tendermint/rpc/test/client_test.go:190 +0x12e
testing.tRunner(0xc420464000, 0xc2e9a8)
        /usr/local/go/src/testing/testing.go:657 +0x96
created by testing.(*T).Run
        /usr/local/go/src/testing/testing.go:697 +0x2ca
```
2017-03-09 19:01:32 +04:00
Anton Kaliaev
720b74d89e read from ErrorsCh also 2017-03-09 17:44:00 +04:00
Anton Kaliaev
05e1a22d5b encode params before sending in JSONRPC 2017-03-09 13:46:48 +04:00
Anton Kaliaev
cf11e6ba65 add CHANGELOG 2017-03-09 12:43:24 +04:00
Anton Kaliaev
1ddb60b6e7 refactor jsonParamsToArgs
Suggested in https://github.com/tendermint/go-rpc/pull/9#discussion_r105098390
2017-03-09 12:23:21 +04:00
Anton Kaliaev
fed84f875c fix jsonParamsToArgsWS index error
Error from tendermint:
```
panic: runtime error: index out of range

goroutine 82 [running]:
github.com/tendermint/tendermint/vendor/github.com/tendermint/go-rpc/server.jsonParamsToArgsWS(0xc4200960e0, 0xc42024d4a0, 0xc420215380, 0x3, 0x0, 0x0, 0xc420215383, 0x9, 0xc42024d4a0, 0xf1ecc0, ...)
        /home/vagrant/go/src/github.com/tendermint/tendermint/vendor/github.com/tendermint/go-rpc/server/handlers.go:184 +0x654
github.com/tendermint/tendermint/vendor/github.com/tendermint/go-rpc/server.(*wsConnection).readRoutine(0xc4201fd0e0)
        /home/vagrant/go/src/github.com/tendermint/tendermint/vendor/github.com/tendermint/go-rpc/server/handlers.go:496 +0x3a9
created by github.com/tendermint/tendermint/vendor/github.com/tendermint/go-rpc/server.(*wsConnection).OnStart
        /home/vagrant/go/src/github.com/tendermint/tendermint/vendor/github.com/tendermint/go-rpc/server/handlers.go:377 +0x45
```
2017-03-08 17:55:08 +04:00
Anton Kaliaev
1842e03315 revert using local import
this breaks the client's code (e.g. tendermint)
2017-03-08 17:33:46 +04:00
Anton Kaliaev
d033cd54b8 add editorconfig 2017-03-08 17:17:42 +04:00
Anton Kaliaev
2dc6ab3896 use golang default if an arg is missing (Refs #7) 2017-03-08 17:16:01 +04:00
Anton Kaliaev
6d66cc68ed make sure we are using correct server
also remove it afterwards
2017-03-08 16:55:15 +04:00
Anton Kaliaev
51d760f29f use local import for testing 2017-03-08 16:23:38 +04:00
Anton Kaliaev
22ba8bdef8 fix Call method signature in HTTPClient interface 2017-03-08 10:26:13 +04:00
Anton Kaliaev
d43e3db978 fix circleci 2017-03-07 19:28:00 +04:00
Anton Kaliaev
26ccb4c94a remove private call methods
Q: what was the reason to create them?
2017-03-07 19:27:52 +04:00
Anton Kaliaev
c128957723 "must remove file for test to run again" - no way I am doing this by hands, too lazy :) 2017-03-07 19:27:38 +04:00
Anton Kaliaev
66867bf949 remove "rpc" prefix from package imports 2017-03-07 19:27:32 +04:00
Anton Kaliaev
e1d5873bdf support key-value params in JSONRPC (Refs #1)
More changes:

- remove Client interface (reason: empty)
- introduce HTTPClient interface, which can be used for both ClientURI
  and ClientJSONRPC clients (so our users don't have to create their own) (Refs #8)
- rename integration tests script to `integration_test.sh`
- do not update deps on `get_deps`
2017-03-07 19:27:27 +04:00
Anton Kaliaev
b03facd828 add Dockerfile 2017-03-07 18:34:13 +04:00
Ethan Buchman
97a5ed2d1a Merge pull request #16 from tendermint/develop
v0.4.0
2017-03-06 03:11:04 -05:00
Ethan Buchman
53d777a2d5 CHANGELOG.md 2017-03-06 01:30:41 -05:00
Ethan Buchman
beb3eda438 fix addrbook start/stop 2017-03-05 22:59:18 -05:00
Ethan Buchman
c94bc2bc2b DialSeeds takes an addrbook 2017-03-05 21:57:07 -05:00
Ethan Buchman
56eebb95ee Merge pull request #12 from tendermint/bugfix/pex-issues-335
PEX issues #335
2017-03-04 23:19:51 -05:00
Ethan Buchman
88b5c724f2 remove public addr book funcs from pex 2017-03-04 22:55:42 -05:00
Anton Kaliaev
65b1756978 expose 2 API functions for tendermint#node/node.go 2017-03-04 22:44:25 -05:00
Anton Kaliaev
108beae7a8 more tests for AddrBook 2017-03-04 22:44:25 -05:00
Anton Kaliaev
2773410de4 prevent nil addr
Error:

```
Error: runtime error: invalid memory address or nil pointer dereference
Stack:
goroutine 549 [running]:
runtime/debug.Stack(0x0, 0x0, 0x0)
        /usr/local/go/src/runtime/debug/stack.go:24 +0x80
github.com/tendermint/tendermint/vendor/github.com/tendermint/go-p2p.(*MConnection)._recover(0xc821723b00)
        /go/src/github.com/tendermint/tendermint/vendor/github.com/tendermint/go-p2p/connection.go:173 +0x53
panic(0xbe1500, 0xc820012080)
        /usr/local/go/src/runtime/panic.go:443 +0x4e9
github.com/tendermint/tendermint/vendor/github.com/tendermint/go-p2p.(*NetAddress).Valid(0x0, 0x0)
        /go/src/github.com/tendermint/tendermint/vendor/github.com/tendermint/go-p2p/netaddress.go:125 +0x1c
github.com/tendermint/tendermint/vendor/github.com/tendermint/go-p2p.(*NetAddress).Routable(0x0, 0xc8217bb740)
        /go/src/github.com/tendermint/tendermint/vendor/github.com/tendermint/go-p2p/netaddress.go:117 +0x25
github.com/tendermint/tendermint/vendor/github.com/tendermint/go-p2p.(*AddrBook).addAddress(0xc820108380, 0x0, 0xc821739590)
        /go/src/github.com/tendermint/tendermint/vendor/github.com/tendermint/go-p2p/addrbook.go:524 +0x45
github.com/tendermint/tendermint/vendor/github.com/tendermint/go-p2p.(*AddrBook).AddAddress(0xc820108380, 0x0, 0xc821739590)
        /go/src/github.com/tendermint/tendermint/vendor/github.com/tendermint/go-p2p/addrbook.go:160 +0x286
github.com/tendermint/tendermint/vendor/github.com/tendermint/go-p2p.(*PEXReactor).Receive(0xc82000be60, 0xc820149f00, 0xc8218163f0, 0xc82184e000, 0x5b, 0x1000)
        /go/src/github.com/tendermint/tendermint/vendor/github.com/tendermint/go-p2p/pex_reactor.go:109 +0x457
github.com/tendermint/tendermint/vendor/github.com/tendermint/go-p2p.newPeer.func1(0xc82011d500, 0xc82184e000, 0x5b, 0x1000)
        /go/src/github.com/tendermint/tendermint/vendor/github.com/tendermint/go-p2p/peer.go:58 +0x202
github.com/tendermint/tendermint/vendor/github.com/tendermint/go-p2p.(*MConnection).recvRoutine(0xc821723b00)
        /go/src/github.com/tendermint/tendermint/vendor/github.com/tendermint/go-p2p/connection.go:439 +0x1177
created by github.com/tendermint/tendermint/vendor/github.com/tendermint/go-p2p.(*MConnection).OnStart
        /go/src/github.com/tendermint/tendermint/vendor/github.com/tendermint/go-p2p/connection.go:138 +0x1a1
```
2017-03-04 22:40:48 -05:00
Anton Kalyaev
e7656873c1 public save API 2017-03-04 22:40:48 -05:00
Anton Kalyaev
332f7056f7 start/stop the book with reactor
Refs https://github.com/tendermint/tendermint/issues/335
2017-03-04 22:40:48 -05:00
Ethan Buchman
17e6ae813f Merge pull request #15 from tendermint/seedsfix
Seedsfix
2017-03-03 17:33:23 -05:00
Ethan Buchman
dab31d0166 version bump to 0.4.0 2017-03-03 17:30:38 -05:00
Ethan Buchman
0e7baf027b some dial seeds fixes 2017-03-03 16:42:10 -05:00
rigelrozanski
26275ba66c dial seeds error handling 2017-03-03 16:42:10 -05:00
Ethan Buchman
fcea0cda21 Merge pull request #6 from tendermint/develop
v0.6.0
2017-01-12 22:02:40 -05:00
Ethan Buchman
6177eb8398 love you circley 2017-01-12 22:01:20 -05:00
Ethan Buchman
ac443fa61f run tests from bash script 2017-01-12 21:59:02 -05:00
Ethan Buchman
08f2b5bc84 get deps for testing 2017-01-12 21:51:34 -05:00
Ethan Buchman
de56442660 Merge branch 'master' into develop 2017-01-12 21:40:43 -05:00
Ethan Buchman
3d98f675f3 Merge pull request #11 from tendermint/develop
Develop
2017-01-12 21:39:57 -05:00
Ethan Buchman
67c9086b74 optional panic on AddPeer err 2017-01-12 21:09:24 -05:00
Ethan Buchman
e47722ecb2 Connect2Switches: panic on err 2017-01-12 20:49:48 -05:00
Jae Kwon
2b750ea49f Make Connect2Switches blocking 2017-01-12 20:49:48 -05:00
Ethan Buchman
58e42397f8 close conns on filter; fix order in MakeConnectedSwitch 2017-01-12 20:49:48 -05:00
Jae Kwon
bd353e004a QuitService->BaseService 2017-01-12 20:49:48 -05:00
Ethan Buchman
94fed25975 fix test 2017-01-12 10:22:23 -05:00
Ethan Buchman
0eb278ad3b version bump 0.6.0 2017-01-12 00:13:20 -05:00
Ethan Buchman
b494cc5219 Merge pull request #5 from mappum/develop
Fix hex string handling
2017-01-12 00:12:11 -05:00
Matt Bell
4d7aa62a10 Added test for unexpected hex string type HTTP args 2017-01-07 20:40:45 -08:00
Matt Bell
86506cd4f8 Handle quoted and hex string type HTTP args for both 'string' and '[]byte' type function args 2017-01-07 20:40:29 -08:00
Matt Bell
af1212897c Exit early in bash tests 2017-01-07 14:00:27 -08:00
Matt Bell
34a806578a Handle hex strings and quoted strings in HTTP params
Use 0x-prefixed hex strings in client

server: Decode hex string args

Encode all string args as 0x<hex> without trying to encode as JSON

Added tests for special string arguments

Fix server handling quoted string args

Added string arg handling test cases to bash test script
2017-01-07 13:59:33 -08:00
Jae Kwon
eab2baa363 use go-flowrate instead of flowcontrol 2016-12-04 18:20:37 -08:00
Ethan Buchman
2cee364692 addrbook: toggle strict routability 2016-11-30 22:57:21 -05:00
Jae Kwon
161e36fd56 QuitService->BaseService 2016-10-28 12:04:58 -07:00
Ethan Buchman
e6e3853dc7 Merge pull request #3 from tendermint/develop
use EventSwitch interface; less logging
2016-10-20 21:31:26 -04:00
Ethan Buchman
1eb390680d Merge pull request #8 from tendermint/develop
update MakeConnectedSwitches
2016-10-10 14:48:58 -04:00
Ethan Buchman
855255d73e use EventSwitch interface; less logging 2016-10-10 03:22:34 -04:00
Ethan Buchman
153ac88672 update MakeConnectedSwitches 2016-09-14 00:57:53 -04:00
Ethan Buchman
f508f3f20b Merge pull request #6 from tendermint/develop
filter conn by addr/pubkey. closes #3
2016-09-09 19:45:12 -04:00
Ethan Buchman
642901d5aa filter conn by addr/pubkey. closes #3 2016-08-25 13:46:43 -04:00
Ethan Buchman
479510be0e support full urls (with eg tcp:// prefix) 2016-08-10 01:13:13 -04:00
Ethan Buchman
dea910cd3e Makefile: go test --race 2016-07-22 01:15:52 -04:00
Ethan Buchman
39ee59c26e server: return result with error 2016-07-22 01:13:16 -04:00
Ethan Buchman
929cf433b9 fix chDesc race 2016-06-26 00:34:34 -04:00
Ethan Buchman
711d2541f5 MakeConnectedSwitches function 2016-06-26 00:34:23 -04:00
Ethan Buchman
a8ac819139 link issue from readme 2016-06-23 20:40:16 -04:00
Ethan Buchman
a44e0e0f4b add test example 2016-06-23 20:37:35 -04:00
Ethan Buchman
0c70a4636a add better docs to readme 2016-06-23 20:33:04 -04:00
Ethan Buchman
7ea86f6506 fix test race and update readme 2016-06-21 15:19:56 -04:00
Ethan Buchman
7376a72dd7 circle badge 2016-06-21 14:40:24 -04:00
Ethan Buchman
981c6868ad fix race conditions in tests 2016-06-21 14:35:29 -04:00
Ethan Buchman
5bd7692323 version bump 2016-05-12 00:10:46 -04:00
Ethan Buchman
6684a18730 fix test 2016-05-12 00:08:41 -04:00
Ethan Buchman
0dc6ebc325 configurable fuzz conn 2016-05-12 00:00:52 -04:00
Ethan Buchman
ffbd6d8782 drop the p2p_ 2016-05-11 23:47:51 -04:00
Jae Kwon
7d997ca8e6 No global config 2016-05-08 14:59:27 -07:00
Jae Kwon
1d9e89812a Remove go-alert dependency 2016-05-08 14:58:28 -07:00
Ethan Buchman
e8538d606a add blank client interface 2016-05-04 10:39:43 -04:00
Ethan Buchman
78c9d526c3 change some log.Info to log.Debug 2016-03-29 11:37:51 -07:00
Jae Kwon
4baf007fb8 Merge pull request #2 from tendermint/develop
make some params configurable
2016-03-27 20:44:10 -07:00
Ethan Buchman
69c7ae5e3f version bump 2016-03-17 05:05:02 -04:00
Ethan Buchman
114d90bec8 don't reallocate on recvMsgPacket 2016-03-17 05:05:02 -04:00
Jae Kwon
10619248c6 Use go-crypto CRand* 2016-03-13 09:42:36 -07:00
Ethan Buchman
389e4b8b69 config: toggle authenticated encryption 2016-03-10 19:07:01 -05:00
Jae Kwon
1bc871162d Conform to new go-config behavior; ApplyConfig not needed 2016-03-06 12:33:07 -08:00
Ethan Buchman
f28f791fff make some params configurable 2016-03-06 12:24:01 -08:00
Jae Kwon
7f6aad20fb Add note on nondeterminism of Broadcast 2016-03-04 22:04:05 -08:00
Ethan Buchman
1410693eae support unix domain websockets 2016-02-19 02:05:24 +00:00
Ethan Buchman
74130008f7 deduplicate dialFunc 2016-02-19 00:20:20 +00:00
Ethan Buchman
6607232a5d add support for unix sockets 2016-02-18 22:45:55 +00:00
Jae Kwon
1370f89864 Fix bug in receiveEventsRoutine error handling 2016-02-08 02:20:34 -08:00
Jae Kwon
8b7969d6ea Add comments about goroutine-safety 2016-02-08 00:58:12 -08:00
Ethan Buchman
45f57198cc client: wsc.String() 2016-02-03 02:01:28 -05:00
Ethan Buchman
fbc5ac8052 print method in client log 2016-01-21 23:03:39 -05:00
Ethan Buchman
b9eec7e438 version bump 2016-01-20 13:07:57 -05:00
Ethan Buchman
9bc75eaf24 move DialSeeds in from tendermint/tendermint/node 2016-01-20 11:32:23 -05:00
Jae Kwon
0380e404bd Conform to go-wire version 0.6.0 2016-01-17 21:30:06 -08:00
Ethan Buchman
14735d5eb5 RawMessage fix 2016-01-13 22:16:56 -05:00
Ethan Buchman
91c734d02e client: ResultsCh chan json.RawMessage, ErrorsCh 2016-01-13 21:21:16 -05:00
Ethan Buchman
aff561d8c3 RPCResponse.Result is json.RawMessage 2016-01-13 18:37:35 -05:00
Ethan Buchman
0bcae125c2 use comma separated string for arg names 2016-01-12 18:29:31 -05:00
Ethan Buchman
3d59e13dd8 move from tendermint/tendermint 2016-01-12 16:50:06 -05:00
Ethan Buchman
c52524a215 Initial commit 2016-01-12 15:26:00 -05:00
Jae Kwon
1f2c1d0760 Fix prioritization logic; Add Status() 2016-01-03 06:20:18 -08:00
Jae Kwon
3abc18d7ba Add MConnection.Status() 2016-01-02 20:53:10 -08:00
Jae Kwon
0c9b9fe8bb Change license to Apache2.0 2015-12-23 14:08:39 -08:00
Jae Kwon
44d8e62689 Confirm to go-wire new TypeByte behavior 2015-12-21 12:55:52 -08:00
Jae Kwon
8b308c1c08 Tweak send/recv rates for performance test 2015-12-09 13:53:50 -08:00
Jae Kwon
4347b91b89 Remove wrong comment 2015-12-05 09:48:08 -08:00
Jae Kwon
432a37857d Add RemoteAddr and ListenAddr to NodeInfo; Refactor IPRange logic 2015-12-05 09:44:03 -08:00
Jae Kwon
c37e25e76b Conform to go-common WriteFile*(path, mode) 2015-12-04 00:02:44 -08:00
Jae Kwon
1c628a97ad Conform to go-wire 1.0 2015-11-10 12:29:43 -08:00
Jae Kwon
abc3a2cc3c initial commit 2015-10-25 18:21:51 -07:00
204 changed files with 11354 additions and 2316 deletions

.gitignore

@@ -14,3 +14,4 @@ vendor
test/p2p/data/
test/logs
.glide
coverage.txt


@@ -1,5 +1,82 @@
# Changelog
## 0.10.0 (June 2, 2017)
Includes major updates to configuration, logging, and json serialization.
Also includes the Grand Repo-Merge of 2017.
BREAKING CHANGES:
- Config and Flags:
- The `config` map is replaced with a [`Config` struct](https://github.com/tendermint/tendermint/blob/master/config/config.go#L11),
containing substructs: `BaseConfig`, `P2PConfig`, `MempoolConfig`, `ConsensusConfig`, `RPCConfig`
- This affects the following flags:
- `--seeds` is now `--p2p.seeds`
- `--node_laddr` is now `--p2p.laddr`
- `--pex` is now `--p2p.pex`
- `--skip_upnp` is now `--p2p.skip_upnp`
- `--rpc_laddr` is now `--rpc.laddr`
- `--grpc_laddr` is now `--rpc.grpc_laddr`
- Any configuration option now within a substruct must come under that heading in the `config.toml`, for instance:
```
[p2p]
laddr="tcp://1.2.3.4:46656"
[consensus]
timeout_propose=1000
```
- Use viper and `DefaultConfig() / TestConfig()` functions to handle defaults, and remove `config/tendermint` and `config/tendermint_test`
- Change some [function and method signatures](https://gist.github.com/ebuchman/640d5fc6c2605f73497992fe107ebe0b) to accommodate the new config
- Logger
- Replace static `log15` logger with a simple interface, and provide a new implementation using `go-kit`.
See our new [logging library](https://github.com/tendermint/tmlibs/log) and [blog post](https://tendermint.com/blog/abstracting-the-logger-interface-in-go) for more details
- Levels `warn` and `notice` are removed (you may need to change them in your `config.toml`!)
- Change some [function and method signatures](https://gist.github.com/ebuchman/640d5fc6c2605f73497992fe107ebe0b) to accept a logger
- JSON serialization:
- Replace `[TypeByte, Xxx]` with `{"type": "some-type", "data": Xxx}` in RPC and all `.json` files by using `go-wire/data`. For instance, a public key is now:
```
"pub_key": {
"type": "ed25519",
"data": "83DDF8775937A4A12A2704269E2729FCFCD491B933C4B0A7FFE37FE41D7760D0"
}
```
- Remove type information about RPC responses, so `[TypeByte, {"jsonrpc": "2.0", ... }]` is now just `{"jsonrpc": "2.0", ... }`
- Change `[]byte` to `data.Bytes` in all serialized types (for hex encoding)
- Lowercase the JSON tags in `ValidatorSet` fields
- Introduce `EventDataInner` for serializing events
- Other:
- Send InitChain message in handshake if `appBlockHeight == 0`
- Do not include the `Accum` field when computing the validator hash. This makes the ValidatorSetHash unique for a given validator set, rather than changing with every block (as the Accum changes)
- Unsafe RPC calls are not enabled by default. This includes `/dial_seeds`, and all calls prefixed with `unsafe`. Use the `--rpc.unsafe` flag to enable.
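To make the new serialization envelope concrete, here is a minimal standalone sketch reproducing the `{"type": ..., "data": ...}` shape from the JSON serialization notes above, using only the standard library; the `wrappedKey` struct is a hypothetical stand-in, not the `go-wire/data` implementation:
```
package main

import (
	"encoding/json"
	"fmt"
)

// wrappedKey mirrors the {"type": ..., "data": ...} envelope described above.
// It is an illustrative stand-in, not the go-wire/data type.
type wrappedKey struct {
	Type string `json:"type"`
	Data string `json:"data"`
}

func main() {
	doc := map[string]wrappedKey{
		"pub_key": {
			Type: "ed25519",
			Data: "83DDF8775937A4A12A2704269E2729FCFCD491B933C4B0A7FFE37FE41D7760D0",
		},
	}
	// MarshalIndent also reflects the pretty-printed RPC responses noted below.
	out, _ := json.MarshalIndent(doc, "", "  ")
	fmt.Println(string(out))
}
```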
FEATURES:
- Per-module log levels. For instance, the new default is `state:info,*:error`, which means the `state` package logs at `info` level, and everything else logs at `error` level
- Log whether a node is a validator in every consensus round
- Use ldflags to set git hash as part of the version
- Ignore `address` and `pub_key` fields in `priv_validator.json` and overwrite them with the values derived from the `priv_key`
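As a rough illustration of how a level spec like `state:info,*:error` breaks down into per-module levels, here is a small standalone sketch; `parseLogLevel` is a hypothetical helper written for this note, not the actual tendermint parser:
```
package main

import (
	"fmt"
	"strings"
)

// parseLogLevel splits a spec such as "state:info,*:error" into a
// module -> level map ("*" being the catch-all). Illustration only.
func parseLogLevel(spec string) map[string]string {
	levels := make(map[string]string)
	for _, item := range strings.Split(spec, ",") {
		if parts := strings.SplitN(item, ":", 2); len(parts) == 2 {
			levels[parts[0]] = parts[1]
		}
	}
	return levels
}

func main() {
	fmt.Println(parseLogLevel("state:info,*:error")) // map[*:error state:info]
}
```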
IMPROVEMENTS:
- Merge `tendermint/go-p2p -> tendermint/tendermint/p2p` and `tendermint/go-rpc -> tendermint/tendermint/rpc/lib`
- Update paths for grand repo merge:
- `go-common -> tmlibs/common`
- `go-data -> go-wire/data`
- All other `go-` libs, except `go-crypto` and `go-wire`, are merged under `tmlibs`
- No global loggers (loggers are passed into constructors, or preferably set with a SetLogger method)
- Return HTTP status codes with errors for RPC responses
- Limit `/blockchain_info` call to return a maximum of 20 blocks
- Use `.Wrap()` and `.Unwrap()` instead of eg. `PubKeyS` for `go-crypto` types
- RPC JSON responses use pretty printing (via `json.MarshalIndent`)
- Color code different instances of the consensus for tests
- Isolate viper to `cmd/tendermint/commands` and do not read config from file for tests
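The `.Wrap()`/`.Unwrap()` item above is easiest to see next to the benchmarks diff further down this page; the sketch below reuses the same call chain that appears there, assuming the `go-crypto` version vendored for this release:
```
package main

import (
	"fmt"

	crypto "github.com/tendermint/go-crypto"
)

func main() {
	// PubKey() now returns the wrapped key type; Unwrap() recovers the
	// concrete key, replacing the old direct assertion
	// PubKey().(crypto.PubKeyEd25519) seen on the removed lines below.
	pub := crypto.GenPrivKeyEd25519().PubKey()
	ed := pub.Unwrap().(crypto.PubKeyEd25519)
	fmt.Printf("%X\n", ed[:])
}
```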
## 0.9.2 (April 26, 2017)
BUG FIXES:
@@ -150,7 +227,7 @@ IMPROVEMENTS:
- Less verbose logging
- Better test coverage (37% -> 49%)
- Canonical SignBytes for signable types
- Write-Ahead Log for Mempool and Consensus via go-autofile
- Write-Ahead Log for Mempool and Consensus via tmlibs/autofile
- Better in-process testing for the consensus reactor and byzantine faults
- Better crash/restart testing for individual nodes at preset failure points, and of networks at arbitrary points
- Better abstraction over timeout mechanics


@@ -1,8 +1,8 @@
FROM alpine:3.5
# This is the release of tendermint to pull in.
ENV TM_VERSION 0.9.0
ENV TM_SHA256SUM 697033ea0f34f8b34a8a2b74c4dd730b47dd4efcfce65e53e953bdae8eb14364
ENV TM_VERSION 0.9.1
ENV TM_SHA256SUM da34234755937140dcd953afcc965555fad7e05afd546711bc5bdc2df3d54226
# Tendermint will be looking for genesis file in /tendermint (unless you change
# `genesis_file` in config.toml). You can put your config.toml and private


@@ -1,10 +1,12 @@
build:
# TAG=0.8.0 TAG_NO_PATCH=0.8
docker build -t "tendermint/tendermint" -t "tendermint/tendermint:$TAG" -t "tendermint/tendermint:$TAG_NO_PATCH" .
docker build -t "tendermint/tendermint" -t "tendermint/tendermint:$(TAG)" -t "tendermint/tendermint:$(TAG_NO_PATCH)" .
push:
# TAG=0.8.0 TAG_NO_PATCH=0.8
docker push "tendermint/tendermint" "tendermint/tendermint:$TAG" "tendermint/tendermint:$TAG_NO_PATCH"
docker push "tendermint/tendermint:latest"
docker push "tendermint/tendermint:$(TAG)"
docker push "tendermint/tendermint:$(TAG_NO_PATCH)"
build_develop:
docker build -t "tendermint/tendermint:develop" -f Dockerfile.develop .


@@ -1,6 +1,7 @@
# Supported tags and respective `Dockerfile` links
- `0.9.0`, `0.9`, `latest` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/d474baeeea6c22b289e7402449572f7c89ee21da/DOCKER/Dockerfile)
- `0.9.1`, `0.9`, `latest` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/809e0e8c5933604ba8b2d096803ada7c5ec4dfd3/DOCKER/Dockerfile)
- `0.9.0` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/d474baeeea6c22b289e7402449572f7c89ee21da/DOCKER/Dockerfile)
- `0.8.0`, `0.8` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/bf64dd21fdb193e54d8addaaaa2ecf7ac371de8c/DOCKER/Dockerfile)
- `develop` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/master/DOCKER/Dockerfile.develop)


@@ -8,10 +8,12 @@ TMHOME = $${TMHOME:-$$HOME/.tendermint}
all: install test
install: get_vendor_deps
@go install ./cmd/tendermint
@go install --ldflags '-extldflags "-static"' \
--ldflags "-X github.com/tendermint/tendermint/version.GitCommit=`git rev-parse HEAD`" ./cmd/tendermint
build:
go build -o build/tendermint ./cmd/tendermint
go build \
--ldflags "-X github.com/tendermint/tendermint/version.GitCommit=`git rev-parse HEAD`" -o build/tendermint ./cmd/tendermint/
build_race:
go build -race -o build/tendermint ./cmd/tendermint
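For context on the `--ldflags "-X ...version.GitCommit=..."` lines above: the flag overwrites a package-level string at link time, so the binary can report the commit it was built from. A minimal sketch of such a variable; the real `version` package may hold additional fields:
```
// Package version: GitCommit is injected at build time via
//   -ldflags "-X github.com/tendermint/tendermint/version.GitCommit=$(git rev-parse HEAD)"
// and stays empty in a plain `go build` without the flag.
package version

var GitCommit string
```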


@@ -51,16 +51,19 @@ Yay open source! Please see our [contributing guidelines](https://tendermint.com
### Sub-projects
* [ABCI](http://github.com/tendermint/abci)
* [Mintnet](http://github.com/tendermint/mintnet)
* [Go-Wire](http://github.com/tendermint/go-wire)
* [Go-P2P](http://github.com/tendermint/go-p2p)
* [Go-Merkle](http://github.com/tendermint/go-merkle)
* [ABCI](http://github.com/tendermint/abci), the Application Blockchain Interface
* [Go-Wire](http://github.com/tendermint/go-wire), a deterministic serialization library
* [Go-Crypto](http://github.com/tendermint/go-crypto), an elliptic curve cryptography library
* [TmLibs](http://github.com/tendermint/tmlibs), an assortment of Go libraries
* [Merkleeyes](http://github.com/tendermint/merkleeyes), a balanced, binary Merkle tree for ABCI apps
### Tools
* [Deployment, Benchmarking, and Monitoring](https://github.com/tendermint/tools)
### Applications
* [Ethermint](http://github.com/tendermint/ethermint)
* [Basecoin](http://github.com/tendermint/basecoin)
* [Ethermint](http://github.com/tendermint/ethermint): Ethereum on Tendermint
* [Basecoin](http://github.com/tendermint/basecoin), a cryptocurrency application framework
### More

Vagrantfile

@@ -5,7 +5,7 @@ Vagrant.configure("2") do |config|
config.vm.box = "ubuntu/trusty64"
config.vm.provider "virtualbox" do |v|
v.memory = 3072
v.memory = 4096
v.cpus = 2
end


@@ -4,7 +4,7 @@ import (
"testing"
"github.com/tendermint/go-crypto"
"github.com/tendermint/go-p2p"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/go-wire"
proto "github.com/tendermint/tendermint/benchmarks/proto"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
@@ -12,10 +12,10 @@ import (
func BenchmarkEncodeStatusWire(b *testing.B) {
b.StopTimer()
pubKey := crypto.GenPrivKeyEd25519().PubKey().(crypto.PubKeyEd25519)
pubKey := crypto.GenPrivKeyEd25519().PubKey()
status := &ctypes.ResultStatus{
NodeInfo: &p2p.NodeInfo{
PubKey: pubKey,
PubKey: pubKey.Unwrap().(crypto.PubKeyEd25519),
Moniker: "SOMENAME",
Network: "SOMENAME",
RemoteAddr: "SOMEADDR",
@@ -40,7 +40,7 @@ func BenchmarkEncodeStatusWire(b *testing.B) {
func BenchmarkEncodeNodeInfoWire(b *testing.B) {
b.StopTimer()
pubKey := crypto.GenPrivKeyEd25519().PubKey().(crypto.PubKeyEd25519)
pubKey := crypto.GenPrivKeyEd25519().PubKey().Unwrap().(crypto.PubKeyEd25519)
nodeInfo := &p2p.NodeInfo{
PubKey: pubKey,
Moniker: "SOMENAME",
@@ -61,7 +61,7 @@ func BenchmarkEncodeNodeInfoWire(b *testing.B) {
func BenchmarkEncodeNodeInfoBinary(b *testing.B) {
b.StopTimer()
pubKey := crypto.GenPrivKeyEd25519().PubKey().(crypto.PubKeyEd25519)
pubKey := crypto.GenPrivKeyEd25519().PubKey().Unwrap().(crypto.PubKeyEd25519)
nodeInfo := &p2p.NodeInfo{
PubKey: pubKey,
Moniker: "SOMENAME",
@@ -83,7 +83,7 @@ func BenchmarkEncodeNodeInfoBinary(b *testing.B) {
func BenchmarkEncodeNodeInfoProto(b *testing.B) {
b.StopTimer()
pubKey := crypto.GenPrivKeyEd25519().PubKey().(crypto.PubKeyEd25519)
pubKey := crypto.GenPrivKeyEd25519().PubKey().Unwrap().(crypto.PubKeyEd25519)
pubKey2 := &proto.PubKey{Ed25519: &proto.PubKeyEd25519{Bytes: pubKey[:]}}
nodeInfo := &proto.NodeInfo{
PubKey: pubKey2,


@@ -1,7 +1,7 @@
package benchmarks
import (
. "github.com/tendermint/go-common"
. "github.com/tendermint/tmlibs/common"
"testing"
)


@@ -4,7 +4,7 @@ import (
"os"
"testing"
. "github.com/tendermint/go-common"
. "github.com/tendermint/tmlibs/common"
)
func BenchmarkFileWrite(b *testing.B) {


@@ -7,11 +7,11 @@ import (
"fmt"
"github.com/gorilla/websocket"
. "github.com/tendermint/go-common"
"github.com/tendermint/go-rpc/client"
"github.com/tendermint/go-rpc/types"
"github.com/tendermint/go-wire"
_ "github.com/tendermint/tendermint/rpc/core/types" // Register RPCResponse > Result types
"github.com/tendermint/tendermint/rpc/lib/client"
"github.com/tendermint/tendermint/rpc/lib/types"
. "github.com/tendermint/tmlibs/common"
)
func main() {
@@ -37,13 +37,16 @@ func main() {
for i := 0; ; i++ {
binary.BigEndian.PutUint64(buf, uint64(i))
//txBytes := hex.EncodeToString(buf[:n])
request := rpctypes.NewRPCRequest("fakeid",
request, err := rpctypes.MapToRequest("fakeid",
"broadcast_tx",
map[string]interface{}{"tx": buf[:8]})
if err != nil {
Exit(err.Error())
}
reqBytes := wire.JSONBytes(request)
//fmt.Println("!!", string(reqBytes))
fmt.Print(".")
err := ws.WriteMessage(websocket.TextMessage, reqBytes)
err = ws.WriteMessage(websocket.TextMessage, reqBytes)
if err != nil {
Exit(err.Error())
}


@@ -1,7 +0,0 @@
package blockchain
import (
"github.com/tendermint/go-logger"
)
var log = logger.New("module", "blockchain")


@@ -5,9 +5,10 @@ import (
"sync"
"time"
. "github.com/tendermint/go-common"
flow "github.com/tendermint/go-flowrate/flowrate"
"github.com/tendermint/tendermint/types"
. "github.com/tendermint/tmlibs/common"
flow "github.com/tendermint/tmlibs/flowrate"
"github.com/tendermint/tmlibs/log"
)
const (
@@ -58,7 +59,7 @@ func NewBlockPool(start int, requestsCh chan<- BlockRequest, timeoutsCh chan<- s
requestsCh: requestsCh,
timeoutsCh: timeoutsCh,
}
bp.BaseService = *NewBaseService(log, "BlockPool", bp)
bp.BaseService = *NewBaseService(nil, "BlockPool", bp)
return bp
}
@@ -106,7 +107,7 @@ func (pool *BlockPool) removeTimedoutPeers() {
// XXX remove curRate != 0
if curRate != 0 && curRate < minRecvRate {
pool.sendTimeout(peer.id)
log.Warn("SendTimeout", "peer", peer.id, "reason", "curRate too low")
pool.Logger.Error("SendTimeout", "peer", peer.id, "reason", "curRate too low")
peer.didTimeout = true
}
}
@@ -132,7 +133,7 @@ func (pool *BlockPool) IsCaughtUp() bool {
// Need at least 1 peer to be considered caught up.
if len(pool.peers) == 0 {
log.Debug("Blockpool has no peers")
pool.Logger.Debug("Blockpool has no peers")
return false
}
@@ -142,7 +143,7 @@ func (pool *BlockPool) IsCaughtUp() bool {
}
isCaughtUp := (height > 0 || time.Now().Sub(pool.startTime) > 5*time.Second) && (maxPeerHeight == 0 || height >= maxPeerHeight)
log.Notice(Fmt("IsCaughtUp: %v", isCaughtUp), "height", height, "maxPeerHeight", maxPeerHeight)
pool.Logger.Info(Fmt("IsCaughtUp: %v", isCaughtUp), "height", height, "maxPeerHeight", maxPeerHeight)
return isCaughtUp
}
@@ -226,6 +227,7 @@ func (pool *BlockPool) SetPeerHeight(peerID string, height int) {
peer.height = height
} else {
peer = newBPPeer(pool, peerID, height)
peer.setLogger(pool.Logger.With("peer", peerID))
pool.peers[peerID] = peer
}
}
@@ -279,6 +281,7 @@ func (pool *BlockPool) makeNextRequester() {
nextHeight := pool.height + len(pool.requesters)
request := newBPRequester(pool, nextHeight)
request.SetLogger(pool.Logger.With("height", nextHeight))
pool.requesters[nextHeight] = request
pool.numPending++
@@ -328,6 +331,8 @@ type bpPeer struct {
numPending int32
timeout *time.Timer
didTimeout bool
logger log.Logger
}
func newBPPeer(pool *BlockPool, peerID string, height int) *bpPeer {
@@ -336,10 +341,15 @@ func newBPPeer(pool *BlockPool, peerID string, height int) *bpPeer {
id: peerID,
height: height,
numPending: 0,
logger: log.NewNopLogger(),
}
return peer
}
func (peer *bpPeer) setLogger(l log.Logger) {
peer.logger = l
}
func (peer *bpPeer) resetMonitor() {
peer.recvMonitor = flow.New(time.Second, time.Second*40)
var initialValue = float64(minRecvRate) * math.E
@@ -377,7 +387,7 @@ func (peer *bpPeer) onTimeout() {
defer peer.pool.mtx.Unlock()
peer.pool.sendTimeout(peer.id)
log.Warn("SendTimeout", "peer", peer.id, "reason", "onTimeout")
peer.logger.Error("SendTimeout", "reason", "onTimeout")
peer.didTimeout = true
}
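The pool.go hunks above replace the package-global `log` with a logger that is injected per component and defaults to a no-op. Below is a minimal sketch of that wiring, limited to calls that already appear in these diffs (`NewNopLogger`, `TestingLogger`, `With`); the `service` type itself is hypothetical:
```
package main

import "github.com/tendermint/tmlibs/log"

// service stands in for a BaseService-style component; only the logger
// wiring is shown, the type and field names are invented for this sketch.
type service struct{ logger log.Logger }

func (s *service) SetLogger(l log.Logger) { s.logger = l }

func main() {
	s := &service{logger: log.NewNopLogger()} // default: discard everything
	s.SetLogger(log.TestingLogger().With("module", "blockchain"))
	s.logger.Info("logger injected, no package-global log", "height", 1)
}
```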


@@ -5,8 +5,9 @@ import (
"testing"
"time"
. "github.com/tendermint/go-common"
"github.com/tendermint/tendermint/types"
. "github.com/tendermint/tmlibs/common"
"github.com/tendermint/tmlibs/log"
)
func init() {
@@ -34,6 +35,7 @@ func TestBasic(t *testing.T) {
timeoutsCh := make(chan string, 100)
requestsCh := make(chan BlockRequest, 100)
pool := NewBlockPool(start, requestsCh, timeoutsCh)
pool.SetLogger(log.TestingLogger())
pool.Start()
defer pool.Stop()
@@ -65,7 +67,7 @@ func TestBasic(t *testing.T) {
case peerID := <-timeoutsCh:
t.Errorf("timeout: %v", peerID)
case request := <-requestsCh:
log.Info("TEST: Pulled new BlockRequest", "request", request)
t.Logf("Pulled new BlockRequest %v", request)
if request.Height == 300 {
return // Done!
}
@@ -73,7 +75,7 @@ func TestBasic(t *testing.T) {
go func() {
block := &types.Block{Header: &types.Header{Height: request.Height}}
pool.AddBlock(request.PeerID, block, 123)
log.Info("TEST: Added block", "block", request.Height, "peer", request.PeerID)
t.Logf("Added block from peer %v (height: %v)", request.PeerID, request.Height)
}()
}
}
@@ -85,11 +87,12 @@ func TestTimeout(t *testing.T) {
timeoutsCh := make(chan string, 100)
requestsCh := make(chan BlockRequest, 100)
pool := NewBlockPool(start, requestsCh, timeoutsCh)
pool.SetLogger(log.TestingLogger())
pool.Start()
defer pool.Stop()
for _, peer := range peers {
log.Info("Peer", "id", peer.id)
t.Logf("Peer %v", peer.id)
}
// Introduce each peer.
@@ -120,7 +123,7 @@ func TestTimeout(t *testing.T) {
for {
select {
case peerID := <-timeoutsCh:
log.Info("Timeout", "peerID", peerID)
t.Logf("Peer %v timeouted", peerID)
if _, ok := timedOut[peerID]; !ok {
counter++
if counter == len(peers) {
@@ -128,7 +131,7 @@ func TestTimeout(t *testing.T) {
}
}
case request := <-requestsCh:
log.Info("TEST: Pulled new BlockRequest", "request", request)
t.Logf("Pulled new BlockRequest %+v", request)
}
}
}


@@ -6,13 +6,12 @@ import (
"reflect"
"time"
cmn "github.com/tendermint/go-common"
cfg "github.com/tendermint/go-config"
"github.com/tendermint/go-p2p"
"github.com/tendermint/go-wire"
wire "github.com/tendermint/go-wire"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
cmn "github.com/tendermint/tmlibs/common"
)
const (
@@ -43,7 +42,6 @@ type consensusReactor interface {
type BlockchainReactor struct {
p2p.BaseReactor
config cfg.Config
state *sm.State
proxyAppConn proxy.AppConnConsensus // same as consensus.proxyAppConn
store *BlockStore
@@ -57,7 +55,7 @@ type BlockchainReactor struct {
}
// NewBlockchainReactor returns new reactor instance.
func NewBlockchainReactor(config cfg.Config, state *sm.State, proxyAppConn proxy.AppConnConsensus, store *BlockStore, fastSync bool) *BlockchainReactor {
func NewBlockchainReactor(state *sm.State, proxyAppConn proxy.AppConnConsensus, store *BlockStore, fastSync bool) *BlockchainReactor {
if state.LastBlockHeight == store.Height()-1 {
store.height-- // XXX HACK, make this better
}
@@ -72,7 +70,6 @@ func NewBlockchainReactor(config cfg.Config, state *sm.State, proxyAppConn proxy
timeoutsCh,
)
bcR := &BlockchainReactor{
config: config,
state: state,
proxyAppConn: proxyAppConn,
store: store,
@@ -81,7 +78,7 @@ func NewBlockchainReactor(config cfg.Config, state *sm.State, proxyAppConn proxy
requestsCh: requestsCh,
timeoutsCh: timeoutsCh,
}
bcR.BaseReactor = *p2p.NewBaseReactor(log, "BlockchainReactor", bcR)
bcR.BaseReactor = *p2p.NewBaseReactor("BlockchainReactor", bcR)
return bcR
}
@@ -131,11 +128,11 @@ func (bcR *BlockchainReactor) RemovePeer(peer *p2p.Peer, reason interface{}) {
func (bcR *BlockchainReactor) Receive(chID byte, src *p2p.Peer, msgBytes []byte) {
_, msg, err := DecodeMessage(msgBytes)
if err != nil {
log.Warn("Error decoding message", "error", err)
bcR.Logger.Error("Error decoding message", "error", err)
return
}
log.Debug("Receive", "src", src, "chID", chID, "msg", msg)
bcR.Logger.Debug("Receive", "src", src, "chID", chID, "msg", msg)
switch msg := msg.(type) {
case *bcBlockRequestMessage:
@@ -163,7 +160,7 @@ func (bcR *BlockchainReactor) Receive(chID byte, src *p2p.Peer, msgBytes []byte)
// Got a peer status. Unverified.
bcR.pool.SetPeerHeight(src.Key, msg.Height)
default:
log.Warn(cmn.Fmt("Unknown message type %v", reflect.TypeOf(msg)))
bcR.Logger.Error(cmn.Fmt("Unknown message type %v", reflect.TypeOf(msg)))
}
}
@@ -203,10 +200,10 @@ FOR_LOOP:
case _ = <-switchToConsensusTicker.C:
height, numPending, _ := bcR.pool.GetStatus()
outbound, inbound, _ := bcR.Switch.NumPeers()
log.Info("Consensus ticker", "numPending", numPending, "total", len(bcR.pool.requesters),
bcR.Logger.Info("Consensus ticker", "numPending", numPending, "total", len(bcR.pool.requesters),
"outbound", outbound, "inbound", inbound)
if bcR.pool.IsCaughtUp() {
log.Notice("Time to switch to consensus reactor!", "height", height)
bcR.Logger.Info("Time to switch to consensus reactor!", "height", height)
bcR.pool.Stop()
conR := bcR.Switch.Reactor("CONSENSUS").(consensusReactor)
@@ -220,12 +217,12 @@ FOR_LOOP:
for i := 0; i < 10; i++ {
// See if there are any blocks to sync.
first, second := bcR.pool.PeekTwoBlocks()
//log.Info("TrySync peeked", "first", first, "second", second)
//bcR.Logger.Info("TrySync peeked", "first", first, "second", second)
if first == nil || second == nil {
// We need both to sync the first block.
break SYNC_LOOP
}
firstParts := first.MakePartSet(bcR.config.GetInt("block_part_size")) // TODO: put part size in parts header?
firstParts := first.MakePartSet(types.DefaultBlockPartSize)
firstPartsHeader := firstParts.Header()
// Finally, verify the first block using the second's commit
// NOTE: we can probably make this more efficient, but note that calling
@@ -234,7 +231,7 @@ FOR_LOOP:
err := bcR.state.Validators.VerifyCommit(
bcR.state.ChainID, types.BlockID{first.Hash(), firstPartsHeader}, first.Height, second.LastCommit)
if err != nil {
log.Info("error in validation", "error", err)
bcR.Logger.Info("error in validation", "error", err)
bcR.pool.RedoRequest(first.Height)
break SYNC_LOOP
} else {


@@ -7,8 +7,8 @@ import (
"io"
"sync"
. "github.com/tendermint/go-common"
dbm "github.com/tendermint/go-db"
. "github.com/tendermint/tmlibs/common"
dbm "github.com/tendermint/tmlibs/db"
"github.com/tendermint/go-wire"
"github.com/tendermint/tendermint/types"
)


@@ -0,0 +1,87 @@
package flags
import (
"fmt"
"strings"
"github.com/pkg/errors"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tmlibs/log"
)
const (
defaultLogLevelKey = "*"
)
// ParseLogLevel parses complex log level - comma-separated
// list of module:level pairs with an optional *:level pair (* means
// all other modules).
//
// Example:
// ParseLogLevel("consensus:debug,mempool:debug,*:error", log.NewTMLogger(os.Stdout))
func ParseLogLevel(lvl string, logger log.Logger) (log.Logger, error) {
if lvl == "" {
return nil, errors.New("Empty log level")
}
l := lvl
// prefix simple one word levels (e.g. "info") with "*"
if !strings.Contains(l, ":") {
l = defaultLogLevelKey + ":" + l
}
options := make([]log.Option, 0)
isDefaultLogLevelSet := false
var option log.Option
var err error
list := strings.Split(l, ",")
for _, item := range list {
moduleAndLevel := strings.Split(item, ":")
if len(moduleAndLevel) != 2 {
return nil, fmt.Errorf("Expected list in a form of \"module:level\" pairs, given pair %s, list %s", item, list)
}
module := moduleAndLevel[0]
level := moduleAndLevel[1]
if module == defaultLogLevelKey {
option, err = log.AllowLevel(level)
if err != nil {
return nil, errors.Wrap(err, fmt.Sprintf("Failed to parse default log level (pair %s, list %s)", item, l))
}
options = append(options, option)
isDefaultLogLevelSet = true
} else {
switch level {
case "debug":
option = log.AllowDebugWith("module", module)
case "info":
option = log.AllowInfoWith("module", module)
case "error":
option = log.AllowErrorWith("module", module)
case "none":
option = log.AllowNoneWith("module", module)
default:
return nil, fmt.Errorf("Expected either \"info\", \"debug\", \"error\" or \"none\" log level, given %s (pair %s, list %s)", level, item, list)
}
options = append(options, option)
}
}
// if "*" is not provided, set default global level
if !isDefaultLogLevelSet {
option, err = log.AllowLevel(cfg.DefaultLogLevel())
if err != nil {
return nil, err
}
options = append(options, option)
}
return log.NewFilter(logger, options...), nil
}
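For orientation (not part of the commit): a minimal, hedged sketch of how a caller would use ParseLogLevel, built only from calls already visible in this changeset (log.NewTMLogger, log.NewSyncWriter, logger.With, and the leveled log methods).

package main

import (
	"os"

	tmflags "github.com/tendermint/tendermint/cmd/tendermint/commands/flags"
	"github.com/tendermint/tmlibs/log"
)

func main() {
	// Base logger, constructed the same way as in root.go in this changeset.
	logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))

	// Debug for consensus and mempool, error for every other module.
	logger, err := tmflags.ParseLogLevel("consensus:debug,mempool:debug,*:error", logger)
	if err != nil {
		panic(err)
	}

	mainLogger := logger.With("module", "main")
	mainLogger.Info("dropped by the *:error filter")
	mainLogger.Error("printed")
}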


@@ -0,0 +1,64 @@
package flags_test
import (
"bytes"
"strings"
"testing"
tmflags "github.com/tendermint/tendermint/cmd/tendermint/commands/flags"
"github.com/tendermint/tmlibs/log"
)
func TestParseLogLevel(t *testing.T) {
var buf bytes.Buffer
jsonLogger := log.NewTMJSONLogger(&buf)
correctLogLevels := []struct {
lvl string
expectedLogLines []string
}{
{"mempool:error", []string{``, ``, `{"_msg":"Mesmero","level":"error","module":"mempool"}`}},
{"mempool:error,*:debug", []string{``, ``, `{"_msg":"Mesmero","level":"error","module":"mempool"}`}},
{"*:debug,wire:none", []string{
`{"_msg":"Kingpin","level":"debug","module":"mempool"}`,
`{"_msg":"Kitty Pryde","level":"info","module":"mempool"}`,
`{"_msg":"Mesmero","level":"error","module":"mempool"}`}},
}
for _, c := range correctLogLevels {
logger, err := tmflags.ParseLogLevel(c.lvl, jsonLogger)
if err != nil {
t.Fatal(err)
}
logger = logger.With("module", "mempool")
buf.Reset()
logger.Debug("Kingpin")
if have := strings.TrimSpace(buf.String()); c.expectedLogLines[0] != have {
t.Errorf("\nwant '%s'\nhave '%s'\nlevel '%s'", c.expectedLogLines[0], have, c.lvl)
}
buf.Reset()
logger.Info("Kitty Pryde")
if have := strings.TrimSpace(buf.String()); c.expectedLogLines[1] != have {
t.Errorf("\nwant '%s'\nhave '%s'\nlevel '%s'", c.expectedLogLines[1], have, c.lvl)
}
buf.Reset()
logger.Error("Mesmero")
if have := strings.TrimSpace(buf.String()); c.expectedLogLines[2] != have {
t.Errorf("\nwant '%s'\nhave '%s'\nlevel '%s'", c.expectedLogLines[2], have, c.lvl)
}
}
incorrectLogLevel := []string{"some", "mempool:some", "*:some,mempool:error"}
for _, lvl := range incorrectLogLevel {
if _, err := tmflags.ParseLogLevel(lvl, jsonLogger); err == nil {
t.Fatalf("Expected %s to produce error", lvl)
}
}
}


@@ -1,11 +1,11 @@
package commands
import (
"encoding/json"
"fmt"
"github.com/spf13/cobra"
"github.com/tendermint/go-wire"
"github.com/tendermint/tendermint/types"
)
@@ -21,7 +21,7 @@ func init() {
func genValidator(cmd *cobra.Command, args []string) {
privValidator := types.GenPrivValidator()
privValidatorJSONBytes := wire.JSONBytesPretty(privValidator)
privValidatorJSONBytes, _ := json.MarshalIndent(privValidator, "", "\t")
fmt.Printf(`%v
`, string(privValidatorJSONBytes))
}


@@ -5,8 +5,8 @@ import (
"github.com/spf13/cobra"
cmn "github.com/tendermint/go-common"
"github.com/tendermint/tendermint/types"
cmn "github.com/tendermint/tmlibs/common"
)
var initFilesCmd = &cobra.Command{
@@ -20,13 +20,13 @@ func init() {
}
func initFiles(cmd *cobra.Command, args []string) {
privValFile := config.GetString("priv_validator_file")
privValFile := config.PrivValidatorFile()
if _, err := os.Stat(privValFile); os.IsNotExist(err) {
privValidator := types.GenPrivValidator()
privValidator.SetFile(privValFile)
privValidator.Save()
genFile := config.GetString("genesis_file")
genFile := config.GenesisFile()
if _, err := os.Stat(genFile); os.IsNotExist(err) {
genDoc := types.GenesisDoc{
@@ -40,8 +40,8 @@ func initFiles(cmd *cobra.Command, args []string) {
genDoc.SaveAs(genFile)
}
log.Notice("Initialized tendermint", "genesis", config.GetString("genesis_file"), "priv_validator", config.GetString("priv_validator_file"))
logger.Info("Initialized tendermint", "genesis", config.GenesisFile(), "priv_validator", config.PrivValidatorFile())
} else {
log.Notice("Already initialized", "priv_validator", config.GetString("priv_validator_file"))
logger.Info("Already initialized", "priv_validator", config.PrivValidatorFile())
}
}


@@ -6,31 +6,30 @@ import (
"github.com/spf13/cobra"
"github.com/tendermint/go-p2p/upnp"
"github.com/tendermint/tendermint/p2p/upnp"
)
var probeUpnpCmd = &cobra.Command{
Use: "probe_upnp",
Short: "Test UPnP functionality",
Run: probeUpnp,
RunE: probeUpnp,
}
func init() {
RootCmd.AddCommand(probeUpnpCmd)
}
func probeUpnp(cmd *cobra.Command, args []string) {
capabilities, err := upnp.Probe()
func probeUpnp(cmd *cobra.Command, args []string) error {
capabilities, err := upnp.Probe(logger)
if err != nil {
fmt.Println("Probe failed: %v", err)
} else {
fmt.Println("Probe success!")
jsonBytes, err := json.Marshal(capabilities)
if err != nil {
panic(err)
return err
}
fmt.Println(string(jsonBytes))
}
return nil
}


@@ -1,36 +1,24 @@
package commands
import (
"fmt"
"github.com/spf13/cobra"
"github.com/tendermint/tendermint/consensus"
"github.com/spf13/cobra"
)
var replayCmd = &cobra.Command{
Use: "replay [walfile]",
Use: "replay",
Short: "Replay messages from WAL",
Run: func(cmd *cobra.Command, args []string) {
if len(args) > 1 {
consensus.RunReplayFile(config, args[1], false)
} else {
fmt.Println("replay requires an argument (walfile)")
}
consensus.RunReplayFile(config.BaseConfig, config.Consensus, false)
},
}
var replayConsoleCmd = &cobra.Command{
Use: "replay_console [walfile]",
Use: "replay_console",
Short: "Replay messages from WAL in a console",
Run: func(cmd *cobra.Command, args []string) {
if len(args) > 1 {
consensus.RunReplayFile(config, args[1], true)
} else {
fmt.Println("replay_console requires an argument (walfile)")
}
consensus.RunReplayFile(config.BaseConfig, config.Consensus, true)
},
}


@@ -5,9 +5,8 @@ import (
"github.com/spf13/cobra"
cfg "github.com/tendermint/go-config"
"github.com/tendermint/log15"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tmlibs/log"
)
var resetAllCmd = &cobra.Command{
@@ -30,33 +29,33 @@ func init() {
// XXX: this is totally unsafe.
// it's only suitable for testnets.
func resetAll(cmd *cobra.Command, args []string) {
ResetAll(config, log)
ResetAll(config.DBDir(), config.PrivValidatorFile(), logger)
}
// XXX: this is totally unsafe.
// it's only suitable for testnets.
func resetPrivValidator(cmd *cobra.Command, args []string) {
ResetPrivValidator(config, log)
resetPrivValidatorLocal(config.PrivValidatorFile(), logger)
}
// Exported so other CLI tools can use it
func ResetAll(c cfg.Config, l log15.Logger) {
ResetPrivValidator(c, l)
os.RemoveAll(c.GetString("db_dir"))
func ResetAll(dbDir, privValFile string, logger log.Logger) {
resetPrivValidatorLocal(privValFile, logger)
os.RemoveAll(dbDir)
logger.Info("Removed all data", "dir", dbDir)
}
func ResetPrivValidator(c cfg.Config, l log15.Logger) {
func resetPrivValidatorLocal(privValFile string, logger log.Logger) {
// Get PrivValidator
var privValidator *types.PrivValidator
privValidatorFile := c.GetString("priv_validator_file")
if _, err := os.Stat(privValidatorFile); err == nil {
privValidator = types.LoadPrivValidator(privValidatorFile)
if _, err := os.Stat(privValFile); err == nil {
privValidator = types.LoadPrivValidator(privValFile)
privValidator.Reset()
l.Notice("Reset PrivValidator", "file", privValidatorFile)
logger.Info("Reset PrivValidator", "file", privValFile)
} else {
privValidator = types.GenPrivValidator()
privValidator.SetFile(privValidatorFile)
privValidator.SetFile(privValFile)
privValidator.Save()
l.Notice("Generated PrivValidator", "file", privValidatorFile)
logger.Info("Generated PrivValidator", "file", privValFile)
}
}


@@ -1,31 +1,39 @@
package commands
import (
"github.com/spf13/cobra"
"os"
"github.com/tendermint/go-logger"
tmcfg "github.com/tendermint/tendermint/config/tendermint"
"github.com/spf13/cobra"
"github.com/spf13/viper"
tmflags "github.com/tendermint/tendermint/cmd/tendermint/commands/flags"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tmlibs/log"
)
var (
config = tmcfg.GetConfig("")
log = logger.New("module", "main")
config = cfg.DefaultConfig()
logger = log.NewTMLogger(log.NewSyncWriter(os.Stdout)).With("module", "main")
)
//global flag
var logLevel string
func init() {
RootCmd.PersistentFlags().String("log_level", config.LogLevel, "Log level")
}
var RootCmd = &cobra.Command{
Use: "tendermint",
Short: "Tendermint Core (BFT Consensus) in Go",
PersistentPreRun: func(cmd *cobra.Command, args []string) {
// set the log level in the config and logger
config.Set("log_level", logLevel)
logger.SetLogLevel(logLevel)
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
err := viper.Unmarshal(config)
if err != nil {
return err
}
config.SetRoot(config.RootDir)
cfg.EnsureRoot(config.RootDir)
logger, err = tmflags.ParseLogLevel(config.LogLevel, logger)
if err != nil {
return err
}
return nil
},
}
func init() {
//parse flag and set config
RootCmd.PersistentFlags().StringVar(&logLevel, "log_level", config.GetString("log_level"), "Log level")
}


@@ -0,0 +1,99 @@
package commands
import (
"os"
"strconv"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tmlibs/cli"
"testing"
)
var (
defaultRoot = os.ExpandEnv("$HOME/.some/test/dir")
)
const (
rootName = "root"
)
// isolate provides a clean setup and returns a copy of RootCmd you can
// modify in the test cases
func isolate(cmds ...*cobra.Command) cli.Executable {
viper.Reset()
config = cfg.DefaultConfig()
r := &cobra.Command{
Use: rootName,
PersistentPreRunE: RootCmd.PersistentPreRunE,
}
r.AddCommand(cmds...)
wr := cli.PrepareBaseCmd(r, "TM", defaultRoot)
return wr
}
func TestRootConfig(t *testing.T) {
assert, require := assert.New(t), require.New(t)
// we pre-create a config file we can refer to in the rest of
// the test cases.
cvals := map[string]string{
"moniker": "monkey",
"fast_sync": "false",
}
// proper types of the above settings
cfast := false
conf, err := cli.WriteDemoConfig(cvals)
require.Nil(err)
defaults := cfg.DefaultConfig()
dmax := defaults.P2P.MaxNumPeers
cases := []struct {
args []string
env map[string]string
root string
moniker string
fastSync bool
maxPeer int
}{
{nil, nil, defaultRoot, defaults.Moniker, defaults.FastSync, dmax},
// try multiple ways of setting root (two flags, cli vs. env)
{[]string{"--home", conf}, nil, conf, cvals["moniker"], cfast, dmax},
{nil, map[string]string{"TMROOT": conf}, conf, cvals["moniker"], cfast, dmax},
// check setting p2p subflags two different ways
{[]string{"--p2p.max_num_peers", "420"}, nil, defaultRoot, defaults.Moniker, defaults.FastSync, 420},
{nil, map[string]string{"TM_P2P_MAX_NUM_PEERS": "17"}, defaultRoot, defaults.Moniker, defaults.FastSync, 17},
// try to set env that have no flags attached...
{[]string{"--home", conf}, map[string]string{"TM_MONIKER": "funny"}, conf, "funny", cfast, dmax},
}
for idx, tc := range cases {
i := strconv.Itoa(idx)
// test command that does nothing, except trigger unmarshalling in root
noop := &cobra.Command{
Use: "noop",
RunE: func(cmd *cobra.Command, args []string) error {
return nil
},
}
noop.Flags().Int("p2p.max_num_peers", defaults.P2P.MaxNumPeers, "")
cmd := isolate(noop)
args := append([]string{rootName, noop.Use}, tc.args...)
err := cli.RunWithArgs(cmd, args, tc.env)
require.Nil(err, i)
assert.Equal(tc.root, config.RootDir, i)
assert.Equal(tc.root, config.P2P.RootDir, i)
assert.Equal(tc.root, config.Consensus.RootDir, i)
assert.Equal(tc.root, config.Mempool.RootDir, i)
assert.Equal(tc.moniker, config.Moniker, i)
assert.Equal(tc.fastSync, config.FastSync, i)
assert.Equal(tc.maxPeer, config.P2P.MaxNumPeers, i)
}
}


@@ -1,124 +1,93 @@
package commands
import (
"fmt"
"io/ioutil"
"time"
"github.com/spf13/cobra"
. "github.com/tendermint/go-common"
"github.com/tendermint/tendermint/node"
"github.com/tendermint/tendermint/types"
cmn "github.com/tendermint/tmlibs/common"
)
var runNodeCmd = &cobra.Command{
Use: "node",
Short: "Run the tendermint node",
PreRun: setConfigFlags,
Run: runNode,
Use: "node",
Short: "Run the tendermint node",
RunE: runNode,
}
//flags
var (
moniker string
nodeLaddr string
seeds string
fastSync bool
skipUPNP bool
rpcLaddr string
grpcLaddr string
proxyApp string
abciTransport string
pex bool
)
func init() {
// bind flags
runNodeCmd.Flags().String("moniker", config.Moniker, "Node Name")
// configuration options
runNodeCmd.Flags().StringVar(&moniker, "moniker", config.GetString("moniker"),
"Node Name")
runNodeCmd.Flags().StringVar(&nodeLaddr, "node_laddr", config.GetString("node_laddr"),
"Node listen address. (0.0.0.0:0 means any interface, any port)")
runNodeCmd.Flags().StringVar(&seeds, "seeds", config.GetString("seeds"),
"Comma delimited host:port seed nodes")
runNodeCmd.Flags().BoolVar(&fastSync, "fast_sync", config.GetBool("fast_sync"),
"Fast blockchain syncing")
runNodeCmd.Flags().BoolVar(&skipUPNP, "skip_upnp", config.GetBool("skip_upnp"),
"Skip UPNP configuration")
runNodeCmd.Flags().StringVar(&rpcLaddr, "rpc_laddr", config.GetString("rpc_laddr"),
"RPC listen address. Port required")
runNodeCmd.Flags().StringVar(&grpcLaddr, "grpc_laddr", config.GetString("grpc_laddr"),
"GRPC listen address (BroadcastTx only). Port required")
runNodeCmd.Flags().StringVar(&proxyApp, "proxy_app", config.GetString("proxy_app"),
"Proxy app address, or 'nilapp' or 'dummy' for local testing.")
runNodeCmd.Flags().StringVar(&abciTransport, "abci", config.GetString("abci"),
"Specify abci transport (socket | grpc)")
// node flags
runNodeCmd.Flags().Bool("fast_sync", config.FastSync, "Fast blockchain syncing")
// feature flags
runNodeCmd.Flags().BoolVar(&pex, "pex", config.GetBool("pex_reactor"),
"Enable Peer-Exchange (dev feature)")
// abci flags
runNodeCmd.Flags().String("proxy_app", config.ProxyApp, "Proxy app address, or 'nilapp' or 'dummy' for local testing.")
runNodeCmd.Flags().String("abci", config.ABCI, "Specify abci transport (socket | grpc)")
// rpc flags
runNodeCmd.Flags().String("rpc.laddr", config.RPC.ListenAddress, "RPC listen address. Port required")
runNodeCmd.Flags().String("rpc.grpc_laddr", config.RPC.GRPCListenAddress, "GRPC listen address (BroadcastTx only). Port required")
runNodeCmd.Flags().Bool("rpc.unsafe", config.RPC.Unsafe, "Enabled unsafe rpc methods")
// p2p flags
runNodeCmd.Flags().String("p2p.laddr", config.P2P.ListenAddress, "Node listen address. (0.0.0.0:0 means any interface, any port)")
runNodeCmd.Flags().String("p2p.seeds", config.P2P.Seeds, "Comma delimited host:port seed nodes")
runNodeCmd.Flags().Bool("p2p.skip_upnp", config.P2P.SkipUPNP, "Skip UPNP configuration")
runNodeCmd.Flags().Bool("p2p.pex", config.P2P.PexReactor, "Enable Peer-Exchange (dev feature)")
RootCmd.AddCommand(runNodeCmd)
}
func setConfigFlags(cmd *cobra.Command, args []string) {
// Merge parsed flag values onto config
config.Set("moniker", moniker)
config.Set("node_laddr", nodeLaddr)
config.Set("seeds", seeds)
config.Set("fast_sync", fastSync)
config.Set("skip_upnp", skipUPNP)
config.Set("rpc_laddr", rpcLaddr)
config.Set("grpc_laddr", grpcLaddr)
config.Set("proxy_app", proxyApp)
config.Set("abci", abciTransport)
config.Set("pex_reactor", pex)
}
// Users wishing to:
// * Use an external signer for their validators
// * Supply an in-proc abci app
// should import github.com/tendermint/tendermint/node and implement
// their own run_node to call node.NewNode (instead of node.NewNodeDefault)
// with their custom priv validator and/or custom proxy.ClientCreator
func runNode(cmd *cobra.Command, args []string) {
func runNode(cmd *cobra.Command, args []string) error {
// Wait until the genesis doc becomes available
// This is for Mintnet compatibility.
// TODO: If Mintnet gets deprecated or genesis_file is
// always available, remove.
genDocFile := config.GetString("genesis_file")
if !FileExists(genDocFile) {
log.Notice(Fmt("Waiting for genesis file %v...", genDocFile))
genDocFile := config.GenesisFile()
if !cmn.FileExists(genDocFile) {
logger.Info(cmn.Fmt("Waiting for genesis file %v...", genDocFile))
for {
time.Sleep(time.Second)
if !FileExists(genDocFile) {
if !cmn.FileExists(genDocFile) {
continue
}
jsonBlob, err := ioutil.ReadFile(genDocFile)
if err != nil {
Exit(Fmt("Couldn't read GenesisDoc file: %v", err))
return fmt.Errorf("Couldn't read GenesisDoc file: %v", err)
}
genDoc, err := types.GenesisDocFromJSON(jsonBlob)
if err != nil {
Exit(Fmt("Error reading GenesisDoc: %v", err))
return fmt.Errorf("Error reading GenesisDoc: %v", err)
}
if genDoc.ChainID == "" {
Exit(Fmt("Genesis doc %v must include non-empty chain_id", genDocFile))
return fmt.Errorf("Genesis doc %v must include non-empty chain_id", genDocFile)
}
config.Set("chain_id", genDoc.ChainID)
config.ChainID = genDoc.ChainID
}
}
// Create & start node
n := node.NewNodeDefault(config)
n := node.NewNodeDefault(config, logger.With("module", "node"))
if _, err := n.Start(); err != nil {
Exit(Fmt("Failed to start node: %v", err))
return fmt.Errorf("Failed to start node: %v", err)
} else {
log.Notice("Started node", "nodeInfo", n.Switch().NodeInfo())
logger.Info("Started node", "nodeInfo", n.Switch().NodeInfo())
}
// Trap signal, run forever.
n.RunForever()
return nil
}
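As the comment above notes, callers who need a custom validator or proxy.ClientCreator should build their own run_node around node.NewNode; its signature is not shown in this diff, so the hedged sketch below sticks to the default path that is visible here (DefaultConfig, EnsureRoot, NewNodeDefault, Start, RunForever). It assumes the home directory has already been initialized (e.g. by the init command above), so a genesis file and priv_validator exist.

package main

import (
	"os"

	cfg "github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/node"
	"github.com/tendermint/tmlibs/log"
)

func main() {
	logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))

	// Default config rooted at the conventional home dir; EnsureRoot writes
	// a default config.toml and data dir if they are missing.
	config := cfg.DefaultConfig().SetRoot(os.ExpandEnv("$HOME/.tendermint"))
	cfg.EnsureRoot(config.RootDir)

	n := node.NewNodeDefault(config, logger.With("module", "node"))
	if _, err := n.Start(); err != nil {
		logger.Error("Failed to start node", "err", err)
		os.Exit(1)
	}
	logger.Info("Started node", "nodeInfo", n.Switch().NodeInfo())

	// Trap signals and block forever, as the node command does.
	n.RunForever()
}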


@@ -5,7 +5,7 @@ import (
"github.com/spf13/cobra"
"github.com/tendermint/go-wire"
"github.com/tendermint/go-wire/data"
"github.com/tendermint/tendermint/types"
)
@@ -20,7 +20,7 @@ func init() {
}
func showValidator(cmd *cobra.Command, args []string) {
privValidatorFile := config.GetString("priv_validator_file")
privValidator := types.LoadOrGenPrivValidator(privValidatorFile)
fmt.Println(string(wire.JSONBytesPretty(privValidator.PubKey)))
privValidator := types.LoadOrGenPrivValidator(config.PrivValidatorFile(), logger)
pubKeyJSONBytes, _ := data.ToJSON(privValidator.PubKey)
fmt.Println(string(pubKeyJSONBytes))
}


@@ -7,7 +7,7 @@ import (
"github.com/spf13/cobra"
cmn "github.com/tendermint/go-common"
cmn "github.com/tendermint/tmlibs/common"
"github.com/tendermint/tendermint/types"
)


@@ -1,15 +1,13 @@
package main
import (
"fmt"
"os"
"github.com/tendermint/tendermint/cmd/tendermint/commands"
"github.com/tendermint/tmlibs/cli"
)
func main() {
if err := commands.RootCmd.Execute(); err != nil {
fmt.Println(err)
os.Exit(1)
}
cmd := cli.PrepareBaseCmd(commands.RootCmd, "TM", os.ExpandEnv("$HOME/.tendermint"))
cmd.Execute()
}

config/config.go (new file, 345 lines)

@@ -0,0 +1,345 @@
package config
import (
"fmt"
"path/filepath"
"time"
"github.com/tendermint/tendermint/types"
)
type Config struct {
// Top level options use an anonymous struct
BaseConfig `mapstructure:",squash"`
// Options for services
RPC *RPCConfig `mapstructure:"rpc"`
P2P *P2PConfig `mapstructure:"p2p"`
Mempool *MempoolConfig `mapstructure:"mempool"`
Consensus *ConsensusConfig `mapstructure:"consensus"`
}
func DefaultConfig() *Config {
return &Config{
BaseConfig: DefaultBaseConfig(),
RPC: DefaultRPCConfig(),
P2P: DefaultP2PConfig(),
Mempool: DefaultMempoolConfig(),
Consensus: DefaultConsensusConfig(),
}
}
func TestConfig() *Config {
return &Config{
BaseConfig: TestBaseConfig(),
RPC: TestRPCConfig(),
P2P: TestP2PConfig(),
Mempool: DefaultMempoolConfig(),
Consensus: TestConsensusConfig(),
}
}
// Set the RootDir for all Config structs
func (cfg *Config) SetRoot(root string) *Config {
cfg.BaseConfig.RootDir = root
cfg.RPC.RootDir = root
cfg.P2P.RootDir = root
cfg.Mempool.RootDir = root
cfg.Consensus.RootDir = root
return cfg
}
//-----------------------------------------------------------------------------
// BaseConfig
// BaseConfig struct for a Tendermint node
type BaseConfig struct {
// The root directory for all data.
// This should be set in viper so it can unmarshal into this struct
RootDir string `mapstructure:"home"`
// The ID of the chain to join (should be signed with every transaction and vote)
ChainID string `mapstructure:"chain_id"`
// A JSON file containing the initial validator set and other meta data
Genesis string `mapstructure:"genesis_file"`
// A JSON file containing the private key to use as a validator in the consensus protocol
PrivValidator string `mapstructure:"priv_validator_file"`
// A custom human readable name for this node
Moniker string `mapstructure:"moniker"`
// TCP or UNIX socket address of the ABCI application,
// or the name of an ABCI application compiled in with the Tendermint binary
ProxyApp string `mapstructure:"proxy_app"`
// Mechanism to connect to the ABCI application: socket | grpc
ABCI string `mapstructure:"abci"`
// Output level for logging
LogLevel string `mapstructure:"log_level"`
// TCP or UNIX socket address for the profiling server to listen on
ProfListenAddress string `mapstructure:"prof_laddr"`
// If this node is many blocks behind the tip of the chain, FastSync
// allows them to catchup quickly by downloading blocks in parallel
// and verifying their commits
FastSync bool `mapstructure:"fast_sync"`
// If true, query the ABCI app on connecting to a new peer
// so the app can decide if we should keep the connection or not
FilterPeers bool `mapstructure:"filter_peers"` // false
// What indexer to use for transactions
TxIndex string `mapstructure:"tx_index"`
// Database backend: leveldb | memdb
DBBackend string `mapstructure:"db_backend"`
// Database directory
DBPath string `mapstructure:"db_dir"`
}
func DefaultBaseConfig() BaseConfig {
return BaseConfig{
Genesis: "genesis.json",
PrivValidator: "priv_validator.json",
Moniker: "anonymous",
ProxyApp: "tcp://127.0.0.1:46658",
ABCI: "socket",
LogLevel: DefaultPackageLogLevels(),
ProfListenAddress: "",
FastSync: true,
FilterPeers: false,
TxIndex: "kv",
DBBackend: "leveldb",
DBPath: "data",
}
}
func TestBaseConfig() BaseConfig {
conf := DefaultBaseConfig()
conf.ChainID = "tendermint_test"
conf.ProxyApp = "dummy"
conf.FastSync = false
conf.DBBackend = "memdb"
return conf
}
func (b BaseConfig) GenesisFile() string {
return rootify(b.Genesis, b.RootDir)
}
func (b BaseConfig) PrivValidatorFile() string {
return rootify(b.PrivValidator, b.RootDir)
}
func (b BaseConfig) DBDir() string {
return rootify(b.DBPath, b.RootDir)
}
func DefaultLogLevel() string {
return "error"
}
func DefaultPackageLogLevels() string {
return fmt.Sprintf("state:info,*:%s", DefaultLogLevel())
}
//-----------------------------------------------------------------------------
// RPCConfig
type RPCConfig struct {
RootDir string `mapstructure:"home"`
// TCP or UNIX socket address for the RPC server to listen on
ListenAddress string `mapstructure:"laddr"`
// TCP or UNIX socket address for the gRPC server to listen on
// NOTE: This server only supports /broadcast_tx_commit
GRPCListenAddress string `mapstructure:"grpc_laddr"`
// Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool
Unsafe bool `mapstructure:"unsafe"`
}
func DefaultRPCConfig() *RPCConfig {
return &RPCConfig{
ListenAddress: "tcp://0.0.0.0:46657",
GRPCListenAddress: "",
Unsafe: false,
}
}
func TestRPCConfig() *RPCConfig {
conf := DefaultRPCConfig()
conf.ListenAddress = "tcp://0.0.0.0:36657"
conf.GRPCListenAddress = "tcp://0.0.0.0:36658"
conf.Unsafe = true
return conf
}
//-----------------------------------------------------------------------------
// P2PConfig
type P2PConfig struct {
RootDir string `mapstructure:"home"`
ListenAddress string `mapstructure:"laddr"`
Seeds string `mapstructure:"seeds"`
SkipUPNP bool `mapstructure:"skip_upnp"`
AddrBook string `mapstructure:"addr_book_file"`
AddrBookStrict bool `mapstructure:"addr_book_strict"`
PexReactor bool `mapstructure:"pex"`
MaxNumPeers int `mapstructure:"max_num_peers"`
}
func DefaultP2PConfig() *P2PConfig {
return &P2PConfig{
ListenAddress: "tcp://0.0.0.0:46656",
AddrBook: "addrbook.json",
AddrBookStrict: true,
MaxNumPeers: 50,
}
}
func TestP2PConfig() *P2PConfig {
conf := DefaultP2PConfig()
conf.ListenAddress = "tcp://0.0.0.0:36656"
conf.SkipUPNP = true
return conf
}
func (p *P2PConfig) AddrBookFile() string {
return rootify(p.AddrBook, p.RootDir)
}
//-----------------------------------------------------------------------------
// MempoolConfig
type MempoolConfig struct {
RootDir string `mapstructure:"home"`
Recheck bool `mapstructure:"recheck"`
RecheckEmpty bool `mapstructure:"recheck_empty"`
Broadcast bool `mapstructure:"broadcast"`
WalPath string `mapstructure:"wal_dir"`
}
func DefaultMempoolConfig() *MempoolConfig {
return &MempoolConfig{
Recheck: true,
RecheckEmpty: true,
Broadcast: true,
WalPath: "data/mempool.wal",
}
}
func (m *MempoolConfig) WalDir() string {
return rootify(m.WalPath, m.RootDir)
}
//-----------------------------------------------------------------------------
// ConsensusConfig
// ConsensusConfig holds timeouts and details about the WAL, the block structure,
// and timeouts in the consensus protocol.
type ConsensusConfig struct {
RootDir string `mapstructure:"home"`
WalPath string `mapstructure:"wal_file"`
WalLight bool `mapstructure:"wal_light"`
walFile string // overrides WalPath if set
// All timeouts are in ms
TimeoutPropose int `mapstructure:"timeout_propose"`
TimeoutProposeDelta int `mapstructure:"timeout_propose_delta"`
TimeoutPrevote int `mapstructure:"timeout_prevote"`
TimeoutPrevoteDelta int `mapstructure:"timeout_prevote_delta"`
TimeoutPrecommit int `mapstructure:"timeout_precommit"`
TimeoutPrecommitDelta int `mapstructure:"timeout_precommit_delta"`
TimeoutCommit int `mapstructure:"timeout_commit"`
// Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
SkipTimeoutCommit bool `mapstructure:"skip_timeout_commit"`
// BlockSize
MaxBlockSizeTxs int `mapstructure:"max_block_size_txs"`
MaxBlockSizeBytes int `mapstructure:"max_block_size_bytes"`
// TODO: This probably shouldn't be exposed but it makes it
// easy to write tests for the wal/replay
BlockPartSize int `mapstructure:"block_part_size"`
}
// Wait this long for a proposal
func (cfg *ConsensusConfig) Propose(round int) time.Duration {
return time.Duration(cfg.TimeoutPropose+cfg.TimeoutProposeDelta*round) * time.Millisecond
}
// After receiving any +2/3 prevote, wait this long for stragglers
func (cfg *ConsensusConfig) Prevote(round int) time.Duration {
return time.Duration(cfg.TimeoutPrevote+cfg.TimeoutPrevoteDelta*round) * time.Millisecond
}
// After receiving any +2/3 precommits, wait this long for stragglers
func (cfg *ConsensusConfig) Precommit(round int) time.Duration {
return time.Duration(cfg.TimeoutPrecommit+cfg.TimeoutPrecommitDelta*round) * time.Millisecond
}
// After receiving +2/3 precommits for a single block (a commit), wait this long for stragglers in the next height's RoundStepNewHeight
func (cfg *ConsensusConfig) Commit(t time.Time) time.Time {
return t.Add(time.Duration(cfg.TimeoutCommit) * time.Millisecond)
}
func DefaultConsensusConfig() *ConsensusConfig {
return &ConsensusConfig{
WalPath: "data/cs.wal/wal",
WalLight: false,
TimeoutPropose: 3000,
TimeoutProposeDelta: 500,
TimeoutPrevote: 1000,
TimeoutPrevoteDelta: 500,
TimeoutPrecommit: 1000,
TimeoutPrecommitDelta: 500,
TimeoutCommit: 1000,
SkipTimeoutCommit: false,
MaxBlockSizeTxs: 10000,
MaxBlockSizeBytes: 1, // TODO
BlockPartSize: types.DefaultBlockPartSize, // TODO: we shouldnt be importing types
}
}
func TestConsensusConfig() *ConsensusConfig {
config := DefaultConsensusConfig()
config.TimeoutPropose = 2000
config.TimeoutProposeDelta = 1
config.TimeoutPrevote = 10
config.TimeoutPrevoteDelta = 1
config.TimeoutPrecommit = 10
config.TimeoutPrecommitDelta = 1
config.TimeoutCommit = 10
config.SkipTimeoutCommit = true
return config
}
func (c *ConsensusConfig) WalFile() string {
if c.walFile != "" {
return c.walFile
}
return rootify(c.WalPath, c.RootDir)
}
func (c *ConsensusConfig) SetWalFile(walFile string) {
c.walFile = walFile
}
//-----------------------------------------------------------------------------
// Utils
// helper function to make config creation independent of root dir
func rootify(path, root string) string {
if filepath.IsAbs(path) {
return path
}
return filepath.Join(root, path)
}
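A short, hedged illustration (not part of the commit) of how the new Config type composes: SetRoot propagates the root dir, the path helpers go through rootify, and the consensus timeouts grow linearly with the round number.

package main

import (
	"fmt"

	cfg "github.com/tendermint/tendermint/config"
)

func main() {
	c := cfg.DefaultConfig().SetRoot("/home/user/.tendermint")

	// Relative paths are joined onto the root; absolute paths pass through.
	fmt.Println(c.GenesisFile())       // /home/user/.tendermint/genesis.json
	fmt.Println(c.PrivValidatorFile()) // /home/user/.tendermint/priv_validator.json
	fmt.Println(c.DBDir())             // /home/user/.tendermint/data

	// Propose(round) = TimeoutPropose + TimeoutProposeDelta*round, in ms.
	// With the defaults above (3000 + 500*round):
	fmt.Println(c.Consensus.Propose(0)) // 3s
	fmt.Println(c.Consensus.Propose(2)) // 4s
}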

config/config_test.go (new file, 28 lines)

@@ -0,0 +1,28 @@
package config
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestDefaultConfig(t *testing.T) {
assert := assert.New(t)
// set up some defaults
cfg := DefaultConfig()
assert.NotNil(cfg.P2P)
assert.NotNil(cfg.Mempool)
assert.NotNil(cfg.Consensus)
// check the root dir stuff...
cfg.SetRoot("/foo")
cfg.Genesis = "bar"
cfg.DBPath = "/opt/data"
cfg.Mempool.WalPath = "wal/mem/"
assert.Equal("/foo/bar", cfg.GenesisFile())
assert.Equal("/opt/data", cfg.DBDir())
assert.Equal("/foo/wal/mem", cfg.Mempool.WalDir())
}


@@ -1,125 +0,0 @@
package tendermint
import (
"os"
"path"
"strings"
. "github.com/tendermint/go-common"
cfg "github.com/tendermint/go-config"
)
func getTMRoot(rootDir string) string {
if rootDir == "" {
rootDir = os.Getenv("TMHOME")
}
if rootDir == "" {
// deprecated, use TMHOME (TODO: remove in TM 0.11.0)
rootDir = os.Getenv("TMROOT")
}
if rootDir == "" {
rootDir = os.Getenv("HOME") + "/.tendermint"
}
return rootDir
}
func initTMRoot(rootDir string) {
rootDir = getTMRoot(rootDir)
EnsureDir(rootDir, 0700)
EnsureDir(rootDir+"/data", 0700)
configFilePath := path.Join(rootDir, "config.toml")
// Write default config file if missing.
if !FileExists(configFilePath) {
// Ask user for moniker
// moniker := cfg.Prompt("Type hostname: ", "anonymous")
MustWriteFile(configFilePath, []byte(defaultConfig("anonymous")), 0644)
}
}
func GetConfig(rootDir string) cfg.Config {
rootDir = getTMRoot(rootDir)
initTMRoot(rootDir)
configFilePath := path.Join(rootDir, "config.toml")
mapConfig, err := cfg.ReadMapConfigFromFile(configFilePath)
if err != nil {
Exit(Fmt("Could not read config: %v", err))
}
// Set defaults or panic
if mapConfig.IsSet("chain_id") {
Exit("Cannot set 'chain_id' via config.toml")
}
if mapConfig.IsSet("revision_file") {
Exit("Cannot set 'revision_file' via config.toml. It must match what's in the Makefile")
}
mapConfig.SetRequired("chain_id") // blows up if you try to use it before setting.
mapConfig.SetDefault("genesis_file", rootDir+"/genesis.json")
mapConfig.SetDefault("proxy_app", "tcp://127.0.0.1:46658")
mapConfig.SetDefault("abci", "socket")
mapConfig.SetDefault("moniker", "anonymous")
mapConfig.SetDefault("node_laddr", "tcp://0.0.0.0:46656")
mapConfig.SetDefault("seeds", "")
// mapConfig.SetDefault("seeds", "goldenalchemist.chaintest.net:46656")
mapConfig.SetDefault("fast_sync", true)
mapConfig.SetDefault("skip_upnp", false)
mapConfig.SetDefault("addrbook_file", rootDir+"/addrbook.json")
mapConfig.SetDefault("addrbook_strict", true) // disable to allow connections locally
mapConfig.SetDefault("pex_reactor", false) // enable for peer exchange
mapConfig.SetDefault("priv_validator_file", rootDir+"/priv_validator.json")
mapConfig.SetDefault("db_backend", "leveldb")
mapConfig.SetDefault("db_dir", rootDir+"/data")
mapConfig.SetDefault("log_level", "info")
mapConfig.SetDefault("rpc_laddr", "tcp://0.0.0.0:46657")
mapConfig.SetDefault("grpc_laddr", "")
mapConfig.SetDefault("prof_laddr", "")
mapConfig.SetDefault("revision_file", rootDir+"/revision")
mapConfig.SetDefault("cs_wal_file", rootDir+"/data/cs.wal/wal")
mapConfig.SetDefault("cs_wal_light", false)
mapConfig.SetDefault("filter_peers", false)
mapConfig.SetDefault("block_size", 10000) // max number of txs
mapConfig.SetDefault("block_part_size", 65536) // part size 64K
mapConfig.SetDefault("disable_data_hash", false)
// all timeouts are in ms
mapConfig.SetDefault("timeout_handshake", 10000)
mapConfig.SetDefault("timeout_propose", 3000)
mapConfig.SetDefault("timeout_propose_delta", 500)
mapConfig.SetDefault("timeout_prevote", 1000)
mapConfig.SetDefault("timeout_prevote_delta", 500)
mapConfig.SetDefault("timeout_precommit", 1000)
mapConfig.SetDefault("timeout_precommit_delta", 500)
mapConfig.SetDefault("timeout_commit", 1000)
// make progress asap (no `timeout_commit`) on full precommit votes
mapConfig.SetDefault("skip_timeout_commit", false)
mapConfig.SetDefault("mempool_recheck", true)
mapConfig.SetDefault("mempool_recheck_empty", true)
mapConfig.SetDefault("mempool_broadcast", true)
mapConfig.SetDefault("mempool_wal_dir", rootDir+"/data/mempool.wal")
mapConfig.SetDefault("tx_index", "kv")
return mapConfig
}
var defaultConfigTmpl = `# This is a TOML config file.
# For more information, see https://github.com/toml-lang/toml
proxy_app = "tcp://127.0.0.1:46658"
moniker = "__MONIKER__"
node_laddr = "tcp://0.0.0.0:46656"
seeds = ""
fast_sync = true
db_backend = "leveldb"
log_level = "notice"
rpc_laddr = "tcp://0.0.0.0:46657"
`
func defaultConfig(moniker string) (defaultConfig string) {
defaultConfig = strings.Replace(defaultConfigTmpl, "__MONIKER__", moniker, -1)
return
}


@@ -1,22 +0,0 @@
// If you wanted to use logrotate, I suppose this might be the config you want.
// Instead, I'll just write our own, that way we don't need sudo to install.
$HOME/.tendermint/logs/tendermint.log {
missingok
notifempty
rotate 12
daily
size 10M
compress
delaycompress
}
$HOME/.barak/logs/barak.log {
missingok
notifempty
rotate 12
weekly
size 10M
compress
delaycompress
}


@@ -1,164 +0,0 @@
// Import this in all *_test.go files to initialize ~/.tendermint_test.
package tendermint_test
import (
"os"
"path"
"strings"
. "github.com/tendermint/go-common"
cfg "github.com/tendermint/go-config"
"github.com/tendermint/go-logger"
)
func init() {
// Creates ~/.tendermint_test
EnsureDir(os.Getenv("HOME")+"/.tendermint_test", 0700)
}
func initTMRoot(rootDir string) {
// Remove ~/.tendermint_test_bak
if FileExists(rootDir + "_bak") {
err := os.RemoveAll(rootDir + "_bak")
if err != nil {
PanicSanity(err.Error())
}
}
// Move ~/.tendermint_test to ~/.tendermint_test_bak
if FileExists(rootDir) {
err := os.Rename(rootDir, rootDir+"_bak")
if err != nil {
PanicSanity(err.Error())
}
}
// Create new dir
EnsureDir(rootDir, 0700)
EnsureDir(rootDir+"/data", 0700)
configFilePath := path.Join(rootDir, "config.toml")
genesisFilePath := path.Join(rootDir, "genesis.json")
privFilePath := path.Join(rootDir, "priv_validator.json")
// Write default config file if missing.
if !FileExists(configFilePath) {
// Ask user for moniker
// moniker := cfg.Prompt("Type hostname: ", "anonymous")
MustWriteFile(configFilePath, []byte(defaultConfig("anonymous")), 0644)
}
if !FileExists(genesisFilePath) {
MustWriteFile(genesisFilePath, []byte(defaultGenesis), 0644)
}
// we always overwrite the priv val
MustWriteFile(privFilePath, []byte(defaultPrivValidator), 0644)
}
func ResetConfig(localPath string) cfg.Config {
rootDir := os.Getenv("HOME") + "/.tendermint_test/" + localPath
initTMRoot(rootDir)
configFilePath := path.Join(rootDir, "config.toml")
mapConfig, err := cfg.ReadMapConfigFromFile(configFilePath)
if err != nil {
Exit(Fmt("Could not read config: %v", err))
}
// Set defaults or panic
if mapConfig.IsSet("chain_id") {
Exit("Cannot set 'chain_id' via config.toml")
}
mapConfig.SetDefault("chain_id", "tendermint_test")
mapConfig.SetDefault("genesis_file", rootDir+"/genesis.json")
mapConfig.SetDefault("proxy_app", "dummy")
mapConfig.SetDefault("abci", "socket")
mapConfig.SetDefault("moniker", "anonymous")
mapConfig.SetDefault("node_laddr", "tcp://0.0.0.0:36656")
mapConfig.SetDefault("fast_sync", false)
mapConfig.SetDefault("skip_upnp", true)
mapConfig.SetDefault("addrbook_file", rootDir+"/addrbook.json")
mapConfig.SetDefault("addrbook_strict", true) // disable to allow connections locally
mapConfig.SetDefault("pex_reactor", false) // enable for peer exchange
mapConfig.SetDefault("priv_validator_file", rootDir+"/priv_validator.json")
mapConfig.SetDefault("db_backend", "memdb")
mapConfig.SetDefault("db_dir", rootDir+"/data")
mapConfig.SetDefault("log_level", "info")
mapConfig.SetDefault("rpc_laddr", "tcp://0.0.0.0:36657")
mapConfig.SetDefault("grpc_laddr", "tcp://0.0.0.0:36658")
mapConfig.SetDefault("prof_laddr", "")
mapConfig.SetDefault("revision_file", rootDir+"/revision")
mapConfig.SetDefault("cs_wal_file", rootDir+"/data/cs.wal/wal")
mapConfig.SetDefault("cs_wal_light", false)
mapConfig.SetDefault("filter_peers", false)
mapConfig.SetDefault("block_size", 10000)
mapConfig.SetDefault("block_part_size", 65536) // part size 64K
mapConfig.SetDefault("disable_data_hash", false)
mapConfig.SetDefault("timeout_handshake", 10000)
mapConfig.SetDefault("timeout_propose", 2000)
mapConfig.SetDefault("timeout_propose_delta", 1)
mapConfig.SetDefault("timeout_prevote", 10)
mapConfig.SetDefault("timeout_prevote_delta", 1)
mapConfig.SetDefault("timeout_precommit", 10)
mapConfig.SetDefault("timeout_precommit_delta", 1)
mapConfig.SetDefault("timeout_commit", 10)
mapConfig.SetDefault("skip_timeout_commit", true)
mapConfig.SetDefault("mempool_recheck", true)
mapConfig.SetDefault("mempool_recheck_empty", true)
mapConfig.SetDefault("mempool_broadcast", true)
mapConfig.SetDefault("mempool_wal_dir", "")
mapConfig.SetDefault("tx_index", "kv")
logger.SetLogLevel(mapConfig.GetString("log_level"))
return mapConfig
}
var defaultConfigTmpl = `# This is a TOML config file.
# For more information, see https://github.com/toml-lang/toml
proxy_app = "dummy"
moniker = "__MONIKER__"
node_laddr = "tcp://0.0.0.0:36656"
seeds = ""
fast_sync = false
db_backend = "memdb"
log_level = "info"
rpc_laddr = "tcp://0.0.0.0:36657"
`
func defaultConfig(moniker string) (defaultConfig string) {
defaultConfig = strings.Replace(defaultConfigTmpl, "__MONIKER__", moniker, -1)
return
}
var defaultGenesis = `{
"genesis_time": "0001-01-01T00:00:00.000Z",
"chain_id": "tendermint_test",
"validators": [
{
"pub_key": [
1,
"3B3069C422E19688B45CBFAE7BB009FC0FA1B1EA86593519318B7214853803C8"
],
"amount": 10,
"name": ""
}
],
"app_hash": ""
}`
var defaultPrivValidator = `{
"address": "D028C9981F7A87F3093672BF0D5B0E2A1B3ED456",
"pub_key": [
1,
"3B3069C422E19688B45CBFAE7BB009FC0FA1B1EA86593519318B7214853803C8"
],
"priv_key": [
1,
"27F82582AEFAE7AB151CFB01C48BB6C1A0DA78F9BDDA979A9F70A84D074EB07D3B3069C422E19688B45CBFAE7BB009FC0FA1B1EA86593519318B7214853803C8"
],
"last_height": 0,
"last_round": 0,
"last_step": 0
}`

config/toml.go (new file, 142 lines)

@@ -0,0 +1,142 @@
package config
import (
"os"
"path"
"path/filepath"
"strings"
cmn "github.com/tendermint/tmlibs/common"
)
/****** these are for production settings ***********/
func EnsureRoot(rootDir string) {
cmn.EnsureDir(rootDir, 0700)
cmn.EnsureDir(rootDir+"/data", 0700)
configFilePath := path.Join(rootDir, "config.toml")
// Write default config file if missing.
if !cmn.FileExists(configFilePath) {
// Ask user for moniker
// moniker := cfg.Prompt("Type hostname: ", "anonymous")
cmn.MustWriteFile(configFilePath, []byte(defaultConfig("anonymous")), 0644)
}
}
var defaultConfigTmpl = `# This is a TOML config file.
# For more information, see https://github.com/toml-lang/toml
proxy_app = "tcp://127.0.0.1:46658"
moniker = "__MONIKER__"
fast_sync = true
db_backend = "leveldb"
log_level = "state:info,*:error"
[rpc]
laddr = "tcp://0.0.0.0:46657"
[p2p]
laddr = "tcp://0.0.0.0:46656"
seeds = ""
`
func defaultConfig(moniker string) string {
return strings.Replace(defaultConfigTmpl, "__MONIKER__", moniker, -1)
}
/****** these are for test settings ***********/
func ResetTestRoot(testName string) *Config {
rootDir := os.ExpandEnv("$HOME/.tendermint_test")
rootDir = filepath.Join(rootDir, testName)
// Remove ~/.tendermint_test_bak
if cmn.FileExists(rootDir + "_bak") {
err := os.RemoveAll(rootDir + "_bak")
if err != nil {
cmn.PanicSanity(err.Error())
}
}
// Move ~/.tendermint_test to ~/.tendermint_test_bak
if cmn.FileExists(rootDir) {
err := os.Rename(rootDir, rootDir+"_bak")
if err != nil {
cmn.PanicSanity(err.Error())
}
}
// Create new dir
cmn.EnsureDir(rootDir, 0700)
cmn.EnsureDir(rootDir+"/data", 0700)
configFilePath := path.Join(rootDir, "config.toml")
genesisFilePath := path.Join(rootDir, "genesis.json")
privFilePath := path.Join(rootDir, "priv_validator.json")
// Write default config file if missing.
if !cmn.FileExists(configFilePath) {
// Ask user for moniker
cmn.MustWriteFile(configFilePath, []byte(testConfig("anonymous")), 0644)
}
if !cmn.FileExists(genesisFilePath) {
cmn.MustWriteFile(genesisFilePath, []byte(testGenesis), 0644)
}
// we always overwrite the priv val
cmn.MustWriteFile(privFilePath, []byte(testPrivValidator), 0644)
config := TestConfig().SetRoot(rootDir)
return config
}
var testConfigTmpl = `# This is a TOML config file.
# For more information, see https://github.com/toml-lang/toml
proxy_app = "dummy"
moniker = "__MONIKER__"
fast_sync = false
db_backend = "memdb"
log_level = "info"
[rpc]
laddr = "tcp://0.0.0.0:36657"
[p2p]
laddr = "tcp://0.0.0.0:36656"
seeds = ""
`
func testConfig(moniker string) (testConfig string) {
testConfig = strings.Replace(testConfigTmpl, "__MONIKER__", moniker, -1)
return
}
var testGenesis = `{
"genesis_time": "0001-01-01T00:00:00.000Z",
"chain_id": "tendermint_test",
"validators": [
{
"pub_key": {
"type": "ed25519",
"data":"3B3069C422E19688B45CBFAE7BB009FC0FA1B1EA86593519318B7214853803C8"
},
"amount": 10,
"name": ""
}
],
"app_hash": ""
}`
var testPrivValidator = `{
"address": "D028C9981F7A87F3093672BF0D5B0E2A1B3ED456",
"pub_key": {
"type": "ed25519",
"data": "3B3069C422E19688B45CBFAE7BB009FC0FA1B1EA86593519318B7214853803C8"
},
"priv_key": {
"type": "ed25519",
"data": "27F82582AEFAE7AB151CFB01C48BB6C1A0DA78F9BDDA979A9F70A84D074EB07D3B3069C422E19688B45CBFAE7BB009FC0FA1B1EA86593519318B7214853803C8"
},
"last_height": 0,
"last_round": 0,
"last_step": 0
}`

config/toml_test.go (new file, 57 lines)

@@ -0,0 +1,57 @@
package config
import (
"io/ioutil"
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func ensureFiles(t *testing.T, rootDir string, files ...string) {
for _, f := range files {
p := rootify(rootDir, f)
_, err := os.Stat(p)
assert.Nil(t, err, p)
}
}
func TestEnsureRoot(t *testing.T) {
assert, require := assert.New(t), require.New(t)
// setup temp dir for test
tmpDir, err := ioutil.TempDir("", "config-test")
require.Nil(err)
defer os.RemoveAll(tmpDir)
// create root dir
EnsureRoot(tmpDir)
// make sure config is set properly
data, err := ioutil.ReadFile(filepath.Join(tmpDir, "config.toml"))
require.Nil(err)
assert.Equal([]byte(defaultConfig("anonymous")), data)
ensureFiles(t, tmpDir, "data")
}
func TestEnsureTestRoot(t *testing.T) {
assert, require := assert.New(t), require.New(t)
testName := "ensureTestRoot"
// create root dir
cfg := ResetTestRoot(testName)
rootDir := cfg.RootDir
// make sure config is set properly
data, err := ioutil.ReadFile(filepath.Join(rootDir, "config.toml"))
require.Nil(err)
assert.Equal([]byte(testConfig("anonymous")), data)
// TODO: make sure the cfg returned and testconfig are the same!
ensureFiles(t, rootDir, "data", "genesis.json", "priv_validator.json")
}


@@ -5,17 +5,14 @@ import (
"testing"
"time"
"github.com/tendermint/tendermint/config/tendermint_test"
. "github.com/tendermint/go-common"
cfg "github.com/tendermint/go-config"
"github.com/tendermint/go-events"
"github.com/tendermint/go-p2p"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/types"
. "github.com/tendermint/tmlibs/common"
"github.com/tendermint/tmlibs/events"
)
func init() {
config = tendermint_test.ResetConfig("consensus_byzantine_test")
config = ResetConfig("consensus_byzantine_test")
}
//----------------------------------------------
@@ -29,14 +26,17 @@ func init() {
// Heal partition and ensure A sees the commit
func TestByzantine(t *testing.T) {
N := 4
logger := consensusLogger()
css := randConsensusNet(N, "consensus_byzantine_test", newMockTickerFunc(false), newCounter)
// give the byzantine validator a normal ticker
css[0].SetTimeoutTicker(NewTimeoutTicker())
switches := make([]*p2p.Switch, N)
p2pLogger := logger.With("module", "p2p")
for i := 0; i < N; i++ {
switches[i] = p2p.NewSwitch(cfg.NewMapConfig(nil))
switches[i] = p2p.NewSwitch(config.P2P)
switches[i].SetLogger(p2pLogger.With("validator", i))
}
reactors := make([]p2p.Reactor, N)
@@ -50,19 +50,21 @@ func TestByzantine(t *testing.T) {
}
}()
eventChans := make([]chan interface{}, N)
eventLogger := logger.With("module", "events")
for i := 0; i < N; i++ {
if i == 0 {
css[i].privValidator = NewByzantinePrivValidator(css[i].privValidator.(*types.PrivValidator))
// make byzantine
css[i].decideProposal = func(j int) func(int, int) {
return func(height, round int) {
byzantineDecideProposalFunc(height, round, css[j], switches[j])
byzantineDecideProposalFunc(t, height, round, css[j], switches[j])
}
}(i)
css[i].doPrevote = func(height, round int) {}
}
eventSwitch := events.NewEventSwitch()
eventSwitch.SetLogger(eventLogger.With("validator", i))
_, err := eventSwitch.Start()
if err != nil {
t.Fatalf("Failed to start switch: %v", err)
@@ -70,6 +72,7 @@ func TestByzantine(t *testing.T) {
eventChans[i] = subscribeToEvent(eventSwitch, "tester", types.EventStringNewBlock(), 1)
conR := NewConsensusReactor(css[i], true) // so we dont start the consensus states
conR.SetLogger(logger.With("validator", i))
conR.SetEventSwitch(eventSwitch)
var conRI p2p.Reactor
@@ -80,7 +83,7 @@ func TestByzantine(t *testing.T) {
reactors[i] = conRI
}
p2p.MakeConnectedSwitches(N, func(i int, s *p2p.Switch) *p2p.Switch {
p2p.MakeConnectedSwitches(config.P2P, N, func(i int, s *p2p.Switch) *p2p.Switch {
// ignore new switch s, we already made ours
switches[i].AddReactor("CONSENSUS", reactors[i])
return switches[i]
@@ -118,7 +121,7 @@ func TestByzantine(t *testing.T) {
case <-eventChans[ind2]:
}
log.Notice("A block has been committed. Healing partition")
t.Log("A block has been committed. Healing partition")
// connect the partitions
p2p.Connect2Switches(switches, ind0, ind1)
@@ -156,7 +159,7 @@ func TestByzantine(t *testing.T) {
//-------------------------------
// byzantine consensus functions
func byzantineDecideProposalFunc(height, round int, cs *ConsensusState, sw *p2p.Switch) {
func byzantineDecideProposalFunc(t *testing.T, height, round int, cs *ConsensusState, sw *p2p.Switch) {
// byzantine user should create two proposals and try to split the vote.
// Avoid sending on internalMsgQueue and running consensus state.
@@ -177,7 +180,7 @@ func byzantineDecideProposalFunc(height, round int, cs *ConsensusState, sw *p2p.
// broadcast conflicting proposals/block parts to peers
peers := sw.Peers().List()
log.Notice("Byzantine: broadcasting conflicting proposals", "peers", len(peers))
t.Logf("Byzantine: broadcasting conflicting proposals to %d peers", len(peers))
for i, peer := range peers {
if i < len(peers)/2 {
go sendProposalAndParts(height, round, cs, peer, proposal1, block1Hash, blockParts1)


@@ -13,21 +13,24 @@ import (
abcicli "github.com/tendermint/abci/client"
abci "github.com/tendermint/abci/types"
. "github.com/tendermint/go-common"
cfg "github.com/tendermint/go-config"
dbm "github.com/tendermint/go-db"
"github.com/tendermint/go-p2p"
bc "github.com/tendermint/tendermint/blockchain"
"github.com/tendermint/tendermint/config/tendermint_test"
cfg "github.com/tendermint/tendermint/config"
mempl "github.com/tendermint/tendermint/mempool"
"github.com/tendermint/tendermint/p2p"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
. "github.com/tendermint/tmlibs/common"
dbm "github.com/tendermint/tmlibs/db"
"github.com/tendermint/tmlibs/log"
"github.com/tendermint/abci/example/counter"
"github.com/tendermint/abci/example/dummy"
"github.com/go-kit/kit/log/term"
)
var config cfg.Config // NOTE: must be reset for each _test.go file
// genesis, chain_id, priv_val
var config *cfg.Config // NOTE: must be reset for each _test.go file
var ensureTimeout = time.Duration(2)
func ensureDir(dir string, mode os.FileMode) {
@@ -36,6 +39,10 @@ func ensureDir(dir string, mode os.FileMode) {
}
}
func ResetConfig(name string) *cfg.Config {
return cfg.ResetTestRoot(name)
}
//-------------------------------------------------------------------------------
// validator stub (a dummy consensus peer we control)
@@ -64,7 +71,7 @@ func (vs *validatorStub) signVote(voteType byte, hash []byte, header types.PartS
Type: voteType,
BlockID: types.BlockID{hash, header},
}
err := vs.PrivValidator.SignVote(config.GetString("chain_id"), vote)
err := vs.PrivValidator.SignVote(config.ChainID, vote)
return vote, err
}
@@ -115,7 +122,7 @@ func decideProposal(cs1 *ConsensusState, vs *validatorStub, height, round int) (
// Make proposal
polRound, polBlockID := cs1.Votes.POLInfo()
proposal = types.NewProposal(height, round, blockParts.Header(), polRound, polBlockID)
if err := vs.SignProposal(config.GetString("chain_id"), proposal); err != nil {
if err := vs.SignProposal(config.ChainID, proposal); err != nil {
panic(err)
}
return
@@ -205,7 +212,7 @@ func subscribeToVoter(cs *ConsensusState, addr []byte) chan interface{} {
go func() {
for {
v := <-voteCh0
vote := v.(types.EventDataVote)
vote := v.(types.TMEventData).Unwrap().(types.EventDataVote)
// we only fire for our own votes
if bytes.Equal(addr, vote.Vote.ValidatorAddress) {
voteCh <- v
@@ -233,7 +240,7 @@ func newConsensusState(state *sm.State, pv *types.PrivValidator, app abci.Applic
return newConsensusStateWithConfig(config, state, pv, app)
}
func newConsensusStateWithConfig(thisConfig cfg.Config, state *sm.State, pv *types.PrivValidator, app abci.Application) *ConsensusState {
func newConsensusStateWithConfig(thisConfig *cfg.Config, state *sm.State, pv *types.PrivValidator, app abci.Application) *ConsensusState {
// Get BlockStore
blockDB := dbm.NewMemDB()
blockStore := bc.NewBlockStore(blockDB)
@@ -244,39 +251,46 @@ func newConsensusStateWithConfig(thisConfig cfg.Config, state *sm.State, pv *typ
proxyAppConnCon := abcicli.NewLocalClient(mtx, app)
// Make Mempool
mempool := mempl.NewMempool(thisConfig, proxyAppConnMem)
mempool := mempl.NewMempool(thisConfig.Mempool, proxyAppConnMem)
mempool.SetLogger(log.TestingLogger().With("module", "mempool"))
// Make ConsensusReactor
cs := NewConsensusState(thisConfig, state, proxyAppConnCon, blockStore, mempool)
cs := NewConsensusState(thisConfig.Consensus, state, proxyAppConnCon, blockStore, mempool)
cs.SetLogger(log.TestingLogger())
cs.SetPrivValidator(pv)
evsw := types.NewEventSwitch()
evsw.SetLogger(log.TestingLogger().With("module", "events"))
cs.SetEventSwitch(evsw)
evsw.Start()
return cs
}
func loadPrivValidator(conf cfg.Config) *types.PrivValidator {
privValidatorFile := conf.GetString("priv_validator_file")
func loadPrivValidator(config *cfg.Config) *types.PrivValidator {
privValidatorFile := config.PrivValidatorFile()
ensureDir(path.Dir(privValidatorFile), 0700)
privValidator := types.LoadOrGenPrivValidator(privValidatorFile)
privValidator := types.LoadOrGenPrivValidator(privValidatorFile, log.TestingLogger())
privValidator.Reset()
return privValidator
}
func fixedConsensusState() *ConsensusState {
stateDB := dbm.NewMemDB()
state := sm.MakeGenesisStateFromFile(stateDB, config.GetString("genesis_file"))
state := sm.MakeGenesisStateFromFile(stateDB, config.GenesisFile())
state.SetLogger(log.TestingLogger().With("module", "state"))
privValidator := loadPrivValidator(config)
cs := newConsensusState(state, privValidator, counter.NewCounterApplication(true))
cs.SetLogger(log.TestingLogger())
return cs
}
func fixedConsensusStateDummy() *ConsensusState {
stateDB := dbm.NewMemDB()
state := sm.MakeGenesisStateFromFile(stateDB, config.GetString("genesis_file"))
state := sm.MakeGenesisStateFromFile(stateDB, config.GenesisFile())
state.SetLogger(log.TestingLogger().With("module", "state"))
privValidator := loadPrivValidator(config)
cs := newConsensusState(state, privValidator, dummy.NewDummyApplication())
cs.SetLogger(log.TestingLogger())
return cs
}
@@ -287,6 +301,7 @@ func randConsensusState(nValidators int) (*ConsensusState, []*validatorStub) {
vss := make([]*validatorStub, nValidators)
cs := newConsensusState(state, privVals[0], counter.NewCounterApplication(true))
cs.SetLogger(log.TestingLogger())
for i := 0; i < nValidators; i++ {
vss[i] = NewValidatorStub(privVals[i], i)
@@ -312,16 +327,32 @@ func ensureNoNewStep(stepCh chan interface{}) {
//-------------------------------------------------------------------------------
// consensus nets
// consensusLogger is a TestingLogger which uses a different
// color for each validator ("validator" key must exist).
func consensusLogger() log.Logger {
return log.TestingLoggerWithColorFn(func(keyvals ...interface{}) term.FgBgColor {
for i := 0; i < len(keyvals)-1; i += 2 {
if keyvals[i] == "validator" {
return term.FgBgColor{Fg: term.Color(uint8(keyvals[i+1].(int) + 1))}
}
}
return term.FgBgColor{}
})
}
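
The color function receives the logger's key/value pairs as one flat list, so it walks them two at a time looking for the "validator" key and maps the validator index to a terminal color, leaving the default color for everything else. A standalone version of that scan, assuming nothing beyond the standard library:

package main

import "fmt"

// validatorColor mirrors the pairwise keyvals scan above: it returns a
// per-validator color index, or 0 when no "validator" key is present.
func validatorColor(keyvals ...interface{}) int {
	for i := 0; i < len(keyvals)-1; i += 2 {
		if keyvals[i] == "validator" {
			if idx, ok := keyvals[i+1].(int); ok {
				return idx + 1
			}
		}
	}
	return 0
}

func main() {
	fmt.Println(validatorColor("module", "consensus", "validator", 2)) // 3
	fmt.Println(validatorColor("module", "state"))                     // 0
}
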
func randConsensusNet(nValidators int, testName string, tickerFunc func() TimeoutTicker, appFunc func() abci.Application) []*ConsensusState {
genDoc, privVals := randGenesisDoc(nValidators, false, 10)
css := make([]*ConsensusState, nValidators)
logger := consensusLogger()
for i := 0; i < nValidators; i++ {
db := dbm.NewMemDB() // each state needs its own db
state := sm.MakeGenesisState(db, genDoc)
state.SetLogger(logger.With("module", "state", "validator", i))
state.Save()
thisConfig := tendermint_test.ResetConfig(Fmt("%s_%d", testName, i))
ensureDir(path.Dir(thisConfig.GetString("cs_wal_file")), 0700) // dir for wal
thisConfig := ResetConfig(Fmt("%s_%d", testName, i))
ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
css[i] = newConsensusStateWithConfig(thisConfig, state, privVals[i], appFunc())
css[i].SetLogger(logger.With("validator", i))
css[i].SetTimeoutTicker(tickerFunc())
}
return css
@@ -334,9 +365,10 @@ func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerF
for i := 0; i < nPeers; i++ {
db := dbm.NewMemDB() // each state needs its own db
state := sm.MakeGenesisState(db, genDoc)
state.SetLogger(log.TestingLogger().With("module", "state"))
state.Save()
thisConfig := tendermint_test.ResetConfig(Fmt("%s_%d", testName, i))
ensureDir(path.Dir(thisConfig.GetString("cs_wal_file")), 0700) // dir for wal
thisConfig := ResetConfig(Fmt("%s_%d", testName, i))
ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
var privVal *types.PrivValidator
if i < nValidators {
privVal = privVals[i]
@@ -347,6 +379,7 @@ func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerF
}
css[i] = newConsensusStateWithConfig(thisConfig, state, privVal, appFunc())
css[i].SetLogger(log.TestingLogger())
css[i].SetTimeoutTicker(tickerFunc())
}
return css
@@ -379,7 +412,7 @@ func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.G
sort.Sort(types.PrivValidatorsByAddress(privValidators))
return &types.GenesisDoc{
GenesisTime: time.Now(),
ChainID: config.GetString("chain_id"),
ChainID: config.ChainID,
Validators: validators,
}, privValidators
}
@@ -388,6 +421,7 @@ func randGenesisState(numValidators int, randPower bool, minPower int64) (*sm.St
genDoc, privValidators := randGenesisDoc(numValidators, randPower, minPower)
db := dbm.NewMemDB()
s0 := sm.MakeGenesisState(db, genDoc)
s0.SetLogger(log.TestingLogger().With("module", "state"))
s0.Save()
return s0, privValidators
}
@@ -438,6 +472,9 @@ func (m *mockTicker) Chan() <-chan timeoutInfo {
return m.c
}
func (mockTicker) SetLogger(log.Logger) {
}
//------------------------------------
func newCounter() abci.Application {


@@ -4,8 +4,8 @@ import (
"strings"
"sync"
. "github.com/tendermint/go-common"
"github.com/tendermint/tendermint/types"
. "github.com/tendermint/tmlibs/common"
)
type RoundVoteSet struct {
@@ -91,7 +91,7 @@ func (hvs *HeightVoteSet) addRound(round int) {
if _, ok := hvs.roundVoteSets[round]; ok {
PanicSanity("addRound() for an existing round")
}
log.Debug("addRound(round)", "round", round)
// log.Debug("addRound(round)", "round", round)
prevotes := types.NewVoteSet(hvs.chainID, hvs.height, round, types.VoteTypePrevote, hvs.valSet)
precommits := types.NewVoteSet(hvs.chainID, hvs.height, round, types.VoteTypePrecommit, hvs.valSet)
hvs.roundVoteSets[round] = RoundVoteSet{
@@ -118,7 +118,7 @@ func (hvs *HeightVoteSet) AddVote(vote *types.Vote, peerKey string) (added bool,
// Peer has sent a vote that does not match our round,
// for more than one round. Bad peer!
// TODO punish peer.
log.Warn("Deal with peer giving votes from unwanted rounds")
// log.Warn("Deal with peer giving votes from unwanted rounds")
return
}
}


@@ -3,19 +3,18 @@ package consensus
import (
"testing"
. "github.com/tendermint/go-common"
"github.com/tendermint/tendermint/config/tendermint_test"
"github.com/tendermint/tendermint/types"
. "github.com/tendermint/tmlibs/common"
)
func init() {
config = tendermint_test.ResetConfig("consensus_height_vote_set_test")
config = ResetConfig("consensus_height_vote_set_test")
}
func TestPeerCatchupRounds(t *testing.T) {
valSet, privVals := types.RandValidatorSet(10, 1)
hvs := NewHeightVoteSet(config.GetString("chain_id"), 1, valSet)
hvs := NewHeightVoteSet(config.ChainID, 1, valSet)
vote999_0 := makeVoteHR(t, 1, 999, privVals, 0)
added, err := hvs.AddVote(vote999_0, "peer1")
@@ -52,7 +51,7 @@ func makeVoteHR(t *testing.T, height, round int, privVals []*types.PrivValidator
Type: types.VoteTypePrecommit,
BlockID: types.BlockID{[]byte("fakehash"), types.PartSetHeader{}},
}
chainID := config.GetString("chain_id")
chainID := config.ChainID
err := privVal.SignVote(chainID, vote)
if err != nil {
panic(Fmt("Error signing vote: %v", err))


@@ -1,18 +0,0 @@
package consensus
import (
"github.com/tendermint/go-logger"
)
var log = logger.New("module", "consensus")
/*
func init() {
log.SetHandler(
logger.LvlFilterHandler(
logger.LvlDebug,
logger.BypassHandler(),
),
)
}
*/


@@ -6,14 +6,13 @@ import (
"time"
abci "github.com/tendermint/abci/types"
"github.com/tendermint/tendermint/config/tendermint_test"
"github.com/tendermint/tendermint/types"
. "github.com/tendermint/go-common"
. "github.com/tendermint/tmlibs/common"
)
func init() {
config = tendermint_test.ResetConfig("consensus_mempool_test")
config = ResetConfig("consensus_mempool_test")
}
func TestTxConcurrentWithCommit(t *testing.T) {
@@ -44,7 +43,7 @@ func TestTxConcurrentWithCommit(t *testing.T) {
for nTxs := 0; nTxs < NTxs; {
select {
case b := <-newBlockCh:
nTxs += b.(types.EventDataNewBlock).Block.Header.NumTxs
nTxs += b.(types.TMEventData).Unwrap().(types.EventDataNewBlock).Block.Header.NumTxs
case <-ticker.C:
panic("Timed out waiting to commit blocks with transactions")
}


@@ -8,11 +8,11 @@ import (
"sync"
"time"
. "github.com/tendermint/go-common"
"github.com/tendermint/go-p2p"
"github.com/tendermint/go-wire"
wire "github.com/tendermint/go-wire"
"github.com/tendermint/tendermint/p2p"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
. "github.com/tendermint/tmlibs/common"
)
const (
@@ -41,12 +41,12 @@ func NewConsensusReactor(consensusState *ConsensusState, fastSync bool) *Consens
conS: consensusState,
fastSync: fastSync,
}
conR.BaseReactor = *p2p.NewBaseReactor(log, "ConsensusReactor", conR)
conR.BaseReactor = *p2p.NewBaseReactor("ConsensusReactor", conR)
return conR
}
func (conR *ConsensusReactor) OnStart() error {
log.Notice("ConsensusReactor ", "fastSync", conR.fastSync)
conR.Logger.Info("ConsensusReactor ", "fastSync", conR.fastSync)
conR.BaseReactor.OnStart()
// callbacks for broadcasting new steps and votes to peers
@@ -70,7 +70,7 @@ func (conR *ConsensusReactor) OnStop() {
// Switch from the fast_sync to the consensus:
// reset the state, turn off fast_sync, start the consensus-state-machine
func (conR *ConsensusReactor) SwitchToConsensus(state *sm.State) {
log.Notice("SwitchToConsensus")
conR.Logger.Info("SwitchToConsensus")
conR.conS.reconstructLastCommit(state)
// NOTE: The line below causes broadcastNewRoundStepRoutine() to
// broadcast a NewRoundStepMessage.
@@ -148,17 +148,17 @@ func (conR *ConsensusReactor) RemovePeer(peer *p2p.Peer, reason interface{}) {
// NOTE: blocks on consensus state for proposals, block parts, and votes
func (conR *ConsensusReactor) Receive(chID byte, src *p2p.Peer, msgBytes []byte) {
if !conR.IsRunning() {
log.Debug("Receive", "src", src, "chId", chID, "bytes", msgBytes)
conR.Logger.Debug("Receive", "src", src, "chId", chID, "bytes", msgBytes)
return
}
_, msg, err := DecodeMessage(msgBytes)
if err != nil {
log.Warn("Error decoding message", "src", src, "chId", chID, "msg", msg, "error", err, "bytes", msgBytes)
conR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "error", err, "bytes", msgBytes)
// TODO punish peer?
return
}
log.Debug("Receive", "src", src, "chId", chID, "msg", msg)
conR.Logger.Debug("Receive", "src", src, "chId", chID, "msg", msg)
// Get peer states
ps := src.Data.Get(types.PeerStateKey).(*PeerState)
@@ -191,7 +191,7 @@ func (conR *ConsensusReactor) Receive(chID byte, src *p2p.Peer, msgBytes []byte)
case types.VoteTypePrecommit:
ourVotes = votes.Precommits(msg.Round).BitArrayByBlockID(msg.BlockID)
default:
log.Warn("Bad VoteSetBitsMessage field Type")
conR.Logger.Error("Bad VoteSetBitsMessage field Type")
return
}
src.TrySend(VoteSetBitsChannel, struct{ ConsensusMessage }{&VoteSetBitsMessage{
@@ -202,12 +202,12 @@ func (conR *ConsensusReactor) Receive(chID byte, src *p2p.Peer, msgBytes []byte)
Votes: ourVotes,
}})
default:
log.Warn(Fmt("Unknown message type %v", reflect.TypeOf(msg)))
conR.Logger.Error(Fmt("Unknown message type %v", reflect.TypeOf(msg)))
}
case DataChannel:
if conR.fastSync {
log.Warn("Ignoring message received during fastSync", "msg", msg)
conR.Logger.Info("Ignoring message received during fastSync", "msg", msg)
return
}
switch msg := msg.(type) {
@@ -220,12 +220,12 @@ func (conR *ConsensusReactor) Receive(chID byte, src *p2p.Peer, msgBytes []byte)
ps.SetHasProposalBlockPart(msg.Height, msg.Round, msg.Part.Index)
conR.conS.peerMsgQueue <- msgInfo{msg, src.Key}
default:
log.Warn(Fmt("Unknown message type %v", reflect.TypeOf(msg)))
conR.Logger.Error(Fmt("Unknown message type %v", reflect.TypeOf(msg)))
}
case VoteChannel:
if conR.fastSync {
log.Warn("Ignoring message received during fastSync", "msg", msg)
conR.Logger.Info("Ignoring message received during fastSync", "msg", msg)
return
}
switch msg := msg.(type) {
@@ -242,12 +242,12 @@ func (conR *ConsensusReactor) Receive(chID byte, src *p2p.Peer, msgBytes []byte)
default:
// don't punish (leave room for soft upgrades)
log.Warn(Fmt("Unknown message type %v", reflect.TypeOf(msg)))
conR.Logger.Error(Fmt("Unknown message type %v", reflect.TypeOf(msg)))
}
case VoteSetBitsChannel:
if conR.fastSync {
log.Warn("Ignoring message received during fastSync", "msg", msg)
conR.Logger.Info("Ignoring message received during fastSync", "msg", msg)
return
}
switch msg := msg.(type) {
@@ -265,7 +265,7 @@ func (conR *ConsensusReactor) Receive(chID byte, src *p2p.Peer, msgBytes []byte)
case types.VoteTypePrecommit:
ourVotes = votes.Precommits(msg.Round).BitArrayByBlockID(msg.BlockID)
default:
log.Warn("Bad VoteSetBitsMessage field Type")
conR.Logger.Error("Bad VoteSetBitsMessage field Type")
return
}
ps.ApplyVoteSetBitsMessage(msg, ourVotes)
@@ -274,15 +274,15 @@ func (conR *ConsensusReactor) Receive(chID byte, src *p2p.Peer, msgBytes []byte)
}
default:
// don't punish (leave room for soft upgrades)
log.Warn(Fmt("Unknown message type %v", reflect.TypeOf(msg)))
conR.Logger.Error(Fmt("Unknown message type %v", reflect.TypeOf(msg)))
}
default:
log.Warn(Fmt("Unknown chId %X", chID))
conR.Logger.Error(Fmt("Unknown chId %X", chID))
}
if err != nil {
log.Warn("Error in Receive()", "error", err)
conR.Logger.Error("Error in Receive()", "error", err)
}
}
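
All of the default cases above follow the same rule: an unknown message type is logged and dropped rather than treated as a punishable offence, which leaves room for peers running newer protocol versions to send message types this node does not yet know. Schematically, with stand-in message types:

package main

import (
	"fmt"
	"reflect"
)

type VoteMessage struct{}
type FutureMessage struct{} // a type this node does not know how to handle

// handle logs and ignores unknown message types instead of penalising the peer.
func handle(msg interface{}) {
	switch msg.(type) {
	case *VoteMessage:
		fmt.Println("processing vote")
	default:
		// don't punish: leave room for soft upgrades
		fmt.Printf("Unknown message type %v, ignoring\n", reflect.TypeOf(msg))
	}
}

func main() {
	handle(&VoteMessage{})
	handle(&FutureMessage{})
}
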
@@ -299,12 +299,12 @@ func (conR *ConsensusReactor) SetEventSwitch(evsw types.EventSwitch) {
func (conR *ConsensusReactor) registerEventCallbacks() {
types.AddListenerForEvent(conR.evsw, "conR", types.EventStringNewRoundStep(), func(data types.TMEventData) {
rs := data.(types.EventDataRoundState).RoundState.(*RoundState)
rs := data.Unwrap().(types.EventDataRoundState).RoundState.(*RoundState)
conR.broadcastNewRoundStep(rs)
})
types.AddListenerForEvent(conR.evsw, "conR", types.EventStringVote(), func(data types.TMEventData) {
edv := data.(types.EventDataVote)
edv := data.Unwrap().(types.EventDataVote)
conR.broadcastHasVoteMessage(edv.Vote)
})
}
@@ -376,13 +376,13 @@ func (conR *ConsensusReactor) sendNewRoundStepMessages(peer *p2p.Peer) {
}
func (conR *ConsensusReactor) gossipDataRoutine(peer *p2p.Peer, ps *PeerState) {
log := log.New("peer", peer)
logger := conR.Logger.With("peer", peer)
OUTER_LOOP:
for {
// Manage disconnects from self or peer.
if !peer.IsRunning() || !conR.IsRunning() {
log.Notice(Fmt("Stopping gossipDataRoutine for %v.", peer))
logger.Info("Stopping gossipDataRoutine for peer")
return
}
rs := conR.conS.GetRoundState()
@@ -390,7 +390,7 @@ OUTER_LOOP:
// Send proposal Block parts?
if rs.ProposalBlockParts.HasHeader(prs.ProposalBlockPartsHeader) {
//log.Info("ProposalBlockParts matched", "blockParts", prs.ProposalBlockParts)
//logger.Info("ProposalBlockParts matched", "blockParts", prs.ProposalBlockParts)
if index, ok := rs.ProposalBlockParts.BitArray().Sub(prs.ProposalBlockParts.Copy()).PickRandom(); ok {
part := rs.ProposalBlockParts.GetPart(index)
msg := &BlockPartMessage{
@@ -407,16 +407,16 @@ OUTER_LOOP:
// If the peer is on a previous height, help catch up.
if (0 < prs.Height) && (prs.Height < rs.Height) {
//log.Info("Data catchup", "height", rs.Height, "peerHeight", prs.Height, "peerProposalBlockParts", prs.ProposalBlockParts)
//logger.Info("Data catchup", "height", rs.Height, "peerHeight", prs.Height, "peerProposalBlockParts", prs.ProposalBlockParts)
if index, ok := prs.ProposalBlockParts.Not().PickRandom(); ok {
// Ensure that the peer's PartSetHeader is correct
blockMeta := conR.conS.blockStore.LoadBlockMeta(prs.Height)
if blockMeta == nil {
log.Warn("Failed to load block meta", "peer height", prs.Height, "our height", rs.Height, "blockstore height", conR.conS.blockStore.Height(), "pv", conR.conS.privValidator)
logger.Error("Failed to load block meta", "peer height", prs.Height, "our height", rs.Height, "blockstore height", conR.conS.blockStore.Height(), "pv", conR.conS.privValidator)
time.Sleep(peerGossipSleepDuration)
continue OUTER_LOOP
} else if !blockMeta.BlockID.PartsHeader.Equals(prs.ProposalBlockPartsHeader) {
log.Info("Peer ProposalBlockPartsHeader mismatch, sleeping",
logger.Info("Peer ProposalBlockPartsHeader mismatch, sleeping",
"peerHeight", prs.Height, "blockPartsHeader", blockMeta.BlockID.PartsHeader, "peerBlockPartsHeader", prs.ProposalBlockPartsHeader)
time.Sleep(peerGossipSleepDuration)
continue OUTER_LOOP
@@ -424,7 +424,7 @@ OUTER_LOOP:
// Load the part
part := conR.conS.blockStore.LoadBlockPart(prs.Height, index)
if part == nil {
log.Warn("Could not load part", "index", index,
logger.Error("Could not load part", "index", index,
"peerHeight", prs.Height, "blockPartsHeader", blockMeta.BlockID.PartsHeader, "peerBlockPartsHeader", prs.ProposalBlockPartsHeader)
time.Sleep(peerGossipSleepDuration)
continue OUTER_LOOP
@@ -440,7 +440,7 @@ OUTER_LOOP:
}
continue OUTER_LOOP
} else {
//log.Info("No parts to send in catch-up, sleeping")
//logger.Info("No parts to send in catch-up, sleeping")
time.Sleep(peerGossipSleepDuration)
continue OUTER_LOOP
}
@@ -448,7 +448,7 @@ OUTER_LOOP:
// If height and round don't match, sleep.
if (rs.Height != prs.Height) || (rs.Round != prs.Round) {
//log.Info("Peer Height|Round mismatch, sleeping", "peerHeight", prs.Height, "peerRound", prs.Round, "peer", peer)
//logger.Info("Peer Height|Round mismatch, sleeping", "peerHeight", prs.Height, "peerRound", prs.Round, "peer", peer)
time.Sleep(peerGossipSleepDuration)
continue OUTER_LOOP
}
@@ -489,7 +489,7 @@ OUTER_LOOP:
}
func (conR *ConsensusReactor) gossipVotesRoutine(peer *p2p.Peer, ps *PeerState) {
log := log.New("peer", peer)
logger := conR.Logger.With("peer", peer)
// Simple hack to throttle logs upon sleep.
var sleeping = 0
@@ -498,7 +498,7 @@ OUTER_LOOP:
for {
// Manage disconnects from self or peer.
if !peer.IsRunning() || !conR.IsRunning() {
log.Notice(Fmt("Stopping gossipVotesRoutine for %v.", peer))
logger.Info("Stopping gossipVotesRoutine for peer")
return
}
rs := conR.conS.GetRoundState()
@@ -511,7 +511,7 @@ OUTER_LOOP:
sleeping = 0
}
//log.Debug("gossipVotesRoutine", "rsHeight", rs.Height, "rsRound", rs.Round,
//logger.Debug("gossipVotesRoutine", "rsHeight", rs.Height, "rsRound", rs.Round,
// "prsHeight", prs.Height, "prsRound", prs.Round, "prsStep", prs.Step)
// If height matches, then send LastCommit, Prevotes, Precommits.
@@ -519,21 +519,21 @@ OUTER_LOOP:
// If there are lastCommits to send...
if prs.Step == RoundStepNewHeight {
if ps.PickSendVote(rs.LastCommit) {
log.Debug("Picked rs.LastCommit to send")
logger.Debug("Picked rs.LastCommit to send")
continue OUTER_LOOP
}
}
// If there are prevotes to send...
if prs.Step <= RoundStepPrevote && prs.Round != -1 && prs.Round <= rs.Round {
if ps.PickSendVote(rs.Votes.Prevotes(prs.Round)) {
log.Debug("Picked rs.Prevotes(prs.Round) to send")
logger.Debug("Picked rs.Prevotes(prs.Round) to send")
continue OUTER_LOOP
}
}
// If there are precommits to send...
if prs.Step <= RoundStepPrecommit && prs.Round != -1 && prs.Round <= rs.Round {
if ps.PickSendVote(rs.Votes.Precommits(prs.Round)) {
log.Debug("Picked rs.Precommits(prs.Round) to send")
logger.Debug("Picked rs.Precommits(prs.Round) to send")
continue OUTER_LOOP
}
}
@@ -541,7 +541,7 @@ OUTER_LOOP:
if prs.ProposalPOLRound != -1 {
if polPrevotes := rs.Votes.Prevotes(prs.ProposalPOLRound); polPrevotes != nil {
if ps.PickSendVote(polPrevotes) {
log.Debug("Picked rs.Prevotes(prs.ProposalPOLRound) to send")
logger.Debug("Picked rs.Prevotes(prs.ProposalPOLRound) to send")
continue OUTER_LOOP
}
}
@@ -552,7 +552,7 @@ OUTER_LOOP:
// If peer is lagging by height 1, send LastCommit.
if prs.Height != 0 && rs.Height == prs.Height+1 {
if ps.PickSendVote(rs.LastCommit) {
log.Debug("Picked rs.LastCommit to send")
logger.Debug("Picked rs.LastCommit to send")
continue OUTER_LOOP
}
}
@@ -563,9 +563,9 @@ OUTER_LOOP:
// Load the block commit for prs.Height,
// which contains precommit signatures for prs.Height.
commit := conR.conS.blockStore.LoadBlockCommit(prs.Height)
log.Info("Loaded BlockCommit for catch-up", "height", prs.Height, "commit", commit)
logger.Info("Loaded BlockCommit for catch-up", "height", prs.Height, "commit", commit)
if ps.PickSendVote(commit) {
log.Debug("Picked Catchup commit to send")
logger.Debug("Picked Catchup commit to send")
continue OUTER_LOOP
}
}
@@ -573,7 +573,7 @@ OUTER_LOOP:
if sleeping == 0 {
// We sent nothing. Sleep...
sleeping = 1
log.Debug("No votes to send, sleeping", "peer", peer,
logger.Debug("No votes to send, sleeping",
"localPV", rs.Votes.Prevotes(rs.Round).BitArray(), "peerPV", prs.Prevotes,
"localPC", rs.Votes.Precommits(rs.Round).BitArray(), "peerPC", prs.Precommits)
} else if sleeping == 2 {
@@ -589,13 +589,13 @@ OUTER_LOOP:
// NOTE: `queryMaj23Routine` has a simple crude design since it only comes
// into play for liveness when there's a signature DDoS attack happening.
func (conR *ConsensusReactor) queryMaj23Routine(peer *p2p.Peer, ps *PeerState) {
log := log.New("peer", peer)
logger := conR.Logger.With("peer", peer)
OUTER_LOOP:
for {
// Manage disconnects from self or peer.
if !peer.IsRunning() || !conR.IsRunning() {
log.Notice(Fmt("Stopping queryMaj23Routine for %v.", peer))
logger.Info("Stopping queryMaj23Routine for peer")
return
}
@@ -952,8 +952,8 @@ func (ps *PeerState) SetHasVote(vote *types.Vote) {
}
func (ps *PeerState) setHasVote(height int, round int, type_ byte, index int) {
log := log.New("peer", ps.Peer, "peerRound", ps.Round, "height", height, "round", round)
log.Debug("setHasVote(LastCommit)", "lastCommit", ps.LastCommit, "index", index)
logger := ps.Peer.Logger.With("peerRound", ps.Round, "height", height, "round", round)
logger.Debug("setHasVote(LastCommit)", "lastCommit", ps.LastCommit, "index", index)
// NOTE: some may be nil BitArrays -> no side effects.
ps.getVoteBitArray(height, round, type_).SetIndex(index, true)


@@ -6,16 +6,14 @@ import (
"testing"
"time"
"github.com/tendermint/tendermint/config/tendermint_test"
"github.com/tendermint/go-events"
"github.com/tendermint/go-p2p"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/abci/example/dummy"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tmlibs/events"
)
func init() {
config = tendermint_test.ResetConfig("consensus_reactor_test")
config = ResetConfig("consensus_reactor_test")
}
//----------------------------------------------
@@ -24,10 +22,13 @@ func init() {
func startConsensusNet(t *testing.T, css []*ConsensusState, N int, subscribeEventRespond bool) ([]*ConsensusReactor, []chan interface{}) {
reactors := make([]*ConsensusReactor, N)
eventChans := make([]chan interface{}, N)
logger := consensusLogger()
for i := 0; i < N; i++ {
reactors[i] = NewConsensusReactor(css[i], true) // so we don't start the consensus states
reactors[i].SetLogger(logger.With("validator", i))
eventSwitch := events.NewEventSwitch()
eventSwitch.SetLogger(logger.With("module", "events", "validator", i))
_, err := eventSwitch.Start()
if err != nil {
t.Fatalf("Failed to start switch: %v", err)
@@ -41,7 +42,7 @@ func startConsensusNet(t *testing.T, css []*ConsensusState, N int, subscribeEven
}
}
// make connected switches and start all reactors
p2p.MakeConnectedSwitches(N, func(i int, s *p2p.Switch) *p2p.Switch {
p2p.MakeConnectedSwitches(config.P2P, N, func(i int, s *p2p.Switch) *p2p.Switch {
s.AddReactor("CONSENSUS", reactors[i])
return s
}, p2p.Connect2Switches)
@@ -98,7 +99,7 @@ func TestVotingPowerChange(t *testing.T) {
}, css)
//---------------------------------------------------------------------------
log.Info("---------------------------- Testing changing the voting power of one validator a few times")
t.Log("---------------------------- Testing changing the voting power of one validator a few times")
val1PubKey := css[0].privValidator.(*types.PrivValidator).PubKey
updateValidatorTx := dummy.MakeValSetChangeTx(val1PubKey.Bytes(), 25)
@@ -159,7 +160,7 @@ func TestValidatorSetChanges(t *testing.T) {
}, css)
//---------------------------------------------------------------------------
log.Info("---------------------------- Testing adding one validator")
t.Log("---------------------------- Testing adding one validator")
newValidatorPubKey1 := css[nVals].privValidator.(*types.PrivValidator).PubKey
newValidatorTx1 := dummy.MakeValSetChangeTx(newValidatorPubKey1.Bytes(), uint64(testMinPower))
@@ -185,7 +186,7 @@ func TestValidatorSetChanges(t *testing.T) {
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css)
//---------------------------------------------------------------------------
log.Info("---------------------------- Testing changing the voting power of one validator")
t.Log("---------------------------- Testing changing the voting power of one validator")
updateValidatorPubKey1 := css[nVals].privValidator.(*types.PrivValidator).PubKey
updateValidatorTx1 := dummy.MakeValSetChangeTx(updateValidatorPubKey1.Bytes(), 25)
@@ -201,7 +202,7 @@ func TestValidatorSetChanges(t *testing.T) {
}
//---------------------------------------------------------------------------
log.Info("---------------------------- Testing adding two validators at once")
t.Log("---------------------------- Testing adding two validators at once")
newValidatorPubKey2 := css[nVals+1].privValidator.(*types.PrivValidator).PubKey
newValidatorTx2 := dummy.MakeValSetChangeTx(newValidatorPubKey2.Bytes(), uint64(testMinPower))
@@ -217,7 +218,7 @@ func TestValidatorSetChanges(t *testing.T) {
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css)
//---------------------------------------------------------------------------
log.Info("---------------------------- Testing removing two validators at once")
t.Log("---------------------------- Testing removing two validators at once")
removeValidatorTx2 := dummy.MakeValSetChangeTx(newValidatorPubKey2.Bytes(), 0)
removeValidatorTx3 := dummy.MakeValSetChangeTx(newValidatorPubKey3.Bytes(), 0)
@@ -236,7 +237,7 @@ func TestReactorWithTimeoutCommit(t *testing.T) {
css := randConsensusNet(N, "consensus_reactor_with_timeout_commit_test", newMockTickerFunc(false), newCounter)
// override default SkipTimeoutCommit == true for tests
for i := 0; i < N; i++ {
css[i].timeoutParams.SkipTimeoutCommit = false
css[i].config.SkipTimeoutCommit = false
}
reactors, eventChans := startConsensusNet(t, css, N-1, false)
@@ -252,8 +253,8 @@ func TestReactorWithTimeoutCommit(t *testing.T) {
func waitForAndValidateBlock(t *testing.T, n int, activeVals map[string]struct{}, eventChans []chan interface{}, css []*ConsensusState, txs ...[]byte) {
timeoutWaitGroup(t, n, func(wg *sync.WaitGroup, j int) {
newBlockI := <-eventChans[j]
newBlock := newBlockI.(types.EventDataNewBlock).Block
log.Warn("Got block", "height", newBlock.Height, "validator", j)
newBlock := newBlockI.(types.TMEventData).Unwrap().(types.EventDataNewBlock).Block
t.Logf("[WARN] Got block height=%v validator=%v", newBlock.Height, j)
err := validateBlock(newBlock, activeVals)
if err != nil {
t.Fatal(err)
@@ -264,7 +265,6 @@ func waitForAndValidateBlock(t *testing.T, n int, activeVals map[string]struct{}
eventChans[j] <- struct{}{}
wg.Done()
log.Warn("Done wait group", "height", newBlock.Height, "validator", j)
}, css)
}


@@ -11,10 +11,10 @@ import (
"time"
abci "github.com/tendermint/abci/types"
auto "github.com/tendermint/go-autofile"
. "github.com/tendermint/go-common"
cfg "github.com/tendermint/go-config"
"github.com/tendermint/go-wire"
wire "github.com/tendermint/go-wire"
auto "github.com/tendermint/tmlibs/autofile"
cmn "github.com/tendermint/tmlibs/common"
"github.com/tendermint/tmlibs/log"
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
@@ -50,7 +50,7 @@ func (cs *ConsensusState) readReplayMessage(msgBytes []byte, newStepCh chan inte
// for logging
switch m := msg.Msg.(type) {
case types.EventDataRoundState:
log.Notice("Replay: New Step", "height", m.Height, "round", m.Round, "step", m.Step)
cs.Logger.Info("Replay: New Step", "height", m.Height, "round", m.Round, "step", m.Step)
// these are playback checks
ticker := time.After(time.Second * 2)
if newStepCh != nil {
@@ -72,19 +72,19 @@ func (cs *ConsensusState) readReplayMessage(msgBytes []byte, newStepCh chan inte
switch msg := m.Msg.(type) {
case *ProposalMessage:
p := msg.Proposal
log.Notice("Replay: Proposal", "height", p.Height, "round", p.Round, "header",
cs.Logger.Info("Replay: Proposal", "height", p.Height, "round", p.Round, "header",
p.BlockPartsHeader, "pol", p.POLRound, "peer", peerKey)
case *BlockPartMessage:
log.Notice("Replay: BlockPart", "height", msg.Height, "round", msg.Round, "peer", peerKey)
cs.Logger.Info("Replay: BlockPart", "height", msg.Height, "round", msg.Round, "peer", peerKey)
case *VoteMessage:
v := msg.Vote
log.Notice("Replay: Vote", "height", v.Height, "round", v.Round, "type", v.Type,
cs.Logger.Info("Replay: Vote", "height", v.Height, "round", v.Round, "type", v.Type,
"blockID", v.BlockID, "peer", peerKey)
}
cs.handleMsg(m, cs.RoundState)
case timeoutInfo:
log.Notice("Replay: Timeout", "height", m.Height, "round", m.Round, "step", m.Step, "dur", m.Duration)
cs.Logger.Info("Replay: Timeout", "height", m.Height, "round", m.Round, "step", m.Step, "dur", m.Duration)
cs.handleTimeout(m, cs.RoundState)
default:
return fmt.Errorf("Replay: Unknown TimedWALMessage type: %v", reflect.TypeOf(msg.Msg))
@@ -108,18 +108,18 @@ func (cs *ConsensusState) catchupReplay(csHeight int) error {
gr.Close()
}
if found {
return errors.New(Fmt("WAL should not contain #ENDHEIGHT %d.", csHeight))
return errors.New(cmn.Fmt("WAL should not contain #ENDHEIGHT %d.", csHeight))
}
// Search for last height marker
gr, found, err = cs.wal.group.Search("#ENDHEIGHT: ", makeHeightSearchFunc(csHeight-1))
if err == io.EOF {
log.Warn("Replay: wal.group.Search returned EOF", "#ENDHEIGHT", csHeight-1)
cs.Logger.Error("Replay: wal.group.Search returned EOF", "#ENDHEIGHT", csHeight-1)
// if we upgraded from 0.9 to 0.9.1, we may have #HEIGHT instead
// TODO (0.10.0): remove this
gr, found, err = cs.wal.group.Search("#HEIGHT: ", makeHeightSearchFunc(csHeight))
if err == io.EOF {
log.Warn("Replay: wal.group.Search returned EOF", "#HEIGHT", csHeight)
cs.Logger.Error("Replay: wal.group.Search returned EOF", "#HEIGHT", csHeight)
return nil
} else if err != nil {
return err
@@ -134,7 +134,7 @@ func (cs *ConsensusState) catchupReplay(csHeight int) error {
// TODO (0.10.0): remove this
gr, found, err = cs.wal.group.Search("#HEIGHT: ", makeHeightSearchFunc(csHeight))
if err == io.EOF {
log.Warn("Replay: wal.group.Search returned EOF", "#HEIGHT", csHeight)
cs.Logger.Error("Replay: wal.group.Search returned EOF", "#HEIGHT", csHeight)
return nil
} else if err != nil {
return err
@@ -143,10 +143,10 @@ func (cs *ConsensusState) catchupReplay(csHeight int) error {
}
// TODO (0.10.0): uncomment
// return errors.New(Fmt("Cannot replay height %d. WAL does not contain #ENDHEIGHT for %d.", csHeight, csHeight-1))
// return errors.New(cmn.Fmt("Cannot replay height %d. WAL does not contain #ENDHEIGHT for %d.", csHeight, csHeight-1))
}
log.Notice("Catchup by replaying consensus messages", "height", csHeight)
cs.Logger.Info("Catchup by replaying consensus messages", "height", csHeight)
for {
line, err := gr.ReadLine()
@@ -164,7 +164,7 @@ func (cs *ConsensusState) catchupReplay(csHeight int) error {
return err
}
}
log.Notice("Replay: Done")
cs.Logger.Info("Replay: Done")
return nil
}
@@ -199,49 +199,47 @@ func makeHeightSearchFunc(height int) auto.SearchFunc {
// we were last and using the WAL to recover there
type Handshaker struct {
config cfg.Config
state *sm.State
store types.BlockStore
logger log.Logger
nBlocks int // number of blocks applied to the state
}
func NewHandshaker(config cfg.Config, state *sm.State, store types.BlockStore) *Handshaker {
return &Handshaker{config, state, store, 0}
func NewHandshaker(state *sm.State, store types.BlockStore) *Handshaker {
return &Handshaker{state, store, log.NewNopLogger(), 0}
}
func (h *Handshaker) SetLogger(l log.Logger) {
h.logger = l
}
func (h *Handshaker) NBlocks() int {
return h.nBlocks
}
var ErrReplayLastBlockTimeout = errors.New("Timed out waiting for last block to be replayed")
// TODO: retry the handshake/replay if it fails ?
func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error {
// handshake is done via info request on the query conn
res, err := proxyApp.Query().InfoSync()
if err != nil {
return errors.New(Fmt("Error calling Info: %v", err))
return errors.New(cmn.Fmt("Error calling Info: %v", err))
}
blockHeight := int(res.LastBlockHeight) // XXX: beware overflow
appHash := res.LastBlockAppHash
log.Notice("ABCI Handshake", "appHeight", blockHeight, "appHash", appHash)
h.logger.Info("ABCI Handshake", "appHeight", blockHeight, "appHash", fmt.Sprintf("%X", appHash))
// TODO: check version
// replay blocks up to the latest in the blockstore
_, err = h.ReplayBlocks(appHash, blockHeight, proxyApp)
if err == ErrReplayLastBlockTimeout {
log.Warn("Failed to sync via handshake. Trying other means. If they fail, please increase the timeout_handshake parameter")
return nil
} else if err != nil {
return errors.New(Fmt("Error on replay: %v", err))
if err != nil {
return errors.New(cmn.Fmt("Error on replay: %v", err))
}
log.Notice("Completed ABCI Handshake - Tendermint and App are synced", "appHeight", blockHeight, "appHash", appHash)
h.logger.Info("Completed ABCI Handshake - Tendermint and App are synced", "appHeight", blockHeight, "appHash", fmt.Sprintf("%X", appHash))
// TODO: (on restart) replay mempool
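
With the config argument gone, a Handshaker is now built from just the state and block store, given a logger via SetLogger, and handed to proxy.NewAppConns so the handshake runs when the connections start. A rough wiring sketch under those assumptions (the proxy.ClientCreator parameter type is assumed here, not shown in this diff):

package consensus

import (
	"github.com/tendermint/tendermint/proxy"
	sm "github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/types"
	"github.com/tendermint/tmlibs/log"
)

func startAppWithHandshake(state *sm.State, store types.BlockStore,
	cc proxy.ClientCreator, logger log.Logger) (proxy.AppConns, error) {

	h := NewHandshaker(state, store)
	h.SetLogger(logger.With("module", "consensus"))

	proxyApp := proxy.NewAppConns(cc, h) // handshake runs when the conns start
	if _, err := proxyApp.Start(); err != nil {
		return nil, err
	}
	return proxyApp, nil
}
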
@@ -254,7 +252,13 @@ func (h *Handshaker) ReplayBlocks(appHash []byte, appBlockHeight int, proxyApp p
storeBlockHeight := h.store.Height()
stateBlockHeight := h.state.LastBlockHeight
log.Notice("ABCI Replay Blocks", "appHeight", appBlockHeight, "storeHeight", storeBlockHeight, "stateHeight", stateBlockHeight)
h.logger.Info("ABCI Replay Blocks", "appHeight", appBlockHeight, "storeHeight", storeBlockHeight, "stateHeight", stateBlockHeight)
// If appBlockHeight == 0 it means that we are at genesis and hence should send InitChain
if appBlockHeight == 0 {
validators := types.TM2PB.Validators(h.state.Validators)
proxyApp.Consensus().InitChainSync(validators)
}
// First handle edge cases and constraints on the storeBlockHeight
if storeBlockHeight == 0 {
@@ -266,11 +270,11 @@ func (h *Handshaker) ReplayBlocks(appHash []byte, appBlockHeight int, proxyApp p
} else if storeBlockHeight < stateBlockHeight {
// the state should never be ahead of the store (this is under tendermint's control)
PanicSanity(Fmt("StateBlockHeight (%d) > StoreBlockHeight (%d)", stateBlockHeight, storeBlockHeight))
cmn.PanicSanity(cmn.Fmt("StateBlockHeight (%d) > StoreBlockHeight (%d)", stateBlockHeight, storeBlockHeight))
} else if storeBlockHeight > stateBlockHeight+1 {
// store should be at most one ahead of the state (this is under tendermint's control)
PanicSanity(Fmt("StoreBlockHeight (%d) > StateBlockHeight + 1 (%d)", storeBlockHeight, stateBlockHeight+1))
cmn.PanicSanity(cmn.Fmt("StoreBlockHeight (%d) > StateBlockHeight + 1 (%d)", storeBlockHeight, stateBlockHeight+1))
}
// Now either store is equal to state, or one ahead.
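
Before choosing a replay path, ReplayBlocks still enforces two invariants: the persisted state can never be ahead of the block store, and the store can lead the state by at most one block (Commit ran but the state save did not complete). A standalone restatement of just those checks:

package main

import "fmt"

// checkReplayHeights restates the sanity constraints above; it is not the
// actual Handshaker code, which panics via cmn.PanicSanity instead.
func checkReplayHeights(storeHeight, stateHeight int) error {
	switch {
	case storeHeight < stateHeight:
		return fmt.Errorf("StateBlockHeight (%d) > StoreBlockHeight (%d)", stateHeight, storeHeight)
	case storeHeight > stateHeight+1:
		return fmt.Errorf("StoreBlockHeight (%d) > StateBlockHeight + 1 (%d)", storeHeight, stateHeight+1)
	}
	return nil
}

func main() {
	fmt.Println(checkReplayHeights(10, 10)) // ok: store == state
	fmt.Println(checkReplayHeights(11, 10)) // ok: store one ahead
	fmt.Println(checkReplayHeights(9, 10))  // error: state ahead of store
}
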
@@ -300,20 +304,20 @@ func (h *Handshaker) ReplayBlocks(appHash []byte, appBlockHeight int, proxyApp p
// so replayBlock with the real app.
// NOTE: We could instead use the cs.WAL on cs.Start,
// but we'd have to allow the WAL to replay a block that wrote its ENDHEIGHT
log.Info("Replay last block using real app")
h.logger.Info("Replay last block using real app")
return h.replayBlock(storeBlockHeight, proxyApp.Consensus())
} else if appBlockHeight == storeBlockHeight {
// We ran Commit, but didn't save the state, so replayBlock with mock app
abciResponses := h.state.LoadABCIResponses()
mockApp := newMockProxyApp(appHash, abciResponses)
log.Info("Replay last block using mock app")
h.logger.Info("Replay last block using mock app")
return h.replayBlock(storeBlockHeight, mockApp)
}
}
PanicSanity("Should never happen")
cmn.PanicSanity("Should never happen")
return nil, nil
}
@@ -331,9 +335,9 @@ func (h *Handshaker) replayBlocks(proxyApp proxy.AppConns, appBlockHeight, store
finalBlock -= 1
}
for i := appBlockHeight + 1; i <= finalBlock; i++ {
log.Info("Applying block", "height", i)
h.logger.Info("Applying block", "height", i)
block := h.store.LoadBlock(i)
appHash, err = sm.ExecCommitBlock(proxyApp.Consensus(), block)
appHash, err = sm.ExecCommitBlock(proxyApp.Consensus(), block, h.logger)
if err != nil {
return nil, err
}
@@ -368,7 +372,7 @@ func (h *Handshaker) replayBlock(height int, proxyApp proxy.AppConnConsensus) ([
func (h *Handshaker) checkAppHash(appHash []byte) error {
if !bytes.Equal(h.state.AppHash, appHash) {
panic(errors.New(Fmt("Tendermint state.AppHash does not match AppHash after replay. Got %X, expected %X", appHash, h.state.AppHash)).Error())
panic(errors.New(cmn.Fmt("Tendermint state.AppHash does not match AppHash after replay. Got %X, expected %X", appHash, h.state.AppHash)).Error())
return nil
}
return nil
@@ -384,6 +388,7 @@ func newMockProxyApp(appHash []byte, abciResponses *sm.ABCIResponses) proxy.AppC
abciResponses: abciResponses,
})
cli, _ := clientCreator.NewABCIClient()
cli.Start()
return proxy.NewAppConnConsensus(cli)
}


@@ -8,24 +8,23 @@ import (
"strconv"
"strings"
. "github.com/tendermint/go-common"
cfg "github.com/tendermint/go-config"
dbm "github.com/tendermint/go-db"
bc "github.com/tendermint/tendermint/blockchain"
mempl "github.com/tendermint/tendermint/mempool"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
cmn "github.com/tendermint/tmlibs/common"
dbm "github.com/tendermint/tmlibs/db"
)
//--------------------------------------------------------
// replay messages interactively or all at once
func RunReplayFile(config cfg.Config, walFile string, console bool) {
consensusState := newConsensusStateForReplay(config)
func RunReplayFile(config cfg.BaseConfig, csConfig *cfg.ConsensusConfig, console bool) {
consensusState := newConsensusStateForReplay(config, csConfig)
if err := consensusState.ReplayFile(walFile, console); err != nil {
Exit(Fmt("Error during consensus replay: %v", err))
if err := consensusState.ReplayFile(csConfig.WalFile(), console); err != nil {
cmn.Exit(cmn.Fmt("Error during consensus replay: %v", err))
}
}
@@ -114,7 +113,7 @@ func (pb *playback) replayReset(count int, newStepCh chan interface{}) error {
pb.fp = fp
pb.scanner = bufio.NewScanner(fp)
count = pb.count - count
log.Notice(Fmt("Reseting from %d to %d", pb.count, count))
fmt.Printf("Reseting from %d to %d\n", pb.count, count)
pb.count = 0
pb.cs = newCS
for i := 0; pb.scanner.Scan() && i < count; i++ {
@@ -127,8 +126,7 @@ func (pb *playback) replayReset(count int, newStepCh chan interface{}) error {
}
func (cs *ConsensusState) startForReplay() {
log.Warn("Replay commands are disabled until someone updates them and writes tests")
cs.Logger.Error("Replay commands are disabled until someone updates them and writes tests")
/* TODO:!
// since we replay tocks we just ignore ticks
go func() {
@@ -149,9 +147,9 @@ func (pb *playback) replayConsoleLoop() int {
bufReader := bufio.NewReader(os.Stdin)
line, more, err := bufReader.ReadLine()
if more {
Exit("input is too long")
cmn.Exit("input is too long")
} else if err != nil {
Exit(err.Error())
cmn.Exit(err.Error())
}
tokens := strings.Split(string(line), " ")
@@ -236,34 +234,31 @@ func (pb *playback) replayConsoleLoop() int {
//--------------------------------------------------------------------------------
// convenience for replay mode
func newConsensusStateForReplay(config cfg.Config) *ConsensusState {
func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusConfig) *ConsensusState {
// Get BlockStore
blockStoreDB := dbm.NewDB("blockstore", config.GetString("db_backend"), config.GetString("db_dir"))
blockStoreDB := dbm.NewDB("blockstore", config.DBBackend, config.DBDir())
blockStore := bc.NewBlockStore(blockStoreDB)
// Get State
stateDB := dbm.NewDB("state", config.GetString("db_backend"), config.GetString("db_dir"))
state := sm.MakeGenesisStateFromFile(stateDB, config.GetString("genesis_file"))
stateDB := dbm.NewDB("state", config.DBBackend, config.DBDir())
state := sm.MakeGenesisStateFromFile(stateDB, config.GenesisFile())
// Create proxyAppConn connection (consensus, mempool, query)
proxyApp := proxy.NewAppConns(config, proxy.DefaultClientCreator(config), NewHandshaker(config, state, blockStore))
clientCreator := proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir())
proxyApp := proxy.NewAppConns(clientCreator, NewHandshaker(state, blockStore))
_, err := proxyApp.Start()
if err != nil {
Exit(Fmt("Error starting proxy app conns: %v", err))
cmn.Exit(cmn.Fmt("Error starting proxy app conns: %v", err))
}
// add the chainid to the global config
config.Set("chain_id", state.ChainID)
// Make event switch
eventSwitch := types.NewEventSwitch()
if _, err := eventSwitch.Start(); err != nil {
Exit(Fmt("Failed to start event switch: %v", err))
cmn.Exit(cmn.Fmt("Failed to start event switch: %v", err))
}
mempool := mempl.NewMempool(config, proxyApp.Mempool())
consensusState := NewConsensusState(csConfig, state.Copy(), proxyApp.Consensus(), blockStore, types.MockMempool{})
consensusState := NewConsensusState(config, state.Copy(), proxyApp.Consensus(), blockStore, mempool)
consensusState.SetEventSwitch(eventSwitch)
return consensusState
}


@@ -12,21 +12,21 @@ import (
"testing"
"time"
"github.com/tendermint/tendermint/config/tendermint_test"
"github.com/tendermint/abci/example/dummy"
cmn "github.com/tendermint/go-common"
cfg "github.com/tendermint/go-config"
"github.com/tendermint/go-crypto"
dbm "github.com/tendermint/go-db"
"github.com/tendermint/go-wire"
crypto "github.com/tendermint/go-crypto"
wire "github.com/tendermint/go-wire"
cmn "github.com/tendermint/tmlibs/common"
dbm "github.com/tendermint/tmlibs/db"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tmlibs/log"
)
func init() {
config = tendermint_test.ResetConfig("consensus_replay_test")
config = ResetConfig("consensus_replay_test")
}
// These tests ensure we can always recover from failure at any part of the consensus process.
@@ -37,6 +37,12 @@ func init() {
// the `Handshake Tests` are for failures in applying the block.
// With the help of the WAL, we can recover from it all!
// NOTE: Files in this dir are generated by running the `build.sh` therein.
// It's a simple way to generate wals for a single block, or multiple blocks, with random transactions,
// and different part sizes. The output is not deterministic, and the stepChanges may need to be adjusted
// after running it (eg. sometimes small_block2 will have 5 block parts, sometimes 6).
// It should only have to be re-run if there is some breaking change to the consensus data structures (eg. blocks, votes)
// or to the behaviour of the app (eg. computes app hash differently)
var data_dir = path.Join(cmn.GoPath, "src/github.com/tendermint/tendermint/consensus", "test_data")
//------------------------------------------------------------------------------------------
@@ -53,7 +59,7 @@ var baseStepChanges = []int{3, 6, 8}
var testCases = []*testCase{
newTestCase("empty_block", baseStepChanges), // empty block (has 1 block part)
newTestCase("small_block1", baseStepChanges), // small block with txs in 1 block part
newTestCase("small_block2", []int{3, 10, 12}), // small block with txs across 5 smaller block parts
newTestCase("small_block2", []int{3, 11, 13}), // small block with txs across 6 smaller block parts
}
type testCase struct {
@@ -130,8 +136,14 @@ func waitForBlock(newBlockCh chan interface{}, thisCase *testCase, i int) {
func runReplayTest(t *testing.T, cs *ConsensusState, walFile string, newBlockCh chan interface{},
thisCase *testCase, i int) {
cs.config.Set("cs_wal_file", walFile)
cs.Start()
cs.config.SetWalFile(walFile)
started, err := cs.Start()
if err != nil {
t.Fatalf("Cannot start consensus: %v", err)
}
if !started {
t.Error("Consensus did not start")
}
// Wait to make a new block.
// This is just a signal that we haven't halted; it's not something contained in the WAL itself.
// Assuming the consensus state is running, replay of any WAL, including the empty one,
@@ -154,9 +166,9 @@ func toPV(pv PrivValidator) *types.PrivValidator {
return pv.(*types.PrivValidator)
}
func setupReplayTest(thisCase *testCase, nLines int, crashAfter bool) (*ConsensusState, chan interface{}, string, string) {
fmt.Println("-------------------------------------")
log.Notice(cmn.Fmt("Starting replay test %v (of %d lines of WAL). Crash after = %v", thisCase.name, nLines, crashAfter))
func setupReplayTest(t *testing.T, thisCase *testCase, nLines int, crashAfter bool) (*ConsensusState, chan interface{}, string, string) {
t.Log("-------------------------------------")
t.Logf("Starting replay test %v (of %d lines of WAL). Crash after = %v", thisCase.name, nLines, crashAfter)
lineStep := nLines
if crashAfter {
@@ -175,7 +187,7 @@ func setupReplayTest(thisCase *testCase, nLines int, crashAfter bool) (*Consensu
toPV(cs.privValidator).LastHeight = 1 // first block
toPV(cs.privValidator).LastStep = thisCase.stepMap[lineStep]
log.Warn("setupReplayTest", "LastStep", toPV(cs.privValidator).LastStep)
t.Logf("[WARN] setupReplayTest LastStep=%v", toPV(cs.privValidator).LastStep)
newBlockCh := subscribeToEvent(cs.evsw, "tester", types.EventStringNewBlock(), 1)
@@ -200,7 +212,7 @@ func TestWALCrashAfterWrite(t *testing.T) {
for _, thisCase := range testCases {
split := strings.Split(thisCase.log, "\n")
for i := 0; i < len(split)-1; i++ {
cs, newBlockCh, _, walFile := setupReplayTest(thisCase, i+1, true)
cs, newBlockCh, _, walFile := setupReplayTest(t, thisCase, i+1, true)
runReplayTest(t, cs, walFile, newBlockCh, thisCase, i+1)
}
}
@@ -214,7 +226,7 @@ func TestWALCrashBeforeWritePropose(t *testing.T) {
for _, thisCase := range testCases {
lineNum := thisCase.proposeLine
// setup replay test where last message is a proposal
cs, newBlockCh, proposalMsg, walFile := setupReplayTest(thisCase, lineNum, false)
cs, newBlockCh, proposalMsg, walFile := setupReplayTest(t, thisCase, lineNum, false)
msg := readTimedWALMessage(t, proposalMsg)
proposal := msg.Msg.(msgInfo).Msg.(*ProposalMessage)
// Set LastSig
@@ -238,7 +250,7 @@ func TestWALCrashBeforeWritePrecommit(t *testing.T) {
func testReplayCrashBeforeWriteVote(t *testing.T, thisCase *testCase, lineNum int, eventString string) {
// setup replay test where last message is a vote
cs, newBlockCh, voteMsg, walFile := setupReplayTest(thisCase, lineNum, false)
cs, newBlockCh, voteMsg, walFile := setupReplayTest(t, thisCase, lineNum, false)
types.AddListenerForEvent(cs.evsw, "tester", eventString, func(data types.TMEventData) {
msg := readTimedWALMessage(t, voteMsg)
vote := msg.Msg.(msgInfo).Msg.(*VoteMessage)
@@ -297,7 +309,7 @@ func TestHandshakeReplayNone(t *testing.T) {
// Make some blocks. Start a fresh app and apply nBlocks blocks. Then restart the app and sync it up with the remaining blocks
func testHandshakeReplay(t *testing.T, nBlocks int, mode uint) {
config := tendermint_test.ResetConfig("proxy_test_")
config := ResetConfig("proxy_test_")
// copy the many_blocks file
walBody, err := cmn.ReadFile(path.Join(data_dir, "many_blocks.cswal"))
@@ -305,15 +317,19 @@ func testHandshakeReplay(t *testing.T, nBlocks int, mode uint) {
t.Fatal(err)
}
walFile := writeWAL(string(walBody))
config.Set("cs_wal_file", walFile)
config.Consensus.SetWalFile(walFile)
privVal := types.LoadPrivValidator(config.GetString("priv_validator_file"))
testPartSize = config.GetInt("block_part_size")
privVal := types.LoadPrivValidator(config.PrivValidatorFile())
testPartSize = config.Consensus.BlockPartSize
wal, err := NewWAL(walFile, false)
if err != nil {
t.Fatal(err)
}
wal.SetLogger(log.TestingLogger())
if _, err := wal.Start(); err != nil {
t.Fatal(err)
}
chain, commits, err := makeBlockchainFromWAL(wal)
if err != nil {
t.Fatalf(err.Error())
@@ -327,19 +343,19 @@ func testHandshakeReplay(t *testing.T, nBlocks int, mode uint) {
latestAppHash := buildTMStateFromChain(config, state, chain, mode)
// make a new client creator
dummyApp := dummy.NewPersistentDummyApplication(path.Join(config.GetString("db_dir"), "2"))
dummyApp := dummy.NewPersistentDummyApplication(path.Join(config.DBDir(), "2"))
clientCreator2 := proxy.NewLocalClientCreator(dummyApp)
if nBlocks > 0 {
// run nBlocks against a new client to build up the app state.
// use a throwaway tendermint state
proxyApp := proxy.NewAppConns(config, clientCreator2, nil)
proxyApp := proxy.NewAppConns(clientCreator2, nil)
state, _ := stateAndStore(config, privVal.PubKey)
buildAppStateFromChain(proxyApp, state, chain, nBlocks, mode)
}
// now start the app using the handshake - it should sync
handshaker := NewHandshaker(config, state, store)
proxyApp := proxy.NewAppConns(config, clientCreator2, handshaker)
handshaker := NewHandshaker(state, store)
proxyApp := proxy.NewAppConns(clientCreator2, handshaker)
if _, err := proxyApp.Start(); err != nil {
t.Fatalf("Error starting proxy app connections: %v", err)
}
@@ -380,6 +396,10 @@ func buildAppStateFromChain(proxyApp proxy.AppConns,
if _, err := proxyApp.Start(); err != nil {
panic(err)
}
validators := types.TM2PB.Validators(state.Validators)
proxyApp.Consensus().InitChainSync(validators)
defer proxyApp.Stop()
switch mode {
case 0:
@@ -402,15 +422,18 @@ func buildAppStateFromChain(proxyApp proxy.AppConns,
}
func buildTMStateFromChain(config cfg.Config, state *sm.State, chain []*types.Block, mode uint) []byte {
func buildTMStateFromChain(config *cfg.Config, state *sm.State, chain []*types.Block, mode uint) []byte {
// run the whole chain against this client to build up the tendermint state
clientCreator := proxy.NewLocalClientCreator(dummy.NewPersistentDummyApplication(path.Join(config.GetString("db_dir"), "1")))
proxyApp := proxy.NewAppConns(config, clientCreator, nil) // sm.NewHandshaker(config, state, store, ReplayLastBlock))
clientCreator := proxy.NewLocalClientCreator(dummy.NewPersistentDummyApplication(path.Join(config.DBDir(), "1")))
proxyApp := proxy.NewAppConns(clientCreator, nil) // sm.NewHandshaker(config, state, store, ReplayLastBlock))
if _, err := proxyApp.Start(); err != nil {
panic(err)
}
defer proxyApp.Stop()
validators := types.TM2PB.Validators(state.Validators)
proxyApp.Consensus().InitChainSync(validators)
var latestAppHash []byte
switch mode {
@@ -452,7 +475,7 @@ func makeBlockchainFromWAL(wal *WAL) ([]*types.Block, []*types.Commit, error) {
}
defer gr.Close()
log.Notice("Build a blockchain by reading from the WAL")
// log.Notice("Build a blockchain by reading from the WAL")
var blockParts *types.PartSet
var blocks []*types.Block
@@ -596,28 +619,26 @@ func makeBlockchain(t *testing.T, chainID string, nBlocks int, privVal *types.Pr
}
// fresh state and mock store
func stateAndStore(config cfg.Config, pubKey crypto.PubKey) (*sm.State, *mockBlockStore) {
func stateAndStore(config *cfg.Config, pubKey crypto.PubKey) (*sm.State, *mockBlockStore) {
stateDB := dbm.NewMemDB()
return sm.MakeGenesisState(stateDB, &types.GenesisDoc{
ChainID: config.GetString("chain_id"),
Validators: []types.GenesisValidator{
types.GenesisValidator{pubKey, 10000, "test"},
},
AppHash: nil,
}), NewMockBlockStore(config)
state := sm.MakeGenesisStateFromFile(stateDB, config.GenesisFile())
state.SetLogger(log.TestingLogger().With("module", "state"))
store := NewMockBlockStore(config)
return state, store
}
//----------------------------------
// mock block store
type mockBlockStore struct {
config cfg.Config
config *cfg.Config
chain []*types.Block
commits []*types.Commit
}
// TODO: NewBlockStore(db.NewMemDB) ...
func NewMockBlockStore(config cfg.Config) *mockBlockStore {
func NewMockBlockStore(config *cfg.Config) *mockBlockStore {
return &mockBlockStore{config, nil, nil}
}
@@ -626,7 +647,7 @@ func (bs *mockBlockStore) LoadBlock(height int) *types.Block { return bs.chain[h
func (bs *mockBlockStore) LoadBlockMeta(height int) *types.BlockMeta {
block := bs.chain[height-1]
return &types.BlockMeta{
BlockID: types.BlockID{block.Hash(), block.MakePartSet(bs.config.GetInt("block_part_size")).Header()},
BlockID: types.BlockID{block.Hash(), block.MakePartSet(bs.config.Consensus.BlockPartSize).Header()},
Header: block.Header,
}
}


@@ -9,65 +9,18 @@ import (
"sync"
"time"
"github.com/ebuchman/fail-test"
. "github.com/tendermint/go-common"
cfg "github.com/tendermint/go-config"
"github.com/tendermint/go-wire"
fail "github.com/ebuchman/fail-test"
wire "github.com/tendermint/go-wire"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
cmn "github.com/tendermint/tmlibs/common"
"github.com/tendermint/tmlibs/log"
)
//-----------------------------------------------------------------------------
// Timeout Parameters
// TimeoutParams holds timeouts and deltas for each round step.
// All timeouts and deltas in milliseconds.
type TimeoutParams struct {
Propose0 int
ProposeDelta int
Prevote0 int
PrevoteDelta int
Precommit0 int
PrecommitDelta int
Commit0 int
SkipTimeoutCommit bool
}
// Wait this long for a proposal
func (tp *TimeoutParams) Propose(round int) time.Duration {
return time.Duration(tp.Propose0+tp.ProposeDelta*round) * time.Millisecond
}
// After receiving any +2/3 prevote, wait this long for stragglers
func (tp *TimeoutParams) Prevote(round int) time.Duration {
return time.Duration(tp.Prevote0+tp.PrevoteDelta*round) * time.Millisecond
}
// After receiving any +2/3 precommits, wait this long for stragglers
func (tp *TimeoutParams) Precommit(round int) time.Duration {
return time.Duration(tp.Precommit0+tp.PrecommitDelta*round) * time.Millisecond
}
// After receiving +2/3 precommits for a single block (a commit), wait this long for stragglers in the next height's RoundStepNewHeight
func (tp *TimeoutParams) Commit(t time.Time) time.Time {
return t.Add(time.Duration(tp.Commit0) * time.Millisecond)
}
// InitTimeoutParamsFromConfig initializes parameters from config
func InitTimeoutParamsFromConfig(config cfg.Config) *TimeoutParams {
return &TimeoutParams{
Propose0: config.GetInt("timeout_propose"),
ProposeDelta: config.GetInt("timeout_propose_delta"),
Prevote0: config.GetInt("timeout_prevote"),
PrevoteDelta: config.GetInt("timeout_prevote_delta"),
Precommit0: config.GetInt("timeout_precommit"),
PrecommitDelta: config.GetInt("timeout_precommit_delta"),
Commit0: config.GetInt("timeout_commit"),
SkipTimeoutCommit: config.GetBool("skip_timeout_commit"),
}
}
// Config
//-----------------------------------------------------------------------------
// Errors
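
The removed TimeoutParams struct moves into *cfg.ConsensusConfig, but the escalation formula stays the same: each timeout grows linearly with the round number. A worked example, assuming 3000ms for timeout_propose and 500ms for timeout_propose_delta (illustrative values, not read from any config here):

package main

import (
	"fmt"
	"time"
)

// proposeTimeout reproduces Propose0 + ProposeDelta*round (in milliseconds);
// the 3000/500 values are assumed for illustration.
func proposeTimeout(round int) time.Duration {
	const propose0, proposeDelta = 3000, 500
	return time.Duration(propose0+proposeDelta*round) * time.Millisecond
}

func main() {
	for round := 0; round < 3; round++ {
		fmt.Printf("round %d: %v\n", round, proposeTimeout(round))
	}
	// round 0: 3s, round 1: 3.5s, round 2: 4s
}
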
@@ -222,40 +175,50 @@ type PrivValidator interface {
// Tracks consensus state across block heights and rounds.
type ConsensusState struct {
BaseService
cmn.BaseService
config cfg.Config
// config details
config *cfg.ConsensusConfig
privValidator PrivValidator // for signing votes
// services for creating and executing blocks
proxyAppConn proxy.AppConnConsensus
blockStore types.BlockStore
mempool types.Mempool
privValidator PrivValidator // for signing votes
// internal state
mtx sync.Mutex
RoundState
state *sm.State // State until height-1.
peerMsgQueue chan msgInfo // serializes msgs affecting state (proposals, block parts, votes)
internalMsgQueue chan msgInfo // like peerMsgQueue but for our own proposals, parts, votes
timeoutTicker TimeoutTicker // ticker for timeouts
timeoutParams *TimeoutParams // parameters and functions for timeout intervals
// state changes may be triggered by msgs from peers,
// msgs from ourself, or by timeouts
peerMsgQueue chan msgInfo
internalMsgQueue chan msgInfo
timeoutTicker TimeoutTicker
// we use PubSub to trigger msg broadcasts in the reactor,
// and to notify external subscribers, eg. through a websocket
evsw types.EventSwitch
// a Write-Ahead Log ensures we can recover from any kind of crash
// and helps us avoid signing conflicting votes
wal *WAL
replayMode bool // so we don't log signing errors during replay
nSteps int // used for testing to limit the number of transitions the state makes
// for tests where we want to limit the number of transitions the state makes
nSteps int
// allow certain function to be overwritten for testing
// some functions can be overwritten for testing
decideProposal func(height, round int)
doPrevote func(height, round int)
setProposal func(proposal *types.Proposal) error
// closed when we finish shutting down
done chan struct{}
}
func NewConsensusState(config cfg.Config, state *sm.State, proxyAppConn proxy.AppConnConsensus, blockStore types.BlockStore, mempool types.Mempool) *ConsensusState {
func NewConsensusState(config *cfg.ConsensusConfig, state *sm.State, proxyAppConn proxy.AppConnConsensus, blockStore types.BlockStore, mempool types.Mempool) *ConsensusState {
cs := &ConsensusState{
config: config,
proxyAppConn: proxyAppConn,
@@ -264,7 +227,6 @@ func NewConsensusState(config cfg.Config, state *sm.State, proxyAppConn proxy.Ap
peerMsgQueue: make(chan msgInfo, msgQueueSize),
internalMsgQueue: make(chan msgInfo, msgQueueSize),
timeoutTicker: NewTimeoutTicker(),
timeoutParams: InitTimeoutParamsFromConfig(config),
done: make(chan struct{}),
}
// set function defaults (may be overwritten before calling Start)
@@ -276,13 +238,19 @@ func NewConsensusState(config cfg.Config, state *sm.State, proxyAppConn proxy.Ap
// Don't call scheduleRound0 yet.
// We do that upon Start().
cs.reconstructLastCommit(state)
cs.BaseService = *NewBaseService(log, "ConsensusState", cs)
cs.BaseService = *cmn.NewBaseService(nil, "ConsensusState", cs)
return cs
}
//----------------------------------------
// Public interface
// SetLogger implements Service.
func (cs *ConsensusState) SetLogger(l log.Logger) {
cs.BaseService.Logger = l
cs.timeoutTicker.SetLogger(l)
}
// SetEventSwitch implements events.Eventable
func (cs *ConsensusState) SetEventSwitch(evsw types.EventSwitch) {
cs.evsw = evsw
@@ -290,7 +258,7 @@ func (cs *ConsensusState) SetEventSwitch(evsw types.EventSwitch) {
func (cs *ConsensusState) String() string {
// better not to access shared variables
return Fmt("ConsensusState") //(H:%v R:%v S:%v", cs.Height, cs.Round, cs.Step)
return cmn.Fmt("ConsensusState") //(H:%v R:%v S:%v", cs.Height, cs.Round, cs.Step)
}
func (cs *ConsensusState) GetState() *sm.State {
@@ -341,9 +309,9 @@ func (cs *ConsensusState) LoadCommit(height int) *types.Commit {
func (cs *ConsensusState) OnStart() error {
walFile := cs.config.GetString("cs_wal_file")
walFile := cs.config.WalFile()
if err := cs.OpenWAL(walFile); err != nil {
log.Error("Error loading ConsensusState wal", "error", err.Error())
cs.Logger.Error("Error loading ConsensusState wal", "error", err.Error())
return err
}
@@ -357,7 +325,7 @@ func (cs *ConsensusState) OnStart() error {
// we may have lost some votes if the process crashed
// reload from consensus log to catchup
if err := cs.catchupReplay(cs.Height); err != nil {
log.Error("Error on catchup replay. Proceeding to start ConsensusState anyway", "error", err.Error())
cs.Logger.Error("Error on catchup replay. Proceeding to start ConsensusState anyway", "error", err.Error())
// NOTE: if we ever do return an error here,
// make sure to stop the timeoutTicker
}
@@ -398,18 +366,22 @@ func (cs *ConsensusState) Wait() {
// Open file to log all consensus messages and timeouts for deterministic accountability
func (cs *ConsensusState) OpenWAL(walFile string) (err error) {
err = EnsureDir(path.Dir(walFile), 0700)
err = cmn.EnsureDir(path.Dir(walFile), 0700)
if err != nil {
log.Error("Error ensuring ConsensusState wal dir", "error", err.Error())
cs.Logger.Error("Error ensuring ConsensusState wal dir", "error", err.Error())
return err
}
cs.mtx.Lock()
defer cs.mtx.Unlock()
wal, err := NewWAL(walFile, cs.config.GetBool("cs_wal_light"))
wal, err := NewWAL(walFile, cs.config.WalLight)
if err != nil {
return err
}
wal.SetLogger(cs.Logger.With("wal", walFile))
if _, err := wal.Start(); err != nil {
return err
}
cs.wal = wal
return nil
}
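OpenWAL now attaches the injected logger and starts the WAL itself, so callers only pass the path; usage mirrors OnStart above:
if err := cs.OpenWAL(cs.config.WalFile()); err != nil {
	cs.Logger.Error("Error loading ConsensusState wal", "error", err.Error())
	return err
}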
@@ -481,7 +453,7 @@ func (cs *ConsensusState) updateRoundStep(round int, step RoundStepType) {
// enterNewRound(height, 0) at cs.StartTime.
func (cs *ConsensusState) scheduleRound0(rs *RoundState) {
//log.Info("scheduleRound0", "now", time.Now(), "startTime", cs.StartTime)
//cs.Logger.Info("scheduleRound0", "now", time.Now(), "startTime", cs.StartTime)
sleepDuration := rs.StartTime.Sub(time.Now())
cs.scheduleTimeout(sleepDuration, rs.Height, 0, RoundStepNewHeight)
}
@@ -500,7 +472,7 @@ func (cs *ConsensusState) sendInternalMessage(mi msgInfo) {
// be processed out of order.
// TODO: use CList here for strict determinism and
// attempt push to internalMsgQueue in receiveRoutine
log.Warn("Internal msg queue is full. Using a go-routine")
cs.Logger.Info("Internal msg queue is full. Using a go-routine")
go func() { cs.internalMsgQueue <- mi }()
}
}
@@ -512,18 +484,18 @@ func (cs *ConsensusState) reconstructLastCommit(state *sm.State) {
return
}
seenCommit := cs.blockStore.LoadSeenCommit(state.LastBlockHeight)
lastPrecommits := types.NewVoteSet(cs.config.GetString("chain_id"), state.LastBlockHeight, seenCommit.Round(), types.VoteTypePrecommit, state.LastValidators)
lastPrecommits := types.NewVoteSet(cs.state.ChainID, state.LastBlockHeight, seenCommit.Round(), types.VoteTypePrecommit, state.LastValidators)
for _, precommit := range seenCommit.Precommits {
if precommit == nil {
continue
}
added, err := lastPrecommits.AddVote(precommit)
if !added || err != nil {
PanicCrisis(Fmt("Failed to reconstruct LastCommit: %v", err))
cmn.PanicCrisis(cmn.Fmt("Failed to reconstruct LastCommit: %v", err))
}
}
if !lastPrecommits.HasTwoThirdsMajority() {
PanicSanity("Failed to reconstruct LastCommit: Does not have +2/3 maj")
cmn.PanicSanity("Failed to reconstruct LastCommit: Does not have +2/3 maj")
}
cs.LastCommit = lastPrecommits
}
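The chain ID is now read from the consensus state rather than a string-keyed config lookup; the two call sites changed in this file are:
lastPrecommits := types.NewVoteSet(cs.state.ChainID, state.LastBlockHeight, seenCommit.Round(), types.VoteTypePrecommit, state.LastValidators)
cs.Votes = NewHeightVoteSet(state.ChainID, height, validators)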
@@ -532,13 +504,13 @@ func (cs *ConsensusState) reconstructLastCommit(state *sm.State) {
// The round becomes 0 and cs.Step becomes RoundStepNewHeight.
func (cs *ConsensusState) updateToState(state *sm.State) {
if cs.CommitRound > -1 && 0 < cs.Height && cs.Height != state.LastBlockHeight {
PanicSanity(Fmt("updateToState() expected state height of %v but found %v",
cmn.PanicSanity(cmn.Fmt("updateToState() expected state height of %v but found %v",
cs.Height, state.LastBlockHeight))
}
if cs.state != nil && cs.state.LastBlockHeight+1 != cs.Height {
// This might happen when someone else is mutating cs.state.
// Someone forgot to pass in state.Copy() somewhere?!
PanicSanity(Fmt("Inconsistent cs.state.LastBlockHeight+1 %v vs cs.Height %v",
cmn.PanicSanity(cmn.Fmt("Inconsistent cs.state.LastBlockHeight+1 %v vs cs.Height %v",
cs.state.LastBlockHeight+1, cs.Height))
}
@@ -546,7 +518,7 @@ func (cs *ConsensusState) updateToState(state *sm.State) {
// This happens when SwitchToConsensus() is called in the reactor.
// We don't want to reset e.g. the Votes.
if cs.state != nil && (state.LastBlockHeight <= cs.state.LastBlockHeight) {
log.Notice("Ignoring updateToState()", "newHeight", state.LastBlockHeight+1, "oldHeight", cs.state.LastBlockHeight+1)
cs.Logger.Info("Ignoring updateToState()", "newHeight", state.LastBlockHeight+1, "oldHeight", cs.state.LastBlockHeight+1)
return
}
@@ -555,7 +527,7 @@ func (cs *ConsensusState) updateToState(state *sm.State) {
lastPrecommits := (*types.VoteSet)(nil)
if cs.CommitRound > -1 && cs.Votes != nil {
if !cs.Votes.Precommits(cs.CommitRound).HasTwoThirdsMajority() {
PanicSanity("updateToState(state) called but last Precommit round didn't have +2/3")
cmn.PanicSanity("updateToState(state) called but last Precommit round didn't have +2/3")
}
lastPrecommits = cs.Votes.Precommits(cs.CommitRound)
}
@@ -572,9 +544,9 @@ func (cs *ConsensusState) updateToState(state *sm.State) {
// to be gathered for the first block.
// An alternative solution that relies on clocks:
// cs.StartTime = state.LastBlockTime.Add(timeoutCommit)
cs.StartTime = cs.timeoutParams.Commit(time.Now())
cs.StartTime = cs.config.Commit(time.Now())
} else {
cs.StartTime = cs.timeoutParams.Commit(cs.CommitTime)
cs.StartTime = cs.config.Commit(cs.CommitTime)
}
cs.Validators = validators
cs.Proposal = nil
@@ -583,7 +555,7 @@ func (cs *ConsensusState) updateToState(state *sm.State) {
cs.LockedRound = 0
cs.LockedBlock = nil
cs.LockedBlockParts = nil
cs.Votes = NewHeightVoteSet(cs.config.GetString("chain_id"), height, validators)
cs.Votes = NewHeightVoteSet(state.ChainID, height, validators)
cs.CommitRound = -1
cs.LastCommit = lastPrecommits
cs.LastValidators = state.LastValidators
@@ -615,7 +587,7 @@ func (cs *ConsensusState) receiveRoutine(maxSteps int) {
for {
if maxSteps > 0 {
if cs.nSteps >= maxSteps {
log.Warn("reached max steps. exiting receive routine")
cs.Logger.Info("reached max steps. exiting receive routine")
cs.nSteps = 0
return
}
@@ -688,19 +660,19 @@ func (cs *ConsensusState) handleMsg(mi msgInfo, rs RoundState) {
// the peer is sending us CatchupCommit precommits.
// We could make note of this and help filter in broadcastHasVoteMessage().
default:
log.Warn("Unknown msg type", reflect.TypeOf(msg))
cs.Logger.Error("Unknown msg type", reflect.TypeOf(msg))
}
if err != nil {
log.Error("Error with msg", "type", reflect.TypeOf(msg), "peer", peerKey, "error", err, "msg", msg)
cs.Logger.Error("Error with msg", "type", reflect.TypeOf(msg), "peer", peerKey, "error", err, "msg", msg)
}
}
func (cs *ConsensusState) handleTimeout(ti timeoutInfo, rs RoundState) {
log.Debug("Received tock", "timeout", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step)
cs.Logger.Debug("Received tock", "timeout", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step)
// timeouts must be for current height, round, step
if ti.Height != rs.Height || ti.Round < rs.Round || (ti.Round == rs.Round && ti.Step < rs.Step) {
log.Debug("Ignoring tock because we're ahead", "height", rs.Height, "round", rs.Round, "step", rs.Step)
cs.Logger.Debug("Ignoring tock because we're ahead", "height", rs.Height, "round", rs.Round, "step", rs.Step)
return
}
@@ -723,7 +695,7 @@ func (cs *ConsensusState) handleTimeout(ti timeoutInfo, rs RoundState) {
types.FireEventTimeoutWait(cs.evsw, cs.RoundStateEvent())
cs.enterNewRound(ti.Height, ti.Round+1)
default:
panic(Fmt("Invalid timeout step: %v", ti.Step))
panic(cmn.Fmt("Invalid timeout step: %v", ti.Step))
}
}
@@ -738,15 +710,15 @@ func (cs *ConsensusState) handleTimeout(ti timeoutInfo, rs RoundState) {
// NOTE: cs.StartTime was already set for height.
func (cs *ConsensusState) enterNewRound(height int, round int) {
if cs.Height != height || round < cs.Round || (cs.Round == round && cs.Step != RoundStepNewHeight) {
log.Debug(Fmt("enterNewRound(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
cs.Logger.Debug(cmn.Fmt("enterNewRound(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
return
}
if now := time.Now(); cs.StartTime.After(now) {
log.Warn("Need to set a buffer and log.Warn() here for sanity.", "startTime", cs.StartTime, "now", now)
cs.Logger.Info("Need to set a buffer and log message here for sanity.", "startTime", cs.StartTime, "now", now)
}
log.Notice(Fmt("enterNewRound(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
cs.Logger.Info(cmn.Fmt("enterNewRound(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
// Increment validators if necessary
validators := cs.Validators
@@ -780,10 +752,10 @@ func (cs *ConsensusState) enterNewRound(height int, round int) {
// Enter: from NewRound(height,round).
func (cs *ConsensusState) enterPropose(height int, round int) {
if cs.Height != height || round < cs.Round || (cs.Round == round && RoundStepPropose <= cs.Step) {
log.Debug(Fmt("enterPropose(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
cs.Logger.Debug(cmn.Fmt("enterPropose(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
return
}
log.Info(Fmt("enterPropose(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
cs.Logger.Info(cmn.Fmt("enterPropose(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
defer func() {
// Done enterPropose:
@@ -799,19 +771,25 @@ func (cs *ConsensusState) enterPropose(height int, round int) {
}()
// If we don't get the proposal and all block parts quick enough, enterPrevote
cs.scheduleTimeout(cs.timeoutParams.Propose(round), height, round, RoundStepPropose)
cs.scheduleTimeout(cs.config.Propose(round), height, round, RoundStepPropose)
// Nothing more to do if we're not a validator
if cs.privValidator == nil {
cs.Logger.Debug("This node is not a validator")
return
}
if !bytes.Equal(cs.Validators.GetProposer().Address, cs.privValidator.GetAddress()) {
log.Info("enterPropose: Not our turn to propose", "proposer", cs.Validators.GetProposer().Address, "privValidator", cs.privValidator)
cs.Logger.Info("enterPropose: Not our turn to propose", "proposer", cs.Validators.GetProposer().Address, "privValidator", cs.privValidator)
if cs.Validators.HasAddress(cs.privValidator.GetAddress()) {
cs.Logger.Debug("This node is a validator")
} else {
cs.Logger.Debug("This node is not a validator")
}
} else {
log.Info("enterPropose: Our turn to propose", "proposer", cs.Validators.GetProposer().Address, "privValidator", cs.privValidator)
cs.Logger.Info("enterPropose: Our turn to propose", "proposer", cs.Validators.GetProposer().Address, "privValidator", cs.privValidator)
cs.Logger.Debug("This node is a validator")
cs.decideProposal(height, round)
}
}
@@ -849,11 +827,11 @@ func (cs *ConsensusState) defaultDecideProposal(height, round int) {
part := blockParts.GetPart(i)
cs.sendInternalMessage(msgInfo{&BlockPartMessage{cs.Height, cs.Round, part}, ""})
}
log.Info("Signed proposal", "height", height, "round", round, "proposal", proposal)
log.Debug(Fmt("Signed proposal block: %v", block))
cs.Logger.Info("Signed proposal", "height", height, "round", round, "proposal", proposal)
cs.Logger.Debug(cmn.Fmt("Signed proposal block: %v", block))
} else {
if !cs.replayMode {
log.Warn("enterPropose: Error signing proposal", "height", height, "round", round, "error", err)
cs.Logger.Error("enterPropose: Error signing proposal", "height", height, "round", round, "error", err)
}
}
}
@@ -888,15 +866,15 @@ func (cs *ConsensusState) createProposalBlock() (block *types.Block, blockParts
commit = cs.LastCommit.MakeCommit()
} else {
// This shouldn't happen.
log.Error("enterPropose: Cannot propose anything: No commit for the previous block.")
cs.Logger.Error("enterPropose: Cannot propose anything: No commit for the previous block.")
return
}
// Mempool validated transactions
txs := cs.mempool.Reap(cs.config.GetInt("block_size"))
txs := cs.mempool.Reap(cs.config.MaxBlockSizeTxs)
return types.MakeBlock(cs.Height, cs.state.ChainID, txs, commit,
cs.state.LastBlockID, cs.state.Validators.Hash(), cs.state.AppHash, cs.config.GetInt("block_part_size"))
cs.state.LastBlockID, cs.state.Validators.Hash(), cs.state.AppHash, cs.config.BlockPartSize)
}
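Summarizing the config migration in this file, the old string-keyed getters map onto typed members of *cfg.ConsensusConfig (only names that appear in this diff are listed; these are illustrative fragments, not standalone code):
walFile := cs.config.WalFile()          // was cs.config.GetString("cs_wal_file")
light := cs.config.WalLight             // was cs.config.GetBool("cs_wal_light")
maxTxs := cs.config.MaxBlockSizeTxs     // was cs.config.GetInt("block_size")
partSize := cs.config.BlockPartSize     // was cs.config.GetInt("block_part_size")
skip := cs.config.SkipTimeoutCommit     // was config.GetBool("skip_timeout_commit")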
// Enter: `timeoutPropose` after entering Propose.
@@ -906,7 +884,7 @@ func (cs *ConsensusState) createProposalBlock() (block *types.Block, blockParts
// Otherwise vote nil.
func (cs *ConsensusState) enterPrevote(height int, round int) {
if cs.Height != height || round < cs.Round || (cs.Round == round && RoundStepPrevote <= cs.Step) {
log.Debug(Fmt("enterPrevote(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
cs.Logger.Debug(cmn.Fmt("enterPrevote(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
return
}
@@ -924,7 +902,7 @@ func (cs *ConsensusState) enterPrevote(height int, round int) {
// TODO: catchup event?
}
log.Info(Fmt("enterPrevote(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
cs.Logger.Info(cmn.Fmt("enterPrevote(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
// Sign and broadcast vote as necessary
cs.doPrevote(height, round)
@@ -936,14 +914,14 @@ func (cs *ConsensusState) enterPrevote(height int, round int) {
func (cs *ConsensusState) defaultDoPrevote(height int, round int) {
// If a block is locked, prevote that.
if cs.LockedBlock != nil {
log.Notice("enterPrevote: Block was locked")
cs.Logger.Info("enterPrevote: Block was locked")
cs.signAddVote(types.VoteTypePrevote, cs.LockedBlock.Hash(), cs.LockedBlockParts.Header())
return
}
// If ProposalBlock is nil, prevote nil.
if cs.ProposalBlock == nil {
log.Warn("enterPrevote: ProposalBlock is nil")
cs.Logger.Info("enterPrevote: ProposalBlock is nil")
cs.signAddVote(types.VoteTypePrevote, nil, types.PartSetHeader{})
return
}
@@ -952,7 +930,7 @@ func (cs *ConsensusState) defaultDoPrevote(height int, round int) {
err := cs.state.ValidateBlock(cs.ProposalBlock)
if err != nil {
// ProposalBlock is invalid, prevote nil.
log.Warn("enterPrevote: ProposalBlock is invalid", "error", err)
cs.Logger.Error("enterPrevote: ProposalBlock is invalid", "error", err)
cs.signAddVote(types.VoteTypePrevote, nil, types.PartSetHeader{})
return
}
@@ -967,13 +945,13 @@ func (cs *ConsensusState) defaultDoPrevote(height int, round int) {
// Enter: any +2/3 prevotes at next round.
func (cs *ConsensusState) enterPrevoteWait(height int, round int) {
if cs.Height != height || round < cs.Round || (cs.Round == round && RoundStepPrevoteWait <= cs.Step) {
log.Debug(Fmt("enterPrevoteWait(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
cs.Logger.Debug(cmn.Fmt("enterPrevoteWait(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
return
}
if !cs.Votes.Prevotes(round).HasTwoThirdsAny() {
PanicSanity(Fmt("enterPrevoteWait(%v/%v), but Prevotes does not have any +2/3 votes", height, round))
cmn.PanicSanity(cmn.Fmt("enterPrevoteWait(%v/%v), but Prevotes does not have any +2/3 votes", height, round))
}
log.Info(Fmt("enterPrevoteWait(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
cs.Logger.Info(cmn.Fmt("enterPrevoteWait(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
defer func() {
// Done enterPrevoteWait:
@@ -982,7 +960,7 @@ func (cs *ConsensusState) enterPrevoteWait(height int, round int) {
}()
// Wait for some more prevotes; enterPrecommit
cs.scheduleTimeout(cs.timeoutParams.Prevote(round), height, round, RoundStepPrevoteWait)
cs.scheduleTimeout(cs.config.Prevote(round), height, round, RoundStepPrevoteWait)
}
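The per-step timeout helpers likewise move from cs.timeoutParams onto the consensus config. As used in this file, Propose/Prevote/Precommit take the round and yield a duration for scheduleTimeout, while Commit maps a time to the next StartTime:
cs.scheduleTimeout(cs.config.Propose(round), height, round, RoundStepPropose)
cs.scheduleTimeout(cs.config.Prevote(round), height, round, RoundStepPrevoteWait)
cs.scheduleTimeout(cs.config.Precommit(round), height, round, RoundStepPrecommitWait)
cs.StartTime = cs.config.Commit(cs.CommitTime)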
// Enter: +2/3 precommits for block or nil.
@@ -993,11 +971,11 @@ func (cs *ConsensusState) enterPrevoteWait(height int, round int) {
// else, precommit nil otherwise.
func (cs *ConsensusState) enterPrecommit(height int, round int) {
if cs.Height != height || round < cs.Round || (cs.Round == round && RoundStepPrecommit <= cs.Step) {
log.Debug(Fmt("enterPrecommit(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
cs.Logger.Debug(cmn.Fmt("enterPrecommit(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
return
}
log.Info(Fmt("enterPrecommit(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
cs.Logger.Info(cmn.Fmt("enterPrecommit(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
defer func() {
// Done enterPrecommit:
@@ -1010,9 +988,9 @@ func (cs *ConsensusState) enterPrecommit(height int, round int) {
// If we don't have a polka, we must precommit nil
if !ok {
if cs.LockedBlock != nil {
log.Notice("enterPrecommit: No +2/3 prevotes during enterPrecommit while we're locked. Precommitting nil")
cs.Logger.Info("enterPrecommit: No +2/3 prevotes during enterPrecommit while we're locked. Precommitting nil")
} else {
log.Notice("enterPrecommit: No +2/3 prevotes during enterPrecommit. Precommitting nil.")
cs.Logger.Info("enterPrecommit: No +2/3 prevotes during enterPrecommit. Precommitting nil.")
}
cs.signAddVote(types.VoteTypePrecommit, nil, types.PartSetHeader{})
return
@@ -1024,15 +1002,15 @@ func (cs *ConsensusState) enterPrecommit(height int, round int) {
// the latest POLRound should be this round
polRound, _ := cs.Votes.POLInfo()
if polRound < round {
PanicSanity(Fmt("This POLRound should be %v but got %", round, polRound))
cmn.PanicSanity(cmn.Fmt("This POLRound should be %v but got %", round, polRound))
}
// +2/3 prevoted nil. Unlock and precommit nil.
if len(blockID.Hash) == 0 {
if cs.LockedBlock == nil {
log.Notice("enterPrecommit: +2/3 prevoted for nil.")
cs.Logger.Info("enterPrecommit: +2/3 prevoted for nil.")
} else {
log.Notice("enterPrecommit: +2/3 prevoted for nil. Unlocking")
cs.Logger.Info("enterPrecommit: +2/3 prevoted for nil. Unlocking")
cs.LockedRound = 0
cs.LockedBlock = nil
cs.LockedBlockParts = nil
@@ -1046,7 +1024,7 @@ func (cs *ConsensusState) enterPrecommit(height int, round int) {
// If we're already locked on that block, precommit it, and update the LockedRound
if cs.LockedBlock.HashesTo(blockID.Hash) {
log.Notice("enterPrecommit: +2/3 prevoted locked block. Relocking")
cs.Logger.Info("enterPrecommit: +2/3 prevoted locked block. Relocking")
cs.LockedRound = round
types.FireEventRelock(cs.evsw, cs.RoundStateEvent())
cs.signAddVote(types.VoteTypePrecommit, blockID.Hash, blockID.PartsHeader)
@@ -1055,10 +1033,10 @@ func (cs *ConsensusState) enterPrecommit(height int, round int) {
// If +2/3 prevoted for proposal block, stage and precommit it
if cs.ProposalBlock.HashesTo(blockID.Hash) {
log.Notice("enterPrecommit: +2/3 prevoted proposal block. Locking", "hash", blockID.Hash)
cs.Logger.Info("enterPrecommit: +2/3 prevoted proposal block. Locking", "hash", blockID.Hash)
// Validate the block.
if err := cs.state.ValidateBlock(cs.ProposalBlock); err != nil {
PanicConsensus(Fmt("enterPrecommit: +2/3 prevoted for an invalid block: %v", err))
cmn.PanicConsensus(cmn.Fmt("enterPrecommit: +2/3 prevoted for an invalid block: %v", err))
}
cs.LockedRound = round
cs.LockedBlock = cs.ProposalBlock
@@ -1087,13 +1065,13 @@ func (cs *ConsensusState) enterPrecommit(height int, round int) {
// Enter: any +2/3 precommits for next round.
func (cs *ConsensusState) enterPrecommitWait(height int, round int) {
if cs.Height != height || round < cs.Round || (cs.Round == round && RoundStepPrecommitWait <= cs.Step) {
log.Debug(Fmt("enterPrecommitWait(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
cs.Logger.Debug(cmn.Fmt("enterPrecommitWait(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
return
}
if !cs.Votes.Precommits(round).HasTwoThirdsAny() {
PanicSanity(Fmt("enterPrecommitWait(%v/%v), but Precommits does not have any +2/3 votes", height, round))
cmn.PanicSanity(cmn.Fmt("enterPrecommitWait(%v/%v), but Precommits does not have any +2/3 votes", height, round))
}
log.Info(Fmt("enterPrecommitWait(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
cs.Logger.Info(cmn.Fmt("enterPrecommitWait(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
defer func() {
// Done enterPrecommitWait:
@@ -1102,17 +1080,17 @@ func (cs *ConsensusState) enterPrecommitWait(height int, round int) {
}()
// Wait for some more precommits; enterNewRound
cs.scheduleTimeout(cs.timeoutParams.Precommit(round), height, round, RoundStepPrecommitWait)
cs.scheduleTimeout(cs.config.Precommit(round), height, round, RoundStepPrecommitWait)
}
// Enter: +2/3 precommits for block
func (cs *ConsensusState) enterCommit(height int, commitRound int) {
if cs.Height != height || RoundStepCommit <= cs.Step {
log.Debug(Fmt("enterCommit(%v/%v): Invalid args. Current step: %v/%v/%v", height, commitRound, cs.Height, cs.Round, cs.Step))
cs.Logger.Debug(cmn.Fmt("enterCommit(%v/%v): Invalid args. Current step: %v/%v/%v", height, commitRound, cs.Height, cs.Round, cs.Step))
return
}
log.Info(Fmt("enterCommit(%v/%v). Current: %v/%v/%v", height, commitRound, cs.Height, cs.Round, cs.Step))
cs.Logger.Info(cmn.Fmt("enterCommit(%v/%v). Current: %v/%v/%v", height, commitRound, cs.Height, cs.Round, cs.Step))
defer func() {
// Done enterCommit:
@@ -1128,7 +1106,7 @@ func (cs *ConsensusState) enterCommit(height int, commitRound int) {
blockID, ok := cs.Votes.Precommits(commitRound).TwoThirdsMajority()
if !ok {
PanicSanity("RunActionCommit() expects +2/3 precommits")
cmn.PanicSanity("RunActionCommit() expects +2/3 precommits")
}
// The Locked* fields no longer matter.
@@ -1155,20 +1133,21 @@ func (cs *ConsensusState) enterCommit(height int, commitRound int) {
// If we have the block AND +2/3 commits for it, finalize.
func (cs *ConsensusState) tryFinalizeCommit(height int) {
if cs.Height != height {
PanicSanity(Fmt("tryFinalizeCommit() cs.Height: %v vs height: %v", cs.Height, height))
cmn.PanicSanity(cmn.Fmt("tryFinalizeCommit() cs.Height: %v vs height: %v", cs.Height, height))
}
blockID, ok := cs.Votes.Precommits(cs.CommitRound).TwoThirdsMajority()
if !ok || len(blockID.Hash) == 0 {
log.Warn("Attempt to finalize failed. There was no +2/3 majority, or +2/3 was for <nil>.", "height", height)
cs.Logger.Error("Attempt to finalize failed. There was no +2/3 majority, or +2/3 was for <nil>.", "height", height)
return
}
if !cs.ProposalBlock.HashesTo(blockID.Hash) {
// TODO: this happens every time if we're not a validator (ugly logs)
// TODO: ^^ wait, why does it matter that we're a validator?
log.Warn("Attempt to finalize failed. We don't have the commit block.", "height", height, "proposal-block", cs.ProposalBlock.Hash(), "commit-block", blockID.Hash)
cs.Logger.Error("Attempt to finalize failed. We don't have the commit block.", "height", height, "proposal-block", cs.ProposalBlock.Hash(), "commit-block", blockID.Hash)
return
}
// go
cs.finalizeCommit(height)
}
@@ -1176,7 +1155,7 @@ func (cs *ConsensusState) tryFinalizeCommit(height int) {
// Increment height and goto RoundStepNewHeight
func (cs *ConsensusState) finalizeCommit(height int) {
if cs.Height != height || cs.Step != RoundStepCommit {
log.Debug(Fmt("finalizeCommit(%v): Invalid args. Current step: %v/%v/%v", height, cs.Height, cs.Round, cs.Step))
cs.Logger.Debug(cmn.Fmt("finalizeCommit(%v): Invalid args. Current step: %v/%v/%v", height, cs.Height, cs.Round, cs.Step))
return
}
@@ -1184,21 +1163,21 @@ func (cs *ConsensusState) finalizeCommit(height int) {
block, blockParts := cs.ProposalBlock, cs.ProposalBlockParts
if !ok {
PanicSanity(Fmt("Cannot finalizeCommit, commit does not have two thirds majority"))
cmn.PanicSanity(cmn.Fmt("Cannot finalizeCommit, commit does not have two thirds majority"))
}
if !blockParts.HasHeader(blockID.PartsHeader) {
PanicSanity(Fmt("Expected ProposalBlockParts header to be commit header"))
cmn.PanicSanity(cmn.Fmt("Expected ProposalBlockParts header to be commit header"))
}
if !block.HashesTo(blockID.Hash) {
PanicSanity(Fmt("Cannot finalizeCommit, ProposalBlock does not hash to commit hash"))
cmn.PanicSanity(cmn.Fmt("Cannot finalizeCommit, ProposalBlock does not hash to commit hash"))
}
if err := cs.state.ValidateBlock(block); err != nil {
PanicConsensus(Fmt("+2/3 committed an invalid block: %v", err))
cmn.PanicConsensus(cmn.Fmt("+2/3 committed an invalid block: %v", err))
}
log.Notice(Fmt("Finalizing commit of block with %d txs", block.NumTxs),
cs.Logger.Info(cmn.Fmt("Finalizing commit of block with %d txs", block.NumTxs),
"height", block.Height, "hash", block.Hash(), "root", block.AppHash)
log.Info(Fmt("%v", block))
cs.Logger.Info(cmn.Fmt("%v", block))
fail.Fail() // XXX
@@ -1211,7 +1190,7 @@ func (cs *ConsensusState) finalizeCommit(height int) {
cs.blockStore.SaveBlock(block, blockParts, seenCommit)
} else {
// Happens during replay if we already saved the block but didn't commit
log.Info("Calling finalizeCommit on already stored block", "height", block.Height)
cs.Logger.Info("Calling finalizeCommit on already stored block", "height", block.Height)
}
fail.Fail() // XXX
@@ -1239,7 +1218,7 @@ func (cs *ConsensusState) finalizeCommit(height int) {
// NOTE: the block.AppHash won't reflect these txs until the next block
err := stateCopy.ApplyBlock(eventCache, cs.proxyAppConn, block, blockParts.Header(), cs.mempool)
if err != nil {
log.Error("Error on ApplyBlock. Did the application crash? Please restart tendermint", "error", err)
cs.Logger.Error("Error on ApplyBlock. Did the application crash? Please restart tendermint", "error", err)
return
}
@@ -1249,7 +1228,7 @@ func (cs *ConsensusState) finalizeCommit(height int) {
// NOTE: If we fail before firing, these events will never fire
//
// TODO: Either
// * Fire before persisting state, in ApplyBlock
// * Fire before persisting state, in ApplyBlock
// * Fire on start up if we haven't written any new WAL msgs
// Both options mean we may fire more than once. Is that fine?
types.FireEventNewBlock(cs.evsw, types.EventDataNewBlock{block})
@@ -1332,7 +1311,7 @@ func (cs *ConsensusState) addProposalBlockPart(height int, part *types.Part, ver
var err error
cs.ProposalBlock = wire.ReadBinary(&types.Block{}, cs.ProposalBlockParts.GetReader(), types.MaxBlockSize, &n, &err).(*types.Block)
// NOTE: it's possible to receive complete proposal blocks for future rounds without having the proposal
log.Info("Received complete proposal block", "height", cs.ProposalBlock.Height, "hash", cs.ProposalBlock.Hash())
cs.Logger.Info("Received complete proposal block", "height", cs.ProposalBlock.Height, "hash", cs.ProposalBlock.Hash())
if cs.Step == RoundStepPropose && cs.isProposalComplete() {
// Move onto the next step
cs.enterPrevote(height, cs.Round)
@@ -1356,10 +1335,10 @@ func (cs *ConsensusState) tryAddVote(vote *types.Vote, peerKey string) error {
return err
} else if _, ok := err.(*types.ErrVoteConflictingVotes); ok {
if peerKey == "" {
log.Warn("Found conflicting vote from ourselves. Did you unsafe_reset a validator?", "height", vote.Height, "round", vote.Round, "type", vote.Type)
cs.Logger.Error("Found conflicting vote from ourselves. Did you unsafe_reset a validator?", "height", vote.Height, "round", vote.Round, "type", vote.Type)
return err
}
log.Warn("Found conflicting vote. Publish evidence (TODO)")
cs.Logger.Error("Found conflicting vote. Publish evidence (TODO)")
/* TODO
evidenceTx := &types.DupeoutTx{
Address: address,
@@ -1371,7 +1350,7 @@ func (cs *ConsensusState) tryAddVote(vote *types.Vote, peerKey string) error {
return err
} else {
// Probably an invalid signature. Bad peer.
log.Warn("Error attempting to add vote", "error", err)
cs.Logger.Error("Error attempting to add vote", "error", err)
return ErrAddingVote
}
}
@@ -1381,7 +1360,7 @@ func (cs *ConsensusState) tryAddVote(vote *types.Vote, peerKey string) error {
//-----------------------------------------------------------------------------
func (cs *ConsensusState) addVote(vote *types.Vote, peerKey string) (added bool, err error) {
log.Debug("addVote", "voteHeight", vote.Height, "voteType", vote.Type, "csHeight", cs.Height)
cs.Logger.Debug("addVote", "voteHeight", vote.Height, "voteType", vote.Type, "csHeight", cs.Height)
// A precommit for the previous height?
// These come in while we wait timeoutCommit
@@ -1393,11 +1372,11 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerKey string) (added bool,
}
added, err = cs.LastCommit.AddVote(vote)
if added {
log.Info(Fmt("Added to lastPrecommits: %v", cs.LastCommit.StringShort()))
cs.Logger.Info(cmn.Fmt("Added to lastPrecommits: %v", cs.LastCommit.StringShort()))
types.FireEventVote(cs.evsw, types.EventDataVote{vote})
// if we can skip timeoutCommit and have all the votes now,
if cs.timeoutParams.SkipTimeoutCommit && cs.LastCommit.HasAll() {
if cs.config.SkipTimeoutCommit && cs.LastCommit.HasAll() {
// go straight to new round (skip timeout commit)
// cs.scheduleTimeout(time.Duration(0), cs.Height, 0, RoundStepNewHeight)
cs.enterNewRound(cs.Height, 0)
@@ -1417,7 +1396,7 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerKey string) (added bool,
switch vote.Type {
case types.VoteTypePrevote:
prevotes := cs.Votes.Prevotes(vote.Round)
log.Info("Added to prevote", "vote", vote, "prevotes", prevotes.StringShort())
cs.Logger.Info("Added to prevote", "vote", vote, "prevotes", prevotes.StringShort())
// First, unlock if prevotes is a valid POL.
// >> lockRound < POLRound <= unlockOrChangeLockRound (see spec)
// NOTE: If (lockRound < POLRound) but !(POLRound <= unlockOrChangeLockRound),
@@ -1426,7 +1405,7 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerKey string) (added bool,
if (cs.LockedBlock != nil) && (cs.LockedRound < vote.Round) && (vote.Round <= cs.Round) {
blockID, ok := prevotes.TwoThirdsMajority()
if ok && !cs.LockedBlock.HashesTo(blockID.Hash) {
log.Notice("Unlocking because of POL.", "lockedRound", cs.LockedRound, "POLRound", vote.Round)
cs.Logger.Info("Unlocking because of POL.", "lockedRound", cs.LockedRound, "POLRound", vote.Round)
cs.LockedRound = 0
cs.LockedBlock = nil
cs.LockedBlockParts = nil
@@ -1450,7 +1429,7 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerKey string) (added bool,
}
case types.VoteTypePrecommit:
precommits := cs.Votes.Precommits(vote.Round)
log.Info("Added to precommit", "vote", vote, "precommits", precommits.StringShort())
cs.Logger.Info("Added to precommit", "vote", vote, "precommits", precommits.StringShort())
blockID, ok := precommits.TwoThirdsMajority()
if ok {
if len(blockID.Hash) == 0 {
@@ -1460,7 +1439,7 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerKey string) (added bool,
cs.enterPrecommit(height, vote.Round)
cs.enterCommit(height, vote.Round)
if cs.timeoutParams.SkipTimeoutCommit && precommits.HasAll() {
if cs.config.SkipTimeoutCommit && precommits.HasAll() {
// if we have all the votes now,
// go straight to new round (skip timeout commit)
// cs.scheduleTimeout(time.Duration(0), cs.Height, 0, RoundStepNewHeight)
@@ -1474,7 +1453,7 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerKey string) (added bool,
cs.enterPrecommitWait(height, vote.Round)
}
default:
PanicSanity(Fmt("Unexpected vote type %X", vote.Type)) // Should not happen.
cmn.PanicSanity(cmn.Fmt("Unexpected vote type %X", vote.Type)) // Should not happen.
}
}
// Either duplicate, or error upon cs.Votes.AddByIndex()
@@ -1484,7 +1463,7 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerKey string) (added bool,
}
// Height mismatch, bad peer?
log.Info("Vote ignored and not added", "voteHeight", vote.Height, "csHeight", cs.Height, "err", err)
cs.Logger.Info("Vote ignored and not added", "voteHeight", vote.Height, "csHeight", cs.Height, "err", err)
return
}
@@ -1512,11 +1491,11 @@ func (cs *ConsensusState) signAddVote(type_ byte, hash []byte, header types.Part
vote, err := cs.signVote(type_, hash, header)
if err == nil {
cs.sendInternalMessage(msgInfo{&VoteMessage{vote}, ""})
log.Info("Signed and pushed vote", "height", cs.Height, "round", cs.Round, "vote", vote, "error", err)
cs.Logger.Info("Signed and pushed vote", "height", cs.Height, "round", cs.Round, "vote", vote, "error", err)
return vote
} else {
//if !cs.replayMode {
log.Warn("Error signing vote", "height", cs.Height, "round", cs.Round, "vote", vote, "error", err)
cs.Logger.Error("Error signing vote", "height", cs.Height, "round", cs.Round, "vote", vote, "error", err)
//}
return nil
}


@@ -6,17 +6,16 @@ import (
"testing"
"time"
. "github.com/tendermint/go-common"
"github.com/tendermint/tendermint/config/tendermint_test"
"github.com/tendermint/tendermint/types"
. "github.com/tendermint/tmlibs/common"
)
func init() {
config = tendermint_test.ResetConfig("consensus_state_test")
config = ResetConfig("consensus_state_test")
}
func (tp *TimeoutParams) ensureProposeTimeout() time.Duration {
return time.Duration(tp.Propose0*2) * time.Millisecond
func ensureProposeTimeout(timeoutPropose int) time.Duration {
return time.Duration(timeoutPropose*2) * time.Millisecond
}
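ensureProposeTimeout is now a plain function over the configured propose timeout (in milliseconds) rather than a method on TimeoutParams, so the tests below call it as:
ticker := time.NewTicker(ensureProposeTimeout(cs.config.TimeoutPropose))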
/*
@@ -126,7 +125,7 @@ func TestEnterProposeNoPrivValidator(t *testing.T) {
startTestRound(cs, height, round)
// if we're not a validator, EnterPropose should timeout
ticker := time.NewTicker(cs.timeoutParams.ensureProposeTimeout())
ticker := time.NewTicker(ensureProposeTimeout(cs.config.TimeoutPropose))
select {
case <-timeoutCh:
case <-ticker.C:
@@ -167,7 +166,7 @@ func TestEnterProposeYesPrivValidator(t *testing.T) {
}
// if we're a validator, enterPropose should not timeout
ticker := time.NewTicker(cs.timeoutParams.ensureProposeTimeout())
ticker := time.NewTicker(ensureProposeTimeout(cs.config.TimeoutPropose))
select {
case <-timeoutCh:
panic("Expected EnterPropose not to timeout")
@@ -181,7 +180,7 @@ func TestBadProposal(t *testing.T) {
height, round := cs1.Height, cs1.Round
vs2 := vss[1]
partSize := config.GetInt("block_part_size")
partSize := config.Consensus.BlockPartSize
proposalCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringCompleteProposal(), 1)
voteCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringVote(), 1)
@@ -201,7 +200,7 @@ func TestBadProposal(t *testing.T) {
propBlock.AppHash = stateHash
propBlockParts := propBlock.MakePartSet(partSize)
proposal := types.NewProposal(vs2.Height, round, propBlockParts.Header(), -1, types.BlockID{})
if err := vs2.SignProposal(config.GetString("chain_id"), proposal); err != nil {
if err := vs2.SignProposal(config.ChainID, proposal); err != nil {
t.Fatal("failed to sign bad proposal", err)
}
@@ -248,7 +247,7 @@ func TestFullRound1(t *testing.T) {
// grab proposal
re := <-propCh
propBlockHash := re.(types.EventDataRoundState).RoundState.(*RoundState).ProposalBlock.Hash()
propBlockHash := re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*RoundState).ProposalBlock.Hash()
<-voteCh // wait for prevote
// NOTE: voteChan cap of 0 ensures we can complete this
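Event payloads are now delivered as types.TMEventData and must be unwrapped before the concrete type assertion; this is the pattern repeated throughout the tests below:
re := <-proposalCh
rs := re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*RoundState)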
@@ -328,7 +327,7 @@ func TestLockNoPOL(t *testing.T) {
vs2 := vss[1]
height := cs1.Height
partSize := config.GetInt("block_part_size")
partSize := config.Consensus.BlockPartSize
timeoutProposeCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutPropose(), 1)
timeoutWaitCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutWait(), 1)
@@ -345,7 +344,7 @@ func TestLockNoPOL(t *testing.T) {
cs1.startRoutines(0)
re := <-proposalCh
rs := re.(types.EventDataRoundState).RoundState.(*RoundState)
rs := re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*RoundState)
theBlockHash := rs.ProposalBlock.Hash()
<-voteCh // prevote
@@ -376,7 +375,7 @@ func TestLockNoPOL(t *testing.T) {
///
<-newRoundCh
log.Notice("#### ONTO ROUND 1")
t.Log("#### ONTO ROUND 1")
/*
Round2 (cs1, B) // B B2
*/
@@ -385,7 +384,7 @@ func TestLockNoPOL(t *testing.T) {
// now we're on a new round and not the proposer, so wait for timeout
re = <-timeoutProposeCh
rs = re.(types.EventDataRoundState).RoundState.(*RoundState)
rs = re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*RoundState)
if rs.ProposalBlock != nil {
panic("Expected proposal block to be nil")
@@ -421,7 +420,7 @@ func TestLockNoPOL(t *testing.T) {
<-timeoutWaitCh
<-newRoundCh
log.Notice("#### ONTO ROUND 2")
t.Log("#### ONTO ROUND 2")
/*
Round3 (vs2, _) // B, B2
*/
@@ -429,7 +428,7 @@ func TestLockNoPOL(t *testing.T) {
incrementRound(vs2)
re = <-proposalCh
rs = re.(types.EventDataRoundState).RoundState.(*RoundState)
rs = re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*RoundState)
// now we're on a new round and are the proposer
if !bytes.Equal(rs.ProposalBlock.Hash(), rs.LockedBlock.Hash()) {
@@ -462,7 +461,7 @@ func TestLockNoPOL(t *testing.T) {
incrementRound(vs2)
<-newRoundCh
log.Notice("#### ONTO ROUND 3")
t.Log("#### ONTO ROUND 3")
/*
Round4 (vs2, C) // B C // B C
*/
@@ -494,7 +493,7 @@ func TestLockPOLRelock(t *testing.T) {
cs1, vss := randConsensusState(4)
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
partSize := config.GetInt("block_part_size")
partSize := config.Consensus.BlockPartSize
timeoutProposeCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutPropose(), 1)
timeoutWaitCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutWait(), 1)
@@ -503,7 +502,7 @@ func TestLockPOLRelock(t *testing.T) {
newRoundCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringNewRound(), 1)
newBlockCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringNewBlockHeader(), 1)
log.Debug("vs2 last round", "lr", vs2.PrivValidator.LastRound)
t.Logf("vs2 last round %v", vs2.PrivValidator.LastRound)
// everything done from perspective of cs1
@@ -518,7 +517,7 @@ func TestLockPOLRelock(t *testing.T) {
<-newRoundCh
re := <-proposalCh
rs := re.(types.EventDataRoundState).RoundState.(*RoundState)
rs := re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*RoundState)
theBlockHash := rs.ProposalBlock.Hash()
<-voteCh // prevote
@@ -549,7 +548,7 @@ func TestLockPOLRelock(t *testing.T) {
cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer")
<-newRoundCh
log.Notice("### ONTO ROUND 1")
t.Log("### ONTO ROUND 1")
/*
Round2 (vs2, C) // B C C C // C C C _)
@@ -589,9 +588,9 @@ func TestLockPOLRelock(t *testing.T) {
_, _ = <-voteCh, <-voteCh
be := <-newBlockCh
b := be.(types.EventDataNewBlockHeader)
b := be.(types.TMEventData).Unwrap().(types.EventDataNewBlockHeader)
re = <-newRoundCh
rs = re.(types.EventDataRoundState).RoundState.(*RoundState)
rs = re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*RoundState)
if rs.Height != 2 {
panic("Expected height to increment")
}
@@ -606,7 +605,7 @@ func TestLockPOLUnlock(t *testing.T) {
cs1, vss := randConsensusState(4)
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
partSize := config.GetInt("block_part_size")
partSize := config.Consensus.BlockPartSize
proposalCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringCompleteProposal(), 1)
timeoutProposeCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutPropose(), 1)
@@ -627,7 +626,7 @@ func TestLockPOLUnlock(t *testing.T) {
startTestRound(cs1, cs1.Height, 0)
<-newRoundCh
re := <-proposalCh
rs := re.(types.EventDataRoundState).RoundState.(*RoundState)
rs := re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*RoundState)
theBlockHash := rs.ProposalBlock.Hash()
<-voteCh // prevote
@@ -653,14 +652,14 @@ func TestLockPOLUnlock(t *testing.T) {
// timeout to new round
re = <-timeoutWaitCh
rs = re.(types.EventDataRoundState).RoundState.(*RoundState)
rs = re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*RoundState)
lockedBlockHash := rs.LockedBlock.Hash()
//XXX: this isn't guaranteed to get there before the timeoutPropose ...
cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer")
<-newRoundCh
log.Notice("#### ONTO ROUND 1")
t.Log("#### ONTO ROUND 1")
/*
Round2 (vs2, C) // B nil nil nil // nil nil nil _
@@ -701,7 +700,7 @@ func TestLockPOLSafety1(t *testing.T) {
cs1, vss := randConsensusState(4)
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
partSize := config.GetInt("block_part_size")
partSize := config.Consensus.BlockPartSize
proposalCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringCompleteProposal(), 1)
timeoutProposeCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutPropose(), 1)
@@ -713,7 +712,7 @@ func TestLockPOLSafety1(t *testing.T) {
startTestRound(cs1, cs1.Height, 0)
<-newRoundCh
re := <-proposalCh
rs := re.(types.EventDataRoundState).RoundState.(*RoundState)
rs := re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*RoundState)
propBlock := rs.ProposalBlock
<-voteCh // prevote
@@ -732,7 +731,7 @@ func TestLockPOLSafety1(t *testing.T) {
panic("failed to update validator")
}*/
log.Warn("old prop", "hash", fmt.Sprintf("%X", propBlock.Hash()))
t.Logf("old prop hash %v", fmt.Sprintf("%X", propBlock.Hash()))
// we do see them precommit nil
signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs3, vs4)
@@ -747,7 +746,7 @@ func TestLockPOLSafety1(t *testing.T) {
cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer")
<-newRoundCh
log.Notice("### ONTO ROUND 1")
t.Log("### ONTO ROUND 1")
/*Round2
// we timeout and prevote our lock
// a polka happened but we didn't see it!
@@ -761,12 +760,12 @@ func TestLockPOLSafety1(t *testing.T) {
re = <-proposalCh
}
rs = re.(types.EventDataRoundState).RoundState.(*RoundState)
rs = re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*RoundState)
if rs.LockedBlock != nil {
panic("we should not be locked!")
}
log.Warn("new prop", "hash", fmt.Sprintf("%X", propBlockHash))
t.Logf("new prop hash %v", fmt.Sprintf("%X", propBlockHash))
// go to prevote, prevote for proposal block
<-voteCh
validatePrevote(t, cs1, 1, vss[0], propBlockHash)
@@ -787,7 +786,7 @@ func TestLockPOLSafety1(t *testing.T) {
<-newRoundCh
log.Notice("### ONTO ROUND 2")
t.Log("### ONTO ROUND 2")
/*Round3
we see the polka from round 1 but we shouldn't unlock!
*/
@@ -806,7 +805,7 @@ func TestLockPOLSafety1(t *testing.T) {
// add prevotes from the earlier round
addVotes(cs1, prevotes...)
log.Warn("Done adding prevotes!")
t.Log("Done adding prevotes!")
ensureNoNewStep(newStepCh)
}
@@ -822,7 +821,7 @@ func TestLockPOLSafety2(t *testing.T) {
cs1, vss := randConsensusState(4)
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
partSize := config.GetInt("block_part_size")
partSize := config.Consensus.BlockPartSize
proposalCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringCompleteProposal(), 1)
timeoutProposeCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutPropose(), 1)
@@ -850,7 +849,7 @@ func TestLockPOLSafety2(t *testing.T) {
cs1.updateRoundStep(0, RoundStepPrecommitWait)
log.Notice("### ONTO Round 1")
t.Log("### ONTO Round 1")
// jump in at round 1
height := cs1.Height
startTestRound(cs1, height, 1)
@@ -878,7 +877,7 @@ func TestLockPOLSafety2(t *testing.T) {
// in round 2 we see the polkad block from round 0
newProp := types.NewProposal(height, 2, propBlockParts0.Header(), 0, propBlockID1)
if err := vs3.SignProposal(config.GetString("chain_id"), newProp); err != nil {
if err := vs3.SignProposal(config.ChainID, newProp); err != nil {
t.Fatal(err)
}
cs1.SetProposalAndBlock(newProp, propBlock0, propBlockParts0, "some peer")
@@ -887,7 +886,7 @@ func TestLockPOLSafety2(t *testing.T) {
addVotes(cs1, prevotes...)
<-newRoundCh
log.Notice("### ONTO Round 2")
t.Log("### ONTO Round 2")
/*Round2
// now we see the polka from round 1, but we shouldn't unlock
*/
@@ -997,7 +996,7 @@ func TestHalt1(t *testing.T) {
cs1, vss := randConsensusState(4)
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
partSize := config.GetInt("block_part_size")
partSize := config.Consensus.BlockPartSize
proposalCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringCompleteProposal(), 1)
timeoutWaitCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutWait(), 1)
@@ -1009,7 +1008,7 @@ func TestHalt1(t *testing.T) {
startTestRound(cs1, cs1.Height, 0)
<-newRoundCh
re := <-proposalCh
rs := re.(types.EventDataRoundState).RoundState.(*RoundState)
rs := re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*RoundState)
propBlock := rs.ProposalBlock
propBlockParts := propBlock.MakePartSet(partSize)
@@ -1032,9 +1031,9 @@ func TestHalt1(t *testing.T) {
// timeout to new round
<-timeoutWaitCh
re = <-newRoundCh
rs = re.(types.EventDataRoundState).RoundState.(*RoundState)
rs = re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*RoundState)
log.Notice("### ONTO ROUND 1")
t.Log("### ONTO ROUND 1")
/*Round2
// we timeout and prevote our lock
// a polka happened but we didn't see it!
@@ -1050,7 +1049,7 @@ func TestHalt1(t *testing.T) {
// receiving that precommit should take us straight to commit
<-newBlockCh
re = <-newRoundCh
rs = re.(types.EventDataRoundState).RoundState.(*RoundState)
rs = re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*RoundState)
if rs.Height != 2 {
panic("expected height to increment")

consensus/test_data/build.sh Normal file → Executable file

@@ -1,31 +1,38 @@
#! /bin/bash
#!/usr/bin/env bash
# XXX: removes tendermint dir
cd $GOPATH/src/github.com/tendermint/tendermint
cd "$GOPATH/src/github.com/tendermint/tendermint" || exit 1
# Make sure we have a tendermint command.
if ! hash tendermint 2>/dev/null; then
make install
fi
# specify a dir to copy
# TODO: eventually we should replace with `tendermint init --test`
DIR=$HOME/.tendermint_test/consensus_state_test
DIR_TO_COPY=$HOME/.tendermint_test/consensus_state_test
rm -rf $HOME/.tendermint
cp -r $DIR $HOME/.tendermint
TMHOME="$HOME/.tendermint"
rm -rf "$TMHOME"
cp -r "$DIR_TO_COPY" "$TMHOME"
cp $TMHOME/config.toml $TMHOME/config.toml.bak
function reset(){
rm -rf $HOME/.tendermint/data
tendermint unsafe_reset_priv_validator
tendermint unsafe_reset_all
cp $TMHOME/config.toml.bak $TMHOME/config.toml
}
reset
# empty block
function empty_block(){
tendermint node --proxy_app=dummy &> /dev/null &
tendermint node --proxy_app=persistent_dummy &> /dev/null &
sleep 5
killall tendermint
# /q would print up to and including the match, then quit.
# /Q doesn't include the match.
# /q would print up to and including the match, then quit.
# /Q doesn't include the match.
# http://unix.stackexchange.com/questions/11305/grep-show-all-the-file-up-to-the-match
sed '/ENDHEIGHT: 1/Q' ~/.tendermint/data/cs.wal/wal > consensus/test_data/empty_block.cswal
@@ -36,7 +43,7 @@ reset
function many_blocks(){
bash scripts/txs/random.sh 1000 36657 &> /dev/null &
PID=$!
tendermint node --proxy_app=dummy &> /dev/null &
tendermint node --proxy_app=persistent_dummy &> /dev/null &
sleep 7
killall tendermint
kill -9 $PID
@@ -51,7 +58,7 @@ reset
function small_block1(){
bash scripts/txs/random.sh 1000 36657 &> /dev/null &
PID=$!
tendermint node --proxy_app=dummy &> /dev/null &
tendermint node --proxy_app=persistent_dummy &> /dev/null &
sleep 10
killall tendermint
kill -9 $PID
@@ -68,7 +75,7 @@ echo "" >> ~/.tendermint/config.toml
echo "block_part_size = 512" >> ~/.tendermint/config.toml
bash scripts/txs/random.sh 1000 36657 &> /dev/null &
PID=$!
tendermint node --proxy_app=dummy &> /dev/null &
tendermint node --proxy_app=persistent_dummy &> /dev/null &
sleep 5
killall tendermint
kill -9 $PID
@@ -80,7 +87,7 @@ reset
case "$1" in
case "$1" in
"small_block1")
small_block1
;;


@@ -1,10 +1,10 @@
#ENDHEIGHT: 0
{"time":"2016-12-18T05:05:33.502Z","msg":[3,{"duration":974084551,"height":1,"round":0,"step":1}]}
{"time":"2016-12-18T05:05:33.505Z","msg":[1,{"height":1,"round":0,"step":"RoundStepPropose"}]}
{"time":"2016-12-18T05:05:33.505Z","msg":[2,{"msg":[17,{"Proposal":{"height":1,"round":0,"block_parts_header":{"total":1,"hash":"71D2DA2336A9F84C22A28FF6C67F35F3478FC0AF"},"pol_round":-1,"pol_block_id":{"hash":"","parts":{"total":0,"hash":""}},"signature":[1,"62C0F2BCCB491399EEDAF8E85837ADDD4E25BAB7A84BFC4F0E88594531FBC6D4755DEC7E6427F04AD7EB8BB89502762AB4380C7BBA93A4C297E6180EC78E3504"]}}],"peer_key":""}]}
{"time":"2016-12-18T05:05:33.506Z","msg":[2,{"msg":[19,{"Height":1,"Round":0,"Part":{"index":0,"bytes":"0101010F74656E6465726D696E745F74657374010114914148D83E0DC00000000000000114354594CBFC1A7BCA1AD0050ED6AA010023EADA390001000100000000","proof":{"aunts":[]}}}],"peer_key":""}]}
{"time":"2016-12-18T05:05:33.508Z","msg":[1,{"height":1,"round":0,"step":"RoundStepPrevote"}]}
{"time":"2016-12-18T05:05:33.508Z","msg":[2,{"msg":[20,{"Vote":{"validator_address":"D028C9981F7A87F3093672BF0D5B0E2A1B3ED456","validator_index":0,"height":1,"round":0,"type":1,"block_id":{"hash":"3E83DF89A01C5F104912E095F32451C202F34717","parts":{"total":1,"hash":"71D2DA2336A9F84C22A28FF6C67F35F3478FC0AF"}},"signature":[1,"B64D0BB64B2E9AAFDD4EBEA679644F77AE774D69E3E2E1B042AB15FE4F84B1427AC6C8A25AFF58EA22011AE567FEA49D2EE7354382E915AD85BF40C58FA6130C"]}}],"peer_key":""}]}
{"time":"2016-12-18T05:05:33.509Z","msg":[1,{"height":1,"round":0,"step":"RoundStepPrecommit"}]}
{"time":"2016-12-18T05:05:33.509Z","msg":[2,{"msg":[20,{"Vote":{"validator_address":"D028C9981F7A87F3093672BF0D5B0E2A1B3ED456","validator_index":0,"height":1,"round":0,"type":2,"block_id":{"hash":"3E83DF89A01C5F104912E095F32451C202F34717","parts":{"total":1,"hash":"71D2DA2336A9F84C22A28FF6C67F35F3478FC0AF"}},"signature":[1,"D83E968392D1BF09821E0D05079DAB5491CABD89BE128BD1CF573ED87148BA84667A56C0A069EFC90760F25EDAC62BC324DBB12EA63F44E6CB2D3500FE5E640F"]}}],"peer_key":""}]}
{"time":"2016-12-18T05:05:33.509Z","msg":[1,{"height":1,"round":0,"step":"RoundStepCommit"}]}
{"time":"2017-04-27T22:24:01.346Z","msg":[3,{"duration":972946821,"height":1,"round":0,"step":1}]}
{"time":"2017-04-27T22:24:01.349Z","msg":[1,{"height":1,"round":0,"step":"RoundStepPropose"}]}
{"time":"2017-04-27T22:24:01.349Z","msg":[2,{"msg":[17,{"Proposal":{"height":1,"round":0,"block_parts_header":{"total":1,"hash":"ACED4A95DDEBD24E66A681F7EAB4CA22C4B8546D"},"pol_round":-1,"pol_block_id":{"hash":"","parts":{"total":0,"hash":""}},"signature":[1,"E785764AED6D92D7CC65C0A3A4ED9C8465198A05142C3E6C7F3EF601FDCD3A604900B77B7B87C046221EF99FD038A960398385BD5BBAA50EE4F86DE757B8F704"]}}],"peer_key":""}]}
{"time":"2017-04-27T22:24:01.350Z","msg":[2,{"msg":[19,{"Height":1,"Round":0,"Part":{"index":0,"bytes":"0101010F74656E6465726D696E745F74657374010114B96165CF4496C00000000000000114354594CBFC1A7BCA1AD0050ED6AA010023EADA390001000100000000","proof":{"aunts":[]}}}],"peer_key":""}]}
{"time":"2017-04-27T22:24:01.351Z","msg":[1,{"height":1,"round":0,"step":"RoundStepPrevote"}]}
{"time":"2017-04-27T22:24:01.351Z","msg":[2,{"msg":[20,{"Vote":{"validator_address":"D028C9981F7A87F3093672BF0D5B0E2A1B3ED456","validator_index":0,"height":1,"round":0,"type":1,"block_id":{"hash":"F3BBFBE7E4A5D619E2C498C3D1B912883786DD71","parts":{"total":1,"hash":"ACED4A95DDEBD24E66A681F7EAB4CA22C4B8546D"}},"signature":[1,"35C937C78D061ECDC3770982A1330C9AA7F6FEF00835C43DEB50B8FCF69A3EEF221E675EE5E469114F64E4FBBABA414EB9170E1025FC47D3F0EADE46767D2E00"]}}],"peer_key":""}]}
{"time":"2017-04-27T22:24:01.352Z","msg":[1,{"height":1,"round":0,"step":"RoundStepPrecommit"}]}
{"time":"2017-04-27T22:24:01.352Z","msg":[2,{"msg":[20,{"Vote":{"validator_address":"D028C9981F7A87F3093672BF0D5B0E2A1B3ED456","validator_index":0,"height":1,"round":0,"type":2,"block_id":{"hash":"F3BBFBE7E4A5D619E2C498C3D1B912883786DD71","parts":{"total":1,"hash":"ACED4A95DDEBD24E66A681F7EAB4CA22C4B8546D"}},"signature":[1,"D1A7D27FCD5D352F3A3EDA8DE368520BC5B796662E32BCD8D91CDB8209A88DAF37CB7C4C93143D3C12B37C1435229268098CFFD0AD1400D88DA7606454692301"]}}],"peer_key":""}]}
{"time":"2017-04-27T22:24:01.352Z","msg":[1,{"height":1,"round":0,"step":"RoundStepCommit"}]}

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long


@@ -1,14 +1,15 @@
#ENDHEIGHT: 0
{"time":"2016-12-18T05:05:43.641Z","msg":[3,{"duration":969409681,"height":1,"round":0,"step":1}]}
{"time":"2016-12-18T05:05:43.643Z","msg":[1,{"height":1,"round":0,"step":"RoundStepPropose"}]}
{"time":"2016-12-18T05:05:43.643Z","msg":[2,{"msg":[17,{"Proposal":{"height":1,"round":0,"block_parts_header":{"total":5,"hash":"C916905C3C444501DDDAA1BF52E959B7531E762E"},"pol_round":-1,"pol_block_id":{"hash":"","parts":{"total":0,"hash":""}},"signature":[1,"F1A8E9928889C68FD393F3983B5362AECA4A95AA13FE3C78569B2515EC046893CB718071CAF54F3F1507DCD851B37CD5557EA17BB5471D2DC6FB5AC5FBB72E02"]}}],"peer_key":""}]}
{"time":"2016-12-18T05:05:43.643Z","msg":[2,{"msg":[19,{"Height":1,"Round":0,"Part":{"index":0,"bytes":"0101010F74656E6465726D696E745F7465737401011491414B3483A8400190000000000114926EA77D30A4D19866159DE7E58AA9461F90F9D10114354594CBFC1A7BCA1AD0050ED6AA010023EADA3900010190010D6162636431323D646362613132010D6162636431333D646362613133010D6162636431343D646362613134010D6162636431353D646362613135010D6162636431363D646362613136010D6162636431373D646362613137010D6162636431383D646362613138010D6162636431393D646362613139010D6162636432303D646362613230010D6162636432313D646362613231010D6162636432323D646362613232010D6162636432333D646362613233010D6162636432343D646362613234010D6162636432353D646362613235010D6162636432363D646362613236010D6162636432373D646362613237010D6162636432383D646362613238010D6162636432393D646362613239010D6162636433303D646362613330010D6162636433313D646362613331010D6162636433323D646362613332010D6162636433333D646362613333010D6162636433343D646362613334010D6162636433353D646362613335010D6162636433363D646362613336010D6162636433373D646362613337010D6162636433383D646362613338010D6162636433393D646362613339010D6162636434303D","proof":{"aunts":["C9FBD66B63A976638196323F5B93494BDDFC9EED","47FD83BB7607E679EE5CF0783372D13C5A264056","FEEC97078A26B7F6057821C0660855170CC6F1D7"]}}}],"peer_key":""}]}
{"time":"2016-12-18T05:05:43.643Z","msg":[2,{"msg":[19,{"Height":1,"Round":0,"Part":{"index":1,"bytes":"646362613430010D6162636434313D646362613431010D6162636434323D646362613432010D6162636434333D646362613433010D6162636434343D646362613434010D6162636434353D646362613435010D6162636434363D646362613436010D6162636434373D646362613437010D6162636434383D646362613438010D6162636434393D646362613439010D6162636435303D646362613530010D6162636435313D646362613531010D6162636435323D646362613532010D6162636435333D646362613533010D6162636435343D646362613534010D6162636435353D646362613535010D6162636435363D646362613536010D6162636435373D646362613537010D6162636435383D646362613538010D6162636435393D646362613539010D6162636436303D646362613630010D6162636436313D646362613631010D6162636436323D646362613632010D6162636436333D646362613633010D6162636436343D646362613634010D6162636436353D646362613635010D6162636436363D646362613636010D6162636436373D646362613637010D6162636436383D646362613638010D6162636436393D646362613639010D6162636437303D646362613730010D6162636437313D646362613731010D6162636437323D646362613732010D6162636437333D646362613733010D6162636437343D6463","proof":{"aunts":["D7FB03B935B77C322064F8277823CDB5C7018597","47FD83BB7607E679EE5CF0783372D13C5A264056","FEEC97078A26B7F6057821C0660855170CC6F1D7"]}}}],"peer_key":""}]}
{"time":"2016-12-18T05:05:43.644Z","msg":[2,{"msg":[19,{"Height":1,"Round":0,"Part":{"index":2,"bytes":"62613734010D6162636437353D646362613735010D6162636437363D646362613736010D6162636437373D646362613737010D6162636437383D646362613738010D6162636437393D646362613739010D6162636438303D646362613830010D6162636438313D646362613831010D6162636438323D646362613832010D6162636438333D646362613833010D6162636438343D646362613834010D6162636438353D646362613835010D6162636438363D646362613836010D6162636438373D646362613837010D6162636438383D646362613838010D6162636438393D646362613839010D6162636439303D646362613930010D6162636439313D646362613931010D6162636439323D646362613932010D6162636439333D646362613933010D6162636439343D646362613934010D6162636439353D646362613935010D6162636439363D646362613936010D6162636439373D646362613937010D6162636439383D646362613938010D6162636439393D646362613939010F616263643130303D64636261313030010F616263643130313D64636261313031010F616263643130323D64636261313032010F616263643130333D64636261313033010F616263643130343D64636261313034010F616263643130353D64636261313035010F616263643130363D64636261313036010F616263643130373D64636261","proof":{"aunts":["A607D9BF5107E6C9FD19B6928D9CC7714B0730E4","FEEC97078A26B7F6057821C0660855170CC6F1D7"]}}}],"peer_key":""}]}
{"time":"2016-12-18T05:05:43.644Z","msg":[2,{"msg":[19,{"Height":1,"Round":0,"Part":{"index":3,"bytes":"313037010F616263643130383D64636261313038010F616263643130393D64636261313039010F616263643131303D64636261313130010F616263643131313D64636261313131010F616263643131323D64636261313132010F616263643131333D64636261313133010F616263643131343D64636261313134010F616263643131353D64636261313135010F616263643131363D64636261313136010F616263643131373D64636261313137010F616263643131383D64636261313138010F616263643131393D64636261313139010F616263643132303D64636261313230010F616263643132313D64636261313231010F616263643132323D64636261313232010F616263643132333D64636261313233010F616263643132343D64636261313234010F616263643132353D64636261313235010F616263643132363D64636261313236010F616263643132373D64636261313237010F616263643132383D64636261313238010F616263643132393D64636261313239010F616263643133303D64636261313330010F616263643133313D64636261313331010F616263643133323D64636261313332010F616263643133333D64636261313333010F616263643133343D64636261313334010F616263643133353D64636261313335010F616263643133363D64636261313336010F616263643133373D646362613133","proof":{"aunts":["0FD794B3506B9E92CDE3703F7189D42167E77095","86D455F542DA79F5A764B9DABDEABF01F4BAB2AB"]}}}],"peer_key":""}]}
{"time":"2016-12-18T05:05:43.644Z","msg":[2,{"msg":[19,{"Height":1,"Round":0,"Part":{"index":4,"bytes":"37010F616263643133383D64636261313338010F616263643133393D64636261313339010F616263643134303D64636261313430010F616263643134313D64636261313431010F616263643134323D64636261313432010F616263643134333D64636261313433010F616263643134343D64636261313434010F616263643134353D64636261313435010F616263643134363D64636261313436010F616263643134373D64636261313437010F616263643134383D64636261313438010F616263643134393D64636261313439010F616263643135303D64636261313530010F616263643135313D64636261313531010F616263643135323D64636261313532010F616263643135333D64636261313533010F616263643135343D64636261313534010F616263643135353D646362613135350100000000","proof":{"aunts":["50CBDC078A660EAE3442BA355BE10EE0D04408D1","86D455F542DA79F5A764B9DABDEABF01F4BAB2AB"]}}}],"peer_key":""}]}
{"time":"2016-12-18T05:05:43.645Z","msg":[1,{"height":1,"round":0,"step":"RoundStepPrevote"}]}
{"time":"2016-12-18T05:05:43.645Z","msg":[2,{"msg":[20,{"Vote":{"validator_address":"D028C9981F7A87F3093672BF0D5B0E2A1B3ED456","validator_index":0,"height":1,"round":0,"type":1,"block_id":{"hash":"6ADACDC2871C59A67337DAFD5045A982ED070C51","parts":{"total":5,"hash":"C916905C3C444501DDDAA1BF52E959B7531E762E"}},"signature":[1,"E815E0A63B7EEE7894DE2D72372A7C393434AC8ACCC46B60C628910F73351806D55A59994F08B454BFD71EDAA0CA95733CA47E37FFDAF9AAA2431A8160176E01"]}}],"peer_key":""}]}
{"time":"2016-12-18T05:05:43.647Z","msg":[1,{"height":1,"round":0,"step":"RoundStepPrecommit"}]}
{"time":"2016-12-18T05:05:43.647Z","msg":[2,{"msg":[20,{"Vote":{"validator_address":"D028C9981F7A87F3093672BF0D5B0E2A1B3ED456","validator_index":0,"height":1,"round":0,"type":2,"block_id":{"hash":"6ADACDC2871C59A67337DAFD5045A982ED070C51","parts":{"total":5,"hash":"C916905C3C444501DDDAA1BF52E959B7531E762E"}},"signature":[1,"9AAC3F3A118EE039EB460E9E5308D490D671C7490309BD5D62B5F392205C7E420DFDAF90F08294FF36BE8A9AA5CC203C1F2088B42D2BB8EE40A45F2BB5C54D0A"]}}],"peer_key":""}]}
{"time":"2016-12-18T05:05:43.648Z","msg":[1,{"height":1,"round":0,"step":"RoundStepCommit"}]}
{"time":"2017-04-27T22:23:56.310Z","msg":[3,{"duration":969732098,"height":1,"round":0,"step":1}]}
{"time":"2017-04-27T22:23:56.312Z","msg":[1,{"height":1,"round":0,"step":"RoundStepPropose"}]}
{"time":"2017-04-27T22:23:56.312Z","msg":[2,{"msg":[17,{"Proposal":{"height":1,"round":0,"block_parts_header":{"total":6,"hash":"A3C176F13F5CBC7C48EE27A472800410C9D487DC"},"pol_round":-1,"pol_block_id":{"hash":"","parts":{"total":0,"hash":""}},"signature":[1,"7624F6E943B7A207E16D1FA87EA099BD924E930F98E7DECBC01DB37735C619409588A67C2EABA9845FD6B80FDB65ECFCDA5F0DEFCEF74B8C34DB8E0540480203"]}}],"peer_key":""}]}
{"time":"2017-04-27T22:23:56.312Z","msg":[2,{"msg":[19,{"Height":1,"Round":0,"Part":{"index":0,"bytes":"0101010F74656E6465726D696E745F74657374010114B96164A30A118001620000000001141F6753D22BACA2180B1EADD722434EB28444D91D0114354594CBFC1A7BCA1AD0050ED6AA010023EADA3900010162011A3631363236333634333133303344363436333632363133313330011A3631363236333634333133313344363436333632363133313331011A3631363236333634333133323344363436333632363133313332011A3631363236333634333133333344363436333632363133313333011A3631363236333634333133343344363436333632363133313334011A3631363236333634333133353344363436333632363133313335011A3631363236333634333133363344363436333632363133313336011A3631363236333634333133373344363436333632363133313337011A3631363236333634333133383344363436333632363133313338011A3631363236333634333133393344363436333632363133313339011A3631363236333634333233303344363436333632363133323330011A3631363236333634333233313344363436333632363133323331011A3631363236333634333233323344363436333632363133323332011A3631363236333634333233333344363436333632363133323333011A3631363236333634333233343344363436333632363133323334011A36313632363336","proof":{"aunts":["49F4B71E3D7C457415069E2EA916DB12F67AA8D0","D35A72BEDAAAAC17045D7BFAAFA94C2EC0B0A4C2","705BC647374F3495EE73C3F44C21E9BDB4731738"]}}}],"peer_key":""}]}
{"time":"2017-04-27T22:23:56.312Z","msg":[2,{"msg":[19,{"Height":1,"Round":0,"Part":{"index":1,"bytes":"34333233353344363436333632363133323335011A3631363236333634333233363344363436333632363133323336011A3631363236333634333233373344363436333632363133323337011A3631363236333634333233383344363436333632363133323338011A3631363236333634333233393344363436333632363133323339011A3631363236333634333333303344363436333632363133333330011A3631363236333634333333313344363436333632363133333331011A3631363236333634333333323344363436333632363133333332011A3631363236333634333333333344363436333632363133333333011A3631363236333634333333343344363436333632363133333334011A3631363236333634333333353344363436333632363133333335011A3631363236333634333333363344363436333632363133333336011A3631363236333634333333373344363436333632363133333337011A3631363236333634333333383344363436333632363133333338011A3631363236333634333333393344363436333632363133333339011A3631363236333634333433303344363436333632363133343330011A3631363236333634333433313344363436333632363133343331011A3631363236333634333433323344363436333632363133343332011A363136323633363433343333334436","proof":{"aunts":["5AD2A9A1A49A1FD6EF83F05FA4588F800B29DEF1","D35A72BEDAAAAC17045D7BFAAFA94C2EC0B0A4C2","705BC647374F3495EE73C3F44C21E9BDB4731738"]}}}],"peer_key":""}]}
{"time":"2017-04-27T22:23:56.312Z","msg":[2,{"msg":[19,{"Height":1,"Round":0,"Part":{"index":2,"bytes":"3436333632363133343333011A3631363236333634333433343344363436333632363133343334011A3631363236333634333433353344363436333632363133343335011A3631363236333634333433363344363436333632363133343336011A3631363236333634333433373344363436333632363133343337011A3631363236333634333433383344363436333632363133343338011A3631363236333634333433393344363436333632363133343339011A3631363236333634333533303344363436333632363133353330011A3631363236333634333533313344363436333632363133353331011A3631363236333634333533323344363436333632363133353332011A3631363236333634333533333344363436333632363133353333011A3631363236333634333533343344363436333632363133353334011A3631363236333634333533353344363436333632363133353335011A3631363236333634333533363344363436333632363133353336011A3631363236333634333533373344363436333632363133353337011A3631363236333634333533383344363436333632363133353338011A3631363236333634333533393344363436333632363133353339011A3631363236333634333633303344363436333632363133363330011A3631363236333634333633313344363436333632363133","proof":{"aunts":["8B5786C3D871EE37B0F4B2DECAC39E157340DFBE","705BC647374F3495EE73C3F44C21E9BDB4731738"]}}}],"peer_key":""}]}
{"time":"2017-04-27T22:23:56.312Z","msg":[2,{"msg":[19,{"Height":1,"Round":0,"Part":{"index":3,"bytes":"363331011A3631363236333634333633323344363436333632363133363332011A3631363236333634333633333344363436333632363133363333011A3631363236333634333633343344363436333632363133363334011A3631363236333634333633353344363436333632363133363335011A3631363236333634333633363344363436333632363133363336011A3631363236333634333633373344363436333632363133363337011A3631363236333634333633383344363436333632363133363338011A3631363236333634333633393344363436333632363133363339011A3631363236333634333733303344363436333632363133373330011A3631363236333634333733313344363436333632363133373331011A3631363236333634333733323344363436333632363133373332011A3631363236333634333733333344363436333632363133373333011A3631363236333634333733343344363436333632363133373334011A3631363236333634333733353344363436333632363133373335011A3631363236333634333733363344363436333632363133373336011A3631363236333634333733373344363436333632363133373337011A3631363236333634333733383344363436333632363133373338011A3631363236333634333733393344363436333632363133373339011A363136","proof":{"aunts":["56097661A1B2707588100586B3B1C2C8A51057D1","6DE889147DF528EEB5F7422E95DC45900CAFB619","247C721D5CEB90BB1FE389BA74C43DF0955E1647"]}}}],"peer_key":""}]}
{"time":"2017-04-27T22:23:56.312Z","msg":[2,{"msg":[19,{"Height":1,"Round":0,"Part":{"index":4,"bytes":"3236333634333833303344363436333632363133383330011A3631363236333634333833313344363436333632363133383331011A3631363236333634333833323344363436333632363133383332011A3631363236333634333833333344363436333632363133383333011A3631363236333634333833343344363436333632363133383334011A3631363236333634333833353344363436333632363133383335011A3631363236333634333833363344363436333632363133383336011A3631363236333634333833373344363436333632363133383337011A3631363236333634333833383344363436333632363133383338011A3631363236333634333833393344363436333632363133383339011A3631363236333634333933303344363436333632363133393330011A3631363236333634333933313344363436333632363133393331011A3631363236333634333933323344363436333632363133393332011A3631363236333634333933333344363436333632363133393333011A3631363236333634333933343344363436333632363133393334011A3631363236333634333933353344363436333632363133393335011A3631363236333634333933363344363436333632363133393336011A3631363236333634333933373344363436333632363133393337011A3631363236333634333933","proof":{"aunts":["081D3DC5F11850851D5F0D760B98EE87BFA6B8B0","6DE889147DF528EEB5F7422E95DC45900CAFB619","247C721D5CEB90BB1FE389BA74C43DF0955E1647"]}}}],"peer_key":""}]}
{"time":"2017-04-27T22:23:56.313Z","msg":[2,{"msg":[19,{"Height":1,"Round":0,"Part":{"index":5,"bytes":"383344363436333632363133393338011A3631363236333634333933393344363436333632363133393339011E363136323633363433313330333033443634363336323631333133303330011E363136323633363433313330333133443634363336323631333133303331011E363136323633363433313330333233443634363336323631333133303332011E363136323633363433313330333333443634363336323631333133303333011E363136323633363433313330333433443634363336323631333133303334011E363136323633363433313330333533443634363336323631333133303335011E363136323633363433313330333633443634363336323631333133303336011E3631363236333634333133303337334436343633363236313331333033370100000000","proof":{"aunts":["6AA912328C2B52EFA0ECE71F523E137E400EC484","247C721D5CEB90BB1FE389BA74C43DF0955E1647"]}}}],"peer_key":""}]}
{"time":"2017-04-27T22:23:56.314Z","msg":[1,{"height":1,"round":0,"step":"RoundStepPrevote"}]}
{"time":"2017-04-27T22:23:56.314Z","msg":[2,{"msg":[20,{"Vote":{"validator_address":"D028C9981F7A87F3093672BF0D5B0E2A1B3ED456","validator_index":0,"height":1,"round":0,"type":1,"block_id":{"hash":"62371CF72F8662378691706DB256C833CF1AF81B","parts":{"total":6,"hash":"A3C176F13F5CBC7C48EE27A472800410C9D487DC"}},"signature":[1,"255906FAAA50C84E85DABF7DE73468E4F95DB4E46F598848145926E2FAD77CA682BF07E09E2F3EC81FFBD9A036B67914A3C02F819B69248D777AEBA792725907"]}}],"peer_key":""}]}
{"time":"2017-04-27T22:23:56.315Z","msg":[1,{"height":1,"round":0,"step":"RoundStepPrecommit"}]}
{"time":"2017-04-27T22:23:56.315Z","msg":[2,{"msg":[20,{"Vote":{"validator_address":"D028C9981F7A87F3093672BF0D5B0E2A1B3ED456","validator_index":0,"height":1,"round":0,"type":2,"block_id":{"hash":"62371CF72F8662378691706DB256C833CF1AF81B","parts":{"total":6,"hash":"A3C176F13F5CBC7C48EE27A472800410C9D487DC"}},"signature":[1,"056CC15C748434D0A59B64B45CB56EDC1A437A426E68FA63DC7D61A7C17B0F768F207D81340D129A57C5A64195F8AFDD03B6BF28D7B2286290D61BCE88FCA304"]}}],"peer_key":""}]}
{"time":"2017-04-27T22:23:56.316Z","msg":[1,{"height":1,"round":0,"step":"RoundStepCommit"}]}

View File

@@ -3,7 +3,8 @@ package consensus
import (
"time"
. "github.com/tendermint/go-common"
. "github.com/tendermint/tmlibs/common"
"github.com/tendermint/tmlibs/log"
)
var (
@@ -18,6 +19,8 @@ type TimeoutTicker interface {
Stop() bool
Chan() <-chan timeoutInfo // on which to receive a timeout
ScheduleTimeout(ti timeoutInfo) // reset the timer
SetLogger(log.Logger)
}
// timeoutTicker wraps time.Timer,
@@ -39,8 +42,8 @@ func NewTimeoutTicker() TimeoutTicker {
tickChan: make(chan timeoutInfo, tickTockBufferSize),
tockChan: make(chan timeoutInfo, tickTockBufferSize),
}
tt.BaseService = *NewBaseService(nil, "TimeoutTicker", tt)
tt.stopTimer() // don't want to fire until the first scheduled timeout
tt.BaseService = *NewBaseService(log, "TimeoutTicker", tt)
return tt
}
@@ -75,7 +78,7 @@ func (t *timeoutTicker) stopTimer() {
select {
case <-t.timer.C:
default:
log.Debug("Timer already stopped")
t.Logger.Debug("Timer already stopped")
}
}
}
@@ -84,12 +87,12 @@ func (t *timeoutTicker) stopTimer() {
// timers are interrupted and replaced by new ticks from later steps
// timeouts of 0 on the tickChan will be immediately relayed to the tockChan
func (t *timeoutTicker) timeoutRoutine() {
log.Debug("Starting timeout routine")
t.Logger.Debug("Starting timeout routine")
var ti timeoutInfo
for {
select {
case newti := <-t.tickChan:
log.Debug("Received tick", "old_ti", ti, "new_ti", newti)
t.Logger.Debug("Received tick", "old_ti", ti, "new_ti", newti)
// ignore tickers for old height/round/step
if newti.Height < ti.Height {
@@ -111,9 +114,9 @@ func (t *timeoutTicker) timeoutRoutine() {
// NOTE time.Timer allows duration to be non-positive
ti = newti
t.timer.Reset(ti.Duration)
log.Debug("Scheduled timeout", "dur", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step)
t.Logger.Debug("Scheduled timeout", "dur", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step)
case <-t.timer.C:
log.Info("Timed out", "dur", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step)
t.Logger.Info("Timed out", "dur", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step)
// goroutine here guarantees timeoutRoutine doesn't block.
// Determinism comes from playback in the receiveRoutine.
// We can eliminate it by merging the timeoutRoutine into receiveRoutine

View File

@@ -1,7 +1,7 @@
package consensus
import (
. "github.com/tendermint/go-common"
. "github.com/tendermint/tmlibs/common"
)
// kind of arbitrary

View File

@@ -3,10 +3,10 @@ package consensus
import (
"time"
auto "github.com/tendermint/go-autofile"
. "github.com/tendermint/go-common"
"github.com/tendermint/go-wire"
wire "github.com/tendermint/go-wire"
"github.com/tendermint/tendermint/types"
auto "github.com/tendermint/tmlibs/autofile"
. "github.com/tendermint/tmlibs/common"
)
//--------------------------------------------------------
@@ -49,9 +49,8 @@ func NewWAL(walFile string, light bool) (*WAL, error) {
group: group,
light: light,
}
wal.BaseService = *NewBaseService(log, "WAL", wal)
_, err = wal.Start()
return wal, err
wal.BaseService = *NewBaseService(nil, "WAL", wal)
return wal, nil
}
func (wal *WAL) OnStart() error {

View File

@@ -1,6 +1,6 @@
# Architecture Decision Records
This is a location to record all high-level architecture decisions in the tendermin project. Not the implementation details, but the reasoning that happened. This should be refered to for guidance of the "right way" to extend the application. And if we notice that the original decisions were lacking, we should have another open discussion, record the new decisions here, and then modify the code to match.
This is a location to record all high-level architecture decisions in the tendermint project. Not the implementation details, but the reasoning that happened. This should be referred to for guidance on the "right way" to extend the application. And if we notice that the original decisions were lacking, we should have another open discussion, record the new decisions here, and then modify the code to match.
This is like our guide and mentor when Jae and Bucky are offline.... The concept comes from a [blog post](https://product.reverb.com/documenting-architecture-decisions-the-reverb-way-a3563bb24bd0#.78xhdix6t) that resonated among the team when Anton shared it.

View File

@@ -0,0 +1,216 @@
# ADR 1: Logging
## Context
The current logging system in Tendermint is very static and not flexible enough.
Issues: [358](https://github.com/tendermint/tendermint/issues/358), [375](https://github.com/tendermint/tendermint/issues/375).
What we want from the new system:
- per package dynamic log levels
- dynamic logger setting (logger tied to the processing struct)
- conventions
- be more visually appealing
"dynamic" here means the ability to set smth in runtime.
## Decision
### 1) An interface
First, we will need an interface for all of our libraries (`tmlibs`, Tendermint, etc.). My personal preference is the go-kit `Logger` interface (see Appendix A), but that would be too big a change. Plus, we would still need levels.
```go
// log.go
type Logger interface {
	Debug(msg string, keyvals ...interface{}) error
	Info(msg string, keyvals ...interface{}) error
	Error(msg string, keyvals ...interface{}) error
	With(keyvals ...interface{}) Logger
}
```
On a side note: the difference between `Info` and `Notice` is subtle. We could
probably do without `Notice`. I don't think we need `Panic` or `Fatal` as part of
the interface; these functions could be implemented as helpers. In fact, we already
have some in `tmlibs/common`.
- `Debug` - extended output for devs
- `Info` - all that is useful for a user
- `Error` - errors
`Notice` should become `Info`; `Warn` should become either `Error` or `Debug`, depending on the message; `Crit` should become `Error`.
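For example, that mapping might look like this at existing call sites (a sketch; the variables `h`, `err`, and `p` are hypothetical):
```go
// Old log15-style calls translated to the proposed interface.
logger.Info("Committed block", "height", h)   // was: log.Notice(...)
logger.Error("WAL corrupted", "err", err)     // was: log.Crit(...)
logger.Debug("Send queue full", "peer", p)    // was: log.Warn(...), judged non-critical here
```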
This interface should go into `tmlibs/log`. All libraries which are part of the core (tendermint/tendermint) should obey it.
### 2) Logger with our current formatting
On top of this interface, we will need to implement a stdout logger, which will be used when Tendermint is configured to output logs to STDOUT.
Many people say that they like the current output, so let's stick with it.
```
NOTE[04-25|14:45:08] ABCI Replay Blocks module=consensus appHeight=0 storeHeight=0 stateHeight=0
```
A couple of minor changes:
```
I[04-25|14:45:08.322] ABCI Replay Blocks module=consensus appHeight=0 storeHeight=0 stateHeight=0
```
Notice the level is now encoded with a single character, and the timestamp includes milliseconds.
Note: there are many other formats out there like [logfmt](https://brandur.org/logfmt).
This logger could be implemented on top of any logging library - [logrus](https://github.com/sirupsen/logrus), [go-kit/log](https://github.com/go-kit/kit/tree/master/log), [zap](https://github.com/uber-go/zap), log15 - so long as it:
a) supports colored output<br>
b) is moderately fast (buffering) <br>
c) conforms to the new interface, or an adapter can be written for it <br>
d) is somewhat configurable<br>
go-kit is my favorite so far. Check out how easy it is to color errors in red: https://github.com/go-kit/kit/blob/master/log/term/example_test.go#L12. Although coloring can only be applied to the whole string :(
```
go-kit +: flexible, modular
go-kit -: logfmt format https://brandur.org/logfmt
logrus +: popular, feature rich (hooks), API and output is more like what we want
logrus -: not so flexible
```
```go
// tm_logger.go

// NewTmLogger returns a logger that encodes keyvals to the Writer in
// tm format.
func NewTmLogger(w io.Writer) Logger {
	return &tmLogger{kitlog.NewLogfmtLogger(w)}
}

// SetLevel uses a pointer receiver so the filter actually sticks; the
// parameter is named lvl to avoid shadowing the go-kit level package.
func (l *tmLogger) SetLevel(lvl string) {
	switch lvl {
	case "debug":
		l.sourceLogger = level.NewFilter(l.sourceLogger, level.AllowDebug())
	}
}

func (l *tmLogger) Info(msg string, keyvals ...interface{}) error {
	return l.sourceLogger.Log(append([]interface{}{"msg", msg}, keyvals...)...)
}

// log.go

func With(logger Logger, keyvals ...interface{}) Logger {
	// sketch: assumes the concrete tmLogger implementation
	return &tmLogger{kitlog.With(logger.(*tmLogger).sourceLogger, keyvals...)}
}
```
Usage:
```go
logger := log.NewTmLogger(os.Stdout)
logger.SetLevel(config.GetString("log_level"))
node.SetLogger(log.With(logger, "node", Name))
```
**Other log formatters**
In the future, we may want other formatters like JSONFormatter.
```
{ "level": "notice", "time": "2017-04-25 14:45:08.562471297 -0400 EDT", "module": "consensus", "msg": "ABCI Replay Blocks", "appHeight": 0, "storeHeight": 0, "stateHeight": 0 }
```
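If we do, a hypothetical JSON formatter could satisfy the same `Logger` interface. The sketch below is illustrative only - the names `jsonLogger` and `NewJSONLogger` are not an agreed API - and it assumes the interface from section 1 plus the standard `encoding/json`, `fmt`, `io`, and `time` packages:
```go
// Sketch of a hypothetical JSON formatter satisfying the proposed Logger interface.
type jsonLogger struct {
	enc *json.Encoder
	ctx []interface{} // keyvals bound via With
}

func NewJSONLogger(w io.Writer) Logger {
	return &jsonLogger{enc: json.NewEncoder(w)}
}

func (l *jsonLogger) log(level, msg string, keyvals []interface{}) error {
	m := map[string]interface{}{"level": level, "time": time.Now().Format(time.RFC3339), "msg": msg}
	kvs := append(append([]interface{}{}, l.ctx...), keyvals...)
	for i := 0; i+1 < len(kvs); i += 2 {
		m[fmt.Sprint(kvs[i])] = kvs[i+1]
	}
	return l.enc.Encode(m)
}

func (l *jsonLogger) Debug(msg string, keyvals ...interface{}) error { return l.log("debug", msg, keyvals) }
func (l *jsonLogger) Info(msg string, keyvals ...interface{}) error  { return l.log("info", msg, keyvals) }
func (l *jsonLogger) Error(msg string, keyvals ...interface{}) error { return l.log("error", msg, keyvals) }

func (l *jsonLogger) With(keyvals ...interface{}) Logger {
	return &jsonLogger{enc: l.enc, ctx: append(append([]interface{}{}, l.ctx...), keyvals...)}
}
```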
### 3) Dynamic logger setting
https://dave.cheney.net/2017/01/23/the-package-level-logger-anti-pattern
This is the hardest part and where the most work will be done. The logger should be tied to the processing struct, or to the context if it adds some fields to the logger.
```go
type BaseService struct {
log log15.Logger
name string
started uint32 // atomic
stopped uint32 // atomic
...
}
```
BaseService already contains a `log` field, so most structs embedding it should be fine. We should rename it to `logger`.
The only thing missing is the ability to set the logger:
```go
func (bs *BaseService) SetLogger(l log.Logger) {
	bs.logger = l
}
```
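For illustration, a hedged sketch of the intended wiring; the `Reactor` type and `wireUp` function here are hypothetical stand-ins, not real code:
```go
// A struct embedding BaseService receives its logger at wiring time
// and narrows it with its own context before using it.
type Reactor struct {
	BaseService
}

func wireUp(root log.Logger) *Reactor {
	r := &Reactor{}
	r.SetLogger(root.With("module", "p2p")) // SetLogger comes from BaseService
	return r
}
```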
### 4) Conventions
Important keyvals should go first. Example:
```
correct
I[04-25|14:45:08.322] ABCI Replay Blocks module=consensus instance=1 appHeight=0 storeHeight=0 stateHeight=0
```
not
```
wrong
I[04-25|14:45:08.322] ABCI Replay Blocks module=consensus appHeight=0 storeHeight=0 stateHeight=0 instance=1
```
For that, in most cases you'll need to add the `instance` field to the logger upon creation, not when you log a particular message:
```go
colorFn := func(keyvals ...interface{}) term.FgBgColor {
	// color instance 1 blue and instance 2 red
	for i := 0; i < len(keyvals)-1; i += 2 {
		if keyvals[i] == "instance" && keyvals[i+1] == 1 {
			return term.FgBgColor{Fg: term.Blue}
		} else if keyvals[i] == "instance" && keyvals[i+1] == 2 {
			return term.FgBgColor{Fg: term.Red}
		}
	}
	return term.FgBgColor{}
}
logger := term.NewLogger(os.Stdout, log.NewTmLogger, colorFn)
c1 := NewConsensusReactor(...)
c1.SetLogger(log.With(logger, "instance", 1))
c2 := NewConsensusReactor(...)
c2.SetLogger(log.With(logger, "instance", 2))
```
## Status
proposed
## Consequences
### Positive
A dynamic logger that can be turned off for some modules at runtime, and a public logging interface for other projects using Tendermint libraries.
### Negative
We may lose the ability to color keys in key-value pairs. go-kit allows you to easily change the foreground/background colors of the whole string, but not of its parts.
### Neutral
## Appendix A.
I really like the minimalistic approach go-kit took with its logger (https://github.com/go-kit/kit/tree/master/log):
```go
type Logger interface {
	Log(keyvals ...interface{}) error
}
```
See [The Hunt for a Logger Interface](https://go-talks.appspot.com/github.com/ChrisHines/talks/structured-logging/structured-logging.slide). The advantage is greater composability (check out how go-kit defines colored logging or leveled logging on top of this interface: https://github.com/go-kit/kit/tree/master/log).
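For example, leveled and contextual logging compose on top of that single method. This is a sketch against the go-kit `log` and `log/level` packages referenced above (imports assumed: `kitlog "github.com/go-kit/kit/log"`, `"github.com/go-kit/kit/log/level"`, `"os"`); the exact API may differ between versions:
```go
// Leveled logging built purely out of go-kit's one-method Logger.
logger := kitlog.NewLogfmtLogger(os.Stdout)
logger = level.NewFilter(logger, level.AllowInfo()) // drop debug records
logger = kitlog.With(logger, "module", "consensus")

level.Info(logger).Log("msg", "Started round", "height", 100) // emitted
level.Debug(logger).Log("msg", "gossiping block part")        // filtered out
```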

View File

@@ -2,7 +2,7 @@
To allow the efficient creation of an ABCI app, tendermint wishes to provide a reference implementation of a key-value store that provides merkle proofs of the data. These proofs then quickly allow the ABCI app to provide an apphash to the consensus engine, as well as a full proof to any client.
This engine is currently implemented in `go-merkle` with `merkleeyes` providing a language-agnostic binding via ABCi. It uses `go-db` bindings internally to persist data to leveldb.
This engine is currently implemented in `go-merkle` with `merkleeyes` providing a language-agnostic binding via ABCi. It uses `tmlibs/db` bindings internally to persist data to leveldb.
What are some of the requirements of this store:

147
glide.lock generated
View File

@@ -1,53 +1,88 @@
hash: d9724aa287c40d1b3856b6565f09235d809c8b2f7c6537c04f597137c0d6cd26
updated: 2017-04-21T13:09:25.708801802-04:00
hash: 93f15c9766ea826c29a91f545c42172eafd8c61e39c1d81617114ad1a9c9eaf2
updated: 2017-05-18T06:13:24.295793122-04:00
imports:
- name: github.com/btcsuite/btcd
version: 4b348c1d33373d672edd83fc576892d0e46686d2
version: 53f55a46349aa8f44b90895047e843666991cf24
subpackages:
- btcec
- name: github.com/BurntSushi/toml
version: b26d9c308763d68093482582cea63d69be07a0f0
- name: github.com/davecgh/go-spew
version: 6d212800a42e8ab5c146b8ace3490ee17e5225f9
version: 04cdfd42973bb9c8589fd6a731800cf222fde1a9
subpackages:
- spew
- name: github.com/ebuchman/fail-test
version: 95f809107225be108efcf10a3509e4ea6ceef3c4
- name: github.com/fsnotify/fsnotify
version: 4da3e2cfbabc9f751898f250b49f2439785783a1
- name: github.com/go-kit/kit
version: 6964666de57c88f7d93da127e900d201b632f561
subpackages:
- log
- log/level
- log/term
- name: github.com/go-logfmt/logfmt
version: 390ab7935ee28ec6b286364bba9b4dd6410cb3d5
- name: github.com/go-stack/stack
version: 100eb0c0a9c5b306ca2fb4f165df21d80ada4b82
version: 7a2f19628aabfe68f0766b59e74d6315f8347d22
- name: github.com/gogo/protobuf
version: 100ba4e885062801d56799d78530b73b178a78f3
version: 9df9efe4c742f1a2bfdedf1c3b6902fc6e814c6b
subpackages:
- proto
- name: github.com/golang/protobuf
version: 2bba0603135d7d7f5cb73b2125beeda19c09f4ef
version: fec3b39b059c0f88fa6b20f5ed012b1aa203a8b4
subpackages:
- proto
- ptypes/any
- name: github.com/golang/snappy
version: 553a641470496b2327abcac10b36396bd98e45c9
- name: github.com/gorilla/websocket
version: 3ab3a8b8831546bd18fd182c20687ca853b2bb13
version: a91eba7f97777409bc2c443f5534d41dd20c5720
- name: github.com/hashicorp/hcl
version: 392dba7d905ed5d04a5794ba89f558b27e2ba1ca
subpackages:
- hcl/ast
- hcl/parser
- hcl/scanner
- hcl/strconv
- hcl/token
- json/parser
- json/scanner
- json/token
- name: github.com/inconshreveable/mousetrap
version: 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
- name: github.com/jmhodges/levigo
version: c42d9e0ca023e2198120196f842701bb4c55d7b9
- name: github.com/mattn/go-colorable
version: ded68f7a9561c023e790de24279db7ebf473ea80
- name: github.com/mattn/go-isatty
version: fc9e8d8ef48496124e79ae0df75490096eccf6fe
- name: github.com/kr/logfmt
version: b84e30acd515aadc4b783ad4ff83aff3299bdfe0
- name: github.com/magiconair/properties
version: 51463bfca2576e06c62a8504b5c0f06d61312647
- name: github.com/mitchellh/mapstructure
version: cc8532a8e9a55ea36402aa21efdf403a60d34096
- name: github.com/pelletier/go-buffruneio
version: c37440a7cf42ac63b919c752ca73a85067e05992
- name: github.com/pelletier/go-toml
version: 5c26a6ff6fd178719e15decac1c8196da0d7d6d1
- name: github.com/pkg/errors
version: 645ef00459ed84a119197bfb8d8205042c6df63d
version: c605e284fe17294bda444b34710735b29d1a9d90
- name: github.com/pmezard/go-difflib
version: d8ed2627bdf02c080bf22230dbb337003b7aba2d
subpackages:
- difflib
- name: github.com/spf13/afero
version: 9be650865eab0c12963d8753212f4f9c66cdcf12
subpackages:
- mem
- name: github.com/spf13/cast
version: acbeb36b902d72a7a4c18e8f3241075e7ab763e4
- name: github.com/spf13/cobra
version: 10f6b9d7e1631a54ad07c5c0fb71c28a1abfd3c2
version: 4cdb38c072b86bf795d2c81de50784d9fdd6eb77
- name: github.com/spf13/jwalterweatherman
version: 8f07c835e5cc1450c082fe3a439cf87b0cbb2d99
- name: github.com/spf13/pflag
version: 2300d0f8576fe575f71aaa5b9bbe4e1b0dc2eb51
version: e57e3eeb33f795204c1ca35f56c44f83227c6e66
- name: github.com/spf13/viper
version: 0967fc9aceab2ce9da34061253ac10fb99bba5b2
- name: github.com/stretchr/testify
version: 69483b4bd14f5845b5a1e55bca19e954e827f1d0
version: 4d4bfba8f1d1027c4fdbe371823030df51419987
subpackages:
- assert
- require
@@ -67,7 +102,7 @@ imports:
- leveldb/table
- leveldb/util
- name: github.com/tendermint/abci
version: 56e13d87f4e3ec1ea756957d6b23caa6ebcf0998
version: 864d1f80b36b440bde030a5c18d8ac3aa8c2949d
subpackages:
- client
- example/counter
@@ -79,56 +114,35 @@ imports:
subpackages:
- edwards25519
- extra25519
- name: github.com/tendermint/go-autofile
version: 48b17de82914e1ec2f134ce823ba426337d2c518
- name: github.com/tendermint/go-clist
version: 3baa390bbaf7634251c42ad69a8682e7e3990552
- name: github.com/tendermint/go-common
version: f9e3db037330c8a8d61d3966de8473eaf01154fa
subpackages:
- test
- name: github.com/tendermint/go-config
version: 620dcbbd7d587cf3599dedbf329b64311b0c307a
- name: github.com/tendermint/go-crypto
version: 0ca2c6fdb0706001ca4c4b9b80c9f428e8cf39da
- name: github.com/tendermint/go-data
version: e7fcc6d081ec8518912fcdc103188275f83a3ee5
- name: github.com/tendermint/go-db
version: 9643f60bc2578693844aacf380a7c32e4c029fee
- name: github.com/tendermint/go-events
version: f8ffbfb2be3483e9e7927495590a727f51c0c11f
- name: github.com/tendermint/go-flowrate
version: a20c98e61957faa93b4014fbd902f20ab9317a6a
subpackages:
- flowrate
- name: github.com/tendermint/go-logger
version: cefb3a45c0bf3c493a04e9bcd9b1540528be59f2
- name: github.com/tendermint/go-merkle
version: 714d4d04557fd068a7c2a1748241ce8428015a96
- name: github.com/tendermint/go-p2p
version: e8f33a47846708269d373f9c8080613d6c4f66b2
subpackages:
- upnp
- name: github.com/tendermint/go-rpc
version: 2c8df0ee6b60d8ac33662df13a4e358c679e02bf
subpackages:
- client
- server
- types
version: 7dff40942a64cdeefefa9446b2d104750b349f8a
- name: github.com/tendermint/go-wire
version: c1c9a57ab8038448ddea1714c0698f8051e5748c
- name: github.com/tendermint/log15
version: ae0f3d6450da9eac7074b439c8e1c3cabf0d5ce6
version: 5f88da3dbc1a72844e6dfaf274ce87f851d488eb
subpackages:
- term
- data
- data/base58
- name: github.com/tendermint/merkleeyes
version: 9fb76efa5aebe773a598f97e68e75fe53d520e70
version: a0e73e1ac3e18e12a007520a4ea2c9822256e307
subpackages:
- app
- client
- iavl
- testutil
- name: github.com/tendermint/tmlibs
version: 306795ae1d8e4f4a10dcc8bdb32a00455843c9d5
subpackages:
- autofile
- cli
- clist
- common
- db
- events
- flowrate
- log
- merkle
- test
- name: golang.org/x/crypto
version: 96846453c37f0876340a66a47f3f75b1f3a6cd2d
version: 0fe963104e9d1877082f8fb38f816fcd97eb1d10
subpackages:
- curve25519
- nacl/box
@@ -139,7 +153,7 @@ imports:
- ripemd160
- salsa20/salsa
- name: golang.org/x/net
version: c8c74377599bd978aee1cf3b9b63a8634051cec2
version: 513929065c19401a1c7b76ecd942f9f86a0c061b
subpackages:
- context
- http2
@@ -149,25 +163,26 @@ imports:
- lex/httplex
- trace
- name: golang.org/x/sys
version: ea9bcade75cb975a0b9738936568ab388b845617
version: e62c3de784db939836898e5c19ffd41bece347da
subpackages:
- unix
- name: golang.org/x/text
version: 19e3104b43db45fca0303f489a9536087b184802
version: 19e51611da83d6be54ddafce4a4af510cb3e9ea4
subpackages:
- secure/bidirule
- transform
- unicode/bidi
- unicode/norm
- name: google.golang.org/genproto
version: 411e09b969b1170a9f0c467558eb4c4c110d9c77
version: bb3573be0c484136831138976d444b8754777aff
subpackages:
- googleapis/rpc/status
- name: google.golang.org/grpc
version: 6914ab1e338c92da4218a23d27fcd03d0ad78d46
version: 11d93ecdb918872ee841ba3a2dc391aa6d4f57c3
subpackages:
- codes
- credentials
- grpclb/grpc_lb_v1
- grpclog
- internal
- keepalive
@@ -178,4 +193,6 @@ imports:
- status
- tap
- transport
- name: gopkg.in/yaml.v2
version: cd8b52f8269e0feb286dfeef29f8fe4d5b397e0b
testImports: []

View File

@@ -1,56 +1,56 @@
package: github.com/tendermint/tendermint
import:
- package: github.com/tendermint/go-autofile
version: develop
- package: github.com/tendermint/go-clist
version: develop
- package: github.com/tendermint/go-common
version: develop
- package: github.com/tendermint/go-config
version: develop
- package: github.com/tendermint/go-crypto
version: develop
- package: github.com/tendermint/go-data
version: develop
- package: github.com/tendermint/go-db
version: develop
- package: github.com/tendermint/go-events
version: develop
- package: github.com/tendermint/go-logger
version: develop
- package: github.com/tendermint/go-merkle
version: develop
- package: github.com/tendermint/go-p2p
version: develop
- package: github.com/tendermint/go-rpc
version: develop
- package: github.com/tendermint/go-wire
version: develop
- package: github.com/ebuchman/fail-test
- package: github.com/gogo/protobuf
subpackages:
- proto
- package: github.com/golang/protobuf
subpackages:
- proto
- package: github.com/gorilla/websocket
- package: github.com/pkg/errors
- package: github.com/spf13/cobra
- package: github.com/spf13/viper
- package: github.com/stretchr/testify
subpackages:
- require
- package: github.com/tendermint/abci
version: develop
- package: github.com/tendermint/go-flowrate
- package: github.com/tendermint/log15
- package: github.com/tendermint/ed25519
version: v0.5.0
subpackages:
- client
- example/dummy
- types
- package: github.com/tendermint/go-crypto
version: v0.2.0
- package: github.com/tendermint/go-wire
version: v0.6.2
subpackages:
- data
- package: github.com/tendermint/tmlibs
version: v0.2.0
subpackages:
- autofile
- cli
- clist
- common
- db
- events
- flowrate
- log
- merkle
- package: golang.org/x/crypto
subpackages:
- nacl/box
- nacl/secretbox
- ripemd160
- package: golang.org/x/net
subpackages:
- context
- package: google.golang.org/grpc
testImport:
- package: github.com/tendermint/merkleeyes
version: develop
subpackages:
- app
- package: github.com/gogo/protobuf
version: ^0.3
subpackages:
- proto
- package: github.com/gorilla/websocket
version: ^1.1.0
- package: github.com/spf13/cobra
- package: github.com/spf13/pflag
- package: github.com/pkg/errors
version: ^0.8.0
- package: golang.org/x/crypto
subpackages:
- ripemd160
testImport:
- package: github.com/stretchr/testify
version: ^1.1.4
subpackages:
- assert
- require
- iavl
- testutil

View File

@@ -1,18 +0,0 @@
package mempool
import (
"github.com/tendermint/go-logger"
)
var log = logger.New("module", "mempool")
/*
func init() {
log.SetHandler(
logger.LvlFilterHandler(
logger.LvlDebug,
logger.BypassHandler(),
),
)
}
*/

View File

@@ -7,11 +7,15 @@ import (
"sync/atomic"
"time"
"github.com/pkg/errors"
abci "github.com/tendermint/abci/types"
auto "github.com/tendermint/go-autofile"
"github.com/tendermint/go-clist"
. "github.com/tendermint/go-common"
cfg "github.com/tendermint/go-config"
auto "github.com/tendermint/tmlibs/autofile"
"github.com/tendermint/tmlibs/clist"
cmn "github.com/tendermint/tmlibs/common"
"github.com/tendermint/tmlibs/log"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/proxy"
"github.com/tendermint/tendermint/types"
)
@@ -47,7 +51,7 @@ TODO: Better handle abci client errors. (make it automatically handle connection
const cacheSize = 100000
type Mempool struct {
config cfg.Config
config *cfg.MempoolConfig
proxyMtx sync.Mutex
proxyAppConn proxy.AppConnMempool
@@ -64,9 +68,11 @@ type Mempool struct {
// A log of mempool txs
wal *auto.AutoFile
logger log.Logger
}
func NewMempool(config cfg.Config, proxyAppConn proxy.AppConnMempool) *Mempool {
func NewMempool(config *cfg.MempoolConfig, proxyAppConn proxy.AppConnMempool) *Mempool {
mempool := &Mempool{
config: config,
proxyAppConn: proxyAppConn,
@@ -76,26 +82,29 @@ func NewMempool(config cfg.Config, proxyAppConn proxy.AppConnMempool) *Mempool {
rechecking: 0,
recheckCursor: nil,
recheckEnd: nil,
cache: newTxCache(cacheSize),
logger: log.NewNopLogger(),
cache: newTxCache(cacheSize),
}
mempool.initWAL()
proxyAppConn.SetResponseCallback(mempool.resCb)
return mempool
}
// SetLogger allows you to set your own Logger.
func (mem *Mempool) SetLogger(l log.Logger) {
mem.logger = l
}
func (mem *Mempool) initWAL() {
walDir := mem.config.GetString("mempool_wal_dir")
walDir := mem.config.WalDir()
if walDir != "" {
err := EnsureDir(walDir, 0700)
err := cmn.EnsureDir(walDir, 0700)
if err != nil {
log.Error("Error ensuring Mempool wal dir", "error", err)
PanicSanity(err)
cmn.PanicSanity(errors.Wrap(err, "Error ensuring Mempool wal dir"))
}
af, err := auto.OpenAutoFile(walDir + "/wal")
if err != nil {
log.Error("Error opening Mempool wal file", "error", err)
PanicSanity(err)
cmn.PanicSanity(errors.Wrap(err, "Error opening Mempool wal file"))
}
mem.wal = af
}
@@ -202,7 +211,7 @@ func (mem *Mempool) resCbNormal(req *abci.Request, res *abci.Response) {
mem.txs.PushBack(memTx)
} else {
// ignore bad transaction
log.Info("Bad Transaction", "res", r)
mem.logger.Info("Bad Transaction", "res", r)
// remove from cache (it might be good later)
mem.cache.Remove(req.GetCheckTx().Tx)
@@ -219,7 +228,7 @@ func (mem *Mempool) resCbRecheck(req *abci.Request, res *abci.Response) {
case *abci.Response_CheckTx:
memTx := mem.recheckCursor.Value.(*mempoolTx)
if !bytes.Equal(req.GetCheckTx().Tx, memTx.tx) {
PanicSanity(Fmt("Unexpected tx response from proxy during recheck\n"+
cmn.PanicSanity(cmn.Fmt("Unexpected tx response from proxy during recheck\n"+
"Expected %X, got %X", r.CheckTx.Data, memTx.tx))
}
if r.CheckTx.Code == abci.CodeType_OK {
@@ -240,7 +249,7 @@ func (mem *Mempool) resCbRecheck(req *abci.Request, res *abci.Response) {
if mem.recheckCursor == nil {
// Done!
atomic.StoreInt32(&mem.rechecking, 0)
log.Info("Done rechecking txs")
mem.logger.Info("Done rechecking txs")
}
default:
// ignore other messages
@@ -269,7 +278,7 @@ func (mem *Mempool) collectTxs(maxTxs int) types.Txs {
} else if maxTxs < 0 {
maxTxs = mem.txs.Len()
}
txs := make([]types.Tx, 0, MinInt(mem.txs.Len(), maxTxs))
txs := make([]types.Tx, 0, cmn.MinInt(mem.txs.Len(), maxTxs))
for e := mem.txs.Front(); e != nil && len(txs) < maxTxs; e = e.Next() {
memTx := e.Value.(*mempoolTx)
txs = append(txs, memTx.tx)
@@ -298,9 +307,8 @@ func (mem *Mempool) Update(height int, txs types.Txs) {
// Recheck mempool txs if any txs were committed in the block
// NOTE/XXX: in some apps a tx could be invalidated due to EndBlock,
// so we really still do need to recheck, but this is for debugging
if mem.config.GetBool("mempool_recheck") &&
(mem.config.GetBool("mempool_recheck_empty") || len(txs) > 0) {
log.Info("Recheck txs", "numtxs", len(goodTxs))
if mem.config.Recheck && (mem.config.RecheckEmpty || len(txs) > 0) {
mem.logger.Info("Recheck txs", "numtxs", len(goodTxs))
mem.recheckTxs(goodTxs)
// At this point, mem.txs are being rechecked.
// mem.recheckCursor re-scans mem.txs and possibly removes some txs.

View File

@@ -4,21 +4,31 @@ import (
"encoding/binary"
"testing"
"github.com/tendermint/tendermint/config/tendermint_test"
"github.com/tendermint/abci/example/counter"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/proxy"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/abci/example/counter"
"github.com/tendermint/tmlibs/log"
)
func TestSerialReap(t *testing.T) {
config := tendermint_test.ResetConfig("mempool_mempool_test")
config := cfg.ResetTestRoot("mempool_test")
app := counter.NewCounterApplication(true)
app.SetOption("serial", "on")
cc := proxy.NewLocalClientCreator(app)
appConnMem, _ := cc.NewABCIClient()
appConnMem.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "mempool"))
if _, err := appConnMem.Start(); err != nil {
t.Fatalf("Error starting ABCI client: %v", err.Error())
}
appConnCon, _ := cc.NewABCIClient()
mempool := NewMempool(config, appConnMem)
appConnCon.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "consensus"))
if _, err := appConnCon.Start(); err != nil {
t.Fatalf("Error starting ABCI client: %v", err.Error())
}
mempool := NewMempool(config.Mempool, appConnMem)
mempool.SetLogger(log.TestingLogger())
deliverTxsRange := func(start, end int) {
// Deliver some txs.

View File

@@ -7,10 +7,11 @@ import (
"time"
abci "github.com/tendermint/abci/types"
"github.com/tendermint/go-clist"
cfg "github.com/tendermint/go-config"
"github.com/tendermint/go-p2p"
"github.com/tendermint/go-wire"
wire "github.com/tendermint/go-wire"
"github.com/tendermint/tmlibs/clist"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/types"
)
@@ -24,17 +25,17 @@ const (
// MempoolReactor handles mempool tx broadcasting amongst peers.
type MempoolReactor struct {
p2p.BaseReactor
config cfg.Config
config *cfg.MempoolConfig
Mempool *Mempool
evsw types.EventSwitch
}
func NewMempoolReactor(config cfg.Config, mempool *Mempool) *MempoolReactor {
func NewMempoolReactor(config *cfg.MempoolConfig, mempool *Mempool) *MempoolReactor {
memR := &MempoolReactor{
config: config,
Mempool: mempool,
}
memR.BaseReactor = *p2p.NewBaseReactor(log, "MempoolReactor", memR)
memR.BaseReactor = *p2p.NewBaseReactor("MempoolReactor", memR)
return memR
}
@@ -62,24 +63,24 @@ func (memR *MempoolReactor) RemovePeer(peer *p2p.Peer, reason interface{}) {
func (memR *MempoolReactor) Receive(chID byte, src *p2p.Peer, msgBytes []byte) {
_, msg, err := DecodeMessage(msgBytes)
if err != nil {
log.Warn("Error decoding message", "error", err)
memR.Logger.Error("Error decoding message", "error", err)
return
}
log.Debug("Receive", "src", src, "chId", chID, "msg", msg)
memR.Logger.Debug("Receive", "src", src, "chId", chID, "msg", msg)
switch msg := msg.(type) {
case *TxMessage:
err := memR.Mempool.CheckTx(msg.Tx, nil)
if err != nil {
// Bad, seen, or conflicting tx.
log.Info("Could not add tx", "tx", msg.Tx)
memR.Logger.Info("Could not add tx", "tx", msg.Tx)
return
} else {
log.Info("Added valid tx", "tx", msg.Tx)
memR.Logger.Info("Added valid tx", "tx", msg.Tx)
}
// broadcasting happens from go routines per peer
default:
log.Warn(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
memR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
}
}
@@ -102,7 +103,7 @@ type Peer interface {
// TODO: Handle mempool or reactor shutdown?
// As is this routine may block forever if no new txs come in.
func (memR *MempoolReactor) broadcastTxRoutine(peer Peer) {
if !memR.config.GetBool("mempool_broadcast") {
if !memR.config.Broadcast {
return
}

View File

@@ -1,7 +0,0 @@
package node
import (
"github.com/tendermint/go-logger"
)
var log = logger.New("module", "node")

View File

@@ -8,26 +8,27 @@ import (
"strings"
abci "github.com/tendermint/abci/types"
cmn "github.com/tendermint/go-common"
cfg "github.com/tendermint/go-config"
crypto "github.com/tendermint/go-crypto"
dbm "github.com/tendermint/go-db"
p2p "github.com/tendermint/go-p2p"
rpc "github.com/tendermint/go-rpc"
rpcserver "github.com/tendermint/go-rpc/server"
wire "github.com/tendermint/go-wire"
bc "github.com/tendermint/tendermint/blockchain"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/consensus"
mempl "github.com/tendermint/tendermint/mempool"
p2p "github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/proxy"
rpccore "github.com/tendermint/tendermint/rpc/core"
grpccore "github.com/tendermint/tendermint/rpc/grpc"
rpc "github.com/tendermint/tendermint/rpc/lib"
rpcserver "github.com/tendermint/tendermint/rpc/lib/server"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/state/txindex"
"github.com/tendermint/tendermint/state/txindex/kv"
"github.com/tendermint/tendermint/state/txindex/null"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tendermint/version"
cmn "github.com/tendermint/tmlibs/common"
dbm "github.com/tendermint/tmlibs/db"
"github.com/tendermint/tmlibs/log"
_ "net/http/pprof"
)
@@ -36,7 +37,7 @@ type Node struct {
cmn.BaseService
// config
config cfg.Config // user config
config *cfg.Config
genesisDoc *types.GenesisDoc // initial validator set
privValidator *types.PrivValidator // local node's validator key
@@ -57,42 +58,45 @@ type Node struct {
txIndexer txindex.TxIndexer
}
func NewNodeDefault(config cfg.Config) *Node {
func NewNodeDefault(config *cfg.Config, logger log.Logger) *Node {
// Get PrivValidator
privValidatorFile := config.GetString("priv_validator_file")
privValidator := types.LoadOrGenPrivValidator(privValidatorFile)
return NewNode(config, privValidator, proxy.DefaultClientCreator(config))
privValidator := types.LoadOrGenPrivValidator(config.PrivValidatorFile(), logger)
return NewNode(config, privValidator,
proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()), logger)
}
func NewNode(config cfg.Config, privValidator *types.PrivValidator, clientCreator proxy.ClientCreator) *Node {
func NewNode(config *cfg.Config, privValidator *types.PrivValidator, clientCreator proxy.ClientCreator, logger log.Logger) *Node {
// Get BlockStore
blockStoreDB := dbm.NewDB("blockstore", config.GetString("db_backend"), config.GetString("db_dir"))
blockStoreDB := dbm.NewDB("blockstore", config.DBBackend, config.DBDir())
blockStore := bc.NewBlockStore(blockStoreDB)
// Get State
stateDB := dbm.NewDB("state", config.GetString("db_backend"), config.GetString("db_dir"))
state := sm.GetState(config, stateDB)
consensusLogger := logger.With("module", "consensus")
stateLogger := logger.With("module", "state")
// add the chainid and number of validators to the global config
config.Set("chain_id", state.ChainID)
config.Set("num_vals", state.Validators.Size())
// Get State
stateDB := dbm.NewDB("state", config.DBBackend, config.DBDir())
state := sm.GetState(stateDB, config.GenesisFile())
state.SetLogger(stateLogger)
// Create the proxyApp, which manages connections (consensus, mempool, query)
// and sync tendermint and the app by replaying any necessary blocks
proxyApp := proxy.NewAppConns(config, clientCreator, consensus.NewHandshaker(config, state, blockStore))
handshaker := consensus.NewHandshaker(state, blockStore)
handshaker.SetLogger(consensusLogger)
proxyApp := proxy.NewAppConns(clientCreator, handshaker)
proxyApp.SetLogger(logger.With("module", "proxy"))
if _, err := proxyApp.Start(); err != nil {
cmn.Exit(cmn.Fmt("Error starting proxy app connections: %v", err))
}
// reload the state (it may have been updated by the handshake)
state = sm.LoadState(stateDB)
state.SetLogger(stateLogger)
// Transaction indexing
var txIndexer txindex.TxIndexer
switch config.GetString("tx_index") {
switch config.TxIndex {
case "kv":
store := dbm.NewDB("tx_index", config.GetString("db_backend"), config.GetString("db_dir"))
store := dbm.NewDB("tx_index", config.DBBackend, config.DBDir())
txIndexer = kv.NewTxIndex(store)
default:
txIndexer = &null.TxIndex{}
@@ -104,6 +108,7 @@ func NewNode(config cfg.Config, privValidator *types.PrivValidator, clientCreato
// Make event switch
eventSwitch := types.NewEventSwitch()
eventSwitch.SetLogger(logger.With("module", "types"))
_, err := eventSwitch.Start()
if err != nil {
cmn.Exit(cmn.Fmt("Failed to start switch: %v", err))
@@ -111,7 +116,7 @@ func NewNode(config cfg.Config, privValidator *types.PrivValidator, clientCreato
// Decide whether to fast-sync or not
// We don't fast-sync when the only validator is us.
fastSync := config.GetBool("fast_sync")
fastSync := config.FastSync
if state.Validators.Size() == 1 {
addr, _ := state.Validators.GetByIndex(0)
if bytes.Equal(privValidator.Address, addr) {
@@ -119,38 +124,55 @@ func NewNode(config cfg.Config, privValidator *types.PrivValidator, clientCreato
}
}
// Log whether this node is a validator or an observer
if state.Validators.HasAddress(privValidator.Address) {
consensusLogger.Info("This node is a validator")
} else {
consensusLogger.Info("This node is not a validator")
}
// Make BlockchainReactor
bcReactor := bc.NewBlockchainReactor(config, state.Copy(), proxyApp.Consensus(), blockStore, fastSync)
bcReactor := bc.NewBlockchainReactor(state.Copy(), proxyApp.Consensus(), blockStore, fastSync)
bcReactor.SetLogger(logger.With("module", "blockchain"))
// Make MempoolReactor
mempool := mempl.NewMempool(config, proxyApp.Mempool())
mempoolReactor := mempl.NewMempoolReactor(config, mempool)
mempoolLogger := logger.With("module", "mempool")
mempool := mempl.NewMempool(config.Mempool, proxyApp.Mempool())
mempool.SetLogger(mempoolLogger)
mempoolReactor := mempl.NewMempoolReactor(config.Mempool, mempool)
mempoolReactor.SetLogger(mempoolLogger)
// Make ConsensusReactor
consensusState := consensus.NewConsensusState(config, state.Copy(), proxyApp.Consensus(), blockStore, mempool)
consensusState := consensus.NewConsensusState(config.Consensus, state.Copy(), proxyApp.Consensus(), blockStore, mempool)
consensusState.SetLogger(consensusLogger)
if privValidator != nil {
consensusState.SetPrivValidator(privValidator)
}
consensusReactor := consensus.NewConsensusReactor(consensusState, fastSync)
consensusReactor.SetLogger(consensusLogger)
// Make p2p network switch
sw := p2p.NewSwitch(config.GetConfig("p2p"))
p2pLogger := logger.With("module", "p2p")
sw := p2p.NewSwitch(config.P2P)
sw.SetLogger(p2pLogger)
sw.AddReactor("MEMPOOL", mempoolReactor)
sw.AddReactor("BLOCKCHAIN", bcReactor)
sw.AddReactor("CONSENSUS", consensusReactor)
// Optionally, start the pex reactor
var addrBook *p2p.AddrBook
if config.GetBool("pex_reactor") {
addrBook = p2p.NewAddrBook(config.GetString("addrbook_file"), config.GetBool("addrbook_strict"))
if config.P2P.PexReactor {
addrBook = p2p.NewAddrBook(config.P2P.AddrBookFile(), config.P2P.AddrBookStrict)
addrBook.SetLogger(p2pLogger.With("book", config.P2P.AddrBookFile()))
pexReactor := p2p.NewPEXReactor(addrBook)
pexReactor.SetLogger(p2pLogger)
sw.AddReactor("PEX", pexReactor)
}
// Filter peers by addr or pubkey with an ABCI query.
// If the query return code is OK, add peer.
// XXX: Query format subject to change
if config.GetBool("filter_peers") {
if config.FilterPeers {
// NOTE: addr is ip:port
sw.SetAddrFilter(func(addr net.Addr) error {
resQuery, err := proxyApp.Query().QuerySync(abci.RequestQuery{Path: cmn.Fmt("/p2p/filter/addr/%s", addr.String())})
@@ -179,11 +201,11 @@ func NewNode(config cfg.Config, privValidator *types.PrivValidator, clientCreato
SetEventSwitch(eventSwitch, bcReactor, mempoolReactor, consensusReactor)
// run the profile server
profileHost := config.GetString("prof_laddr")
profileHost := config.ProfListenAddress
if profileHost != "" {
go func() {
log.Warn("Profile server", "error", http.ListenAndServe(profileHost, nil))
logger.Error("Profile server", "error", http.ListenAndServe(profileHost, nil))
}()
}
@@ -205,15 +227,14 @@ func NewNode(config cfg.Config, privValidator *types.PrivValidator, clientCreato
proxyApp: proxyApp,
txIndexer: txIndexer,
}
node.BaseService = *cmn.NewBaseService(log, "Node", node)
node.BaseService = *cmn.NewBaseService(logger, "Node", node)
return node
}
func (n *Node) OnStart() error {
// Create & add listener
protocol, address := ProtocolAndAddress(n.config.GetString("node_laddr"))
l := p2p.NewDefaultListener(protocol, address, n.config.GetBool("skip_upnp"))
protocol, address := ProtocolAndAddress(n.config.P2P.ListenAddress)
l := p2p.NewDefaultListener(protocol, address, n.config.P2P.SkipUPNP, n.Logger.With("module", "p2p"))
n.sw.AddListener(l)
// Start the switch
@@ -225,16 +246,16 @@ func (n *Node) OnStart() error {
}
// If seeds exist, add them to the address book and dial out
if n.config.GetString("seeds") != "" {
if n.config.P2P.Seeds != "" {
// dial out
seeds := strings.Split(n.config.GetString("seeds"), ",")
seeds := strings.Split(n.config.P2P.Seeds, ",")
if err := n.DialSeeds(seeds); err != nil {
return err
}
}
// Run the RPC server
if n.config.GetString("rpc_laddr") != "" {
if n.config.RPC.ListenAddress != "" {
listeners, err := n.startRPC()
if err != nil {
return err
@@ -248,14 +269,14 @@ func (n *Node) OnStart() error {
func (n *Node) OnStop() {
n.BaseService.OnStop()
log.Notice("Stopping Node")
n.Logger.Info("Stopping Node")
// TODO: gracefully disconnect from peers.
n.sw.Stop()
for _, l := range n.rpcListeners {
log.Info("Closing rpc listener", "listener", l)
n.Logger.Info("Closing rpc listener", "listener", l)
if err := l.Close(); err != nil {
log.Error("Error closing listener", "listener", l, "error", err)
n.Logger.Error("Error closing listener", "listener", l, "error", err)
}
}
}
@@ -284,7 +305,6 @@ func (n *Node) AddListener(l p2p.Listener) {
// ConfigureRPC sets all variables in rpccore so they will serve
// rpc calls from this node
func (n *Node) ConfigureRPC() {
rpccore.SetConfig(n.config)
rpccore.SetEventSwitch(n.evsw)
rpccore.SetBlockStore(n.blockStore)
rpccore.SetConsensusState(n.consensusState)
@@ -295,20 +315,27 @@ func (n *Node) ConfigureRPC() {
rpccore.SetAddrBook(n.addrBook)
rpccore.SetProxyAppQuery(n.proxyApp.Query())
rpccore.SetTxIndexer(n.txIndexer)
rpccore.SetLogger(n.Logger.With("module", "rpc"))
}
func (n *Node) startRPC() ([]net.Listener, error) {
n.ConfigureRPC()
listenAddrs := strings.Split(n.config.GetString("rpc_laddr"), ",")
listenAddrs := strings.Split(n.config.RPC.ListenAddress, ",")
if n.config.RPC.Unsafe {
rpccore.AddUnsafeRoutes()
}
// we may expose the rpc over both a unix and tcp socket
listeners := make([]net.Listener, len(listenAddrs))
for i, listenAddr := range listenAddrs {
mux := http.NewServeMux()
wm := rpcserver.NewWebsocketManager(rpccore.Routes, n.evsw)
rpcLogger := n.Logger.With("module", "rpc-server")
wm.SetLogger(rpcLogger)
mux.HandleFunc("/websocket", wm.WebsocketHandler)
rpcserver.RegisterRPCFuncs(mux, rpccore.Routes)
listener, err := rpcserver.StartHTTPServer(listenAddr, mux)
rpcserver.RegisterRPCFuncs(mux, rpccore.Routes, rpcLogger)
listener, err := rpcserver.StartHTTPServer(listenAddr, mux, rpcLogger)
if err != nil {
return nil, err
}
@@ -316,7 +343,7 @@ func (n *Node) startRPC() ([]net.Listener, error) {
}
// we expose a simplified api over grpc for convenience to app devs
grpcListenAddr := n.config.GetString("grpc_laddr")
grpcListenAddr := n.config.RPC.GRPCListenAddress
if grpcListenAddr != "" {
listener, err := grpccore.StartGRPCServer(grpcListenAddr)
if err != nil {
@@ -372,9 +399,9 @@ func (n *Node) makeNodeInfo() *p2p.NodeInfo {
}
nodeInfo := &p2p.NodeInfo{
PubKey: n.privKey.PubKey().(crypto.PubKeyEd25519),
Moniker: n.config.GetString("moniker"),
Network: n.config.GetString("chain_id"),
PubKey: n.privKey.PubKey().Unwrap().(crypto.PubKeyEd25519),
Moniker: n.config.Moniker,
Network: n.consensusState.GetState().ChainID,
Version: version.Version,
Other: []string{
cmn.Fmt("wire_version=%v", wire.Version),
@@ -386,9 +413,10 @@ func (n *Node) makeNodeInfo() *p2p.NodeInfo {
}
// include git hash in the nodeInfo if available
if rev, err := cmn.ReadFile(n.config.GetString("revision_file")); err == nil {
// TODO: use ld-flags
/*if rev, err := cmn.ReadFile(n.config.GetString("revision_file")); err == nil {
nodeInfo.Other = append(nodeInfo.Other, cmn.Fmt("revision=%v", string(rev)))
}
}*/
if !n.sw.IsListening() {
return nodeInfo
@@ -397,7 +425,7 @@ func (n *Node) makeNodeInfo() *p2p.NodeInfo {
p2pListener := n.sw.Listeners()[0]
p2pHost := p2pListener.ExternalAddress().IP.String()
p2pPort := p2pListener.ExternalAddress().Port
rpcListenAddr := n.config.GetString("rpc_laddr")
rpcListenAddr := n.config.RPC.ListenAddress
// We assume that the rpcListener has the same ExternalAddress.
// This is probably true because both P2P and RPC listeners use UPnP,
@@ -426,3 +454,5 @@ func ProtocolAndAddress(listenAddr string) (string, string) {
}
return protocol, address
}
//------------------------------------------------------------------------------

View File

@@ -4,16 +4,17 @@ import (
"testing"
"time"
"github.com/tendermint/tendermint/config/tendermint_test"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tmlibs/log"
)
func TestNodeStartStop(t *testing.T) {
config := tendermint_test.ResetConfig("node_node_test")
config := cfg.ResetTestRoot("node_node_test")
// Create & start node
n := NewNodeDefault(config)
n := NewNodeDefault(config, log.TestingLogger())
n.Start()
log.Notice("Started node", "nodeInfo", n.sw.NodeInfo())
t.Logf("Started node %v", n.sw.NodeInfo())
// Wait a bit to initialize
// TODO remove time.Sleep(), make asynchronous.

78
p2p/CHANGELOG.md Normal file
View File

@@ -0,0 +1,78 @@
# Changelog
## 0.5.0 (April 21, 2017)
BREAKING CHANGES:
- Remove or unexport methods from FuzzedConnection: Active, Mode, ProbDropRW, ProbDropConn, ProbSleep, MaxDelayMilliseconds, Fuzz
- switch.AddPeerWithConnection is unexported and replaced by switch.AddPeer
- switch.DialPeerWithAddress takes a bool, setting the peer as persistent or not
FEATURES:
- Persistent peers: any peer considered a "seed" will be reconnected to when the connection is dropped
IMPROVEMENTS:
- Many more tests and comments
- Refactor configurations for less dependence on go-config. Introduces new structs PeerConfig, MConnConfig, FuzzConnConfig
- New methods on peer: CloseConn, HandshakeTimeout, IsPersistent, Addr, PubKey
- NewNetAddress supports a testing mode where the address defaults to 0.0.0.0:0
## 0.4.0 (March 6, 2017)
BREAKING CHANGES:
- DialSeeds now takes an AddrBook and returns an error: `DialSeeds(*AddrBook, []string) error`
- NewNetAddressString now returns an error: `NewNetAddressString(string) (*NetAddress, error)`
FEATURES:
- `NewNetAddressStrings([]string) ([]*NetAddress, error)`
- `AddrBook.Save()`
IMPROVEMENTS:
- PexReactor responsible for starting and stopping the AddrBook
BUG FIXES:
- DialSeeds returns an error instead of panicking on bad addresses
## 0.3.5 (January 12, 2017)
FEATURES
- Toggle strict routability in the AddrBook
BUG FIXES
- Close filtered out connections
- Fixes for MakeConnectedSwitches and Connect2Switches
## 0.3.4 (August 10, 2016)
FEATURES:
- Optionally filter connections by address or public key
## 0.3.3 (May 12, 2016)
FEATURES:
- FuzzConn
## 0.3.2 (March 12, 2016)
IMPROVEMENTS:
- Memory optimizations
## 0.3.1 ()
FEATURES:
- Configurable parameters

13
p2p/Dockerfile Normal file

@@ -0,0 +1,13 @@
FROM golang:latest
RUN curl https://glide.sh/get | sh
RUN mkdir -p /go/src/github.com/tendermint/tendermint/p2p
WORKDIR /go/src/github.com/tendermint/tendermint/p2p
COPY glide.yaml /go/src/github.com/tendermint/tendermint/p2p/
COPY glide.lock /go/src/github.com/tendermint/tendermint/p2p/
RUN glide install
COPY . /go/src/github.com/tendermint/tendermint/p2p

79
p2p/README.md Normal file

@@ -0,0 +1,79 @@
# `tendermint/tendermint/p2p`
[![CircleCI](https://circleci.com/gh/tendermint/tendermint/p2p.svg?style=svg)](https://circleci.com/gh/tendermint/tendermint/p2p)
`tendermint/tendermint/p2p` provides an abstraction around peer-to-peer communication.<br/>
## Peer/MConnection/Channel
Each peer has one `MConnection` (multiplex connection) instance.
__multiplex__ *noun* a system or signal involving simultaneous transmission of
several messages along a single channel of communication.
Each `MConnection` handles message transmission on multiple abstract communication
`Channel`s. Each channel has a globally unique byte id.
The byte id and the relative priorities of each `Channel` are configured upon
initialization of the connection.
There are two methods for sending messages:
```go
func (m MConnection) Send(chID byte, msg interface{}) bool {}
func (m MConnection) TrySend(chID byte, msg interface{}) bool {}
```
`Send(chID, msg)` is a blocking call that waits until `msg` is successfully queued
for the channel with the given id byte `chID`. The message `msg` is serialized
using the `tendermint/wire` submodule's `WriteBinary()` reflection routine.
`TrySend(chID, msg)` is a nonblocking call that returns false if the channel's
queue is full.
`Send()` and `TrySend()` are also exposed for each `Peer`.
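For illustration, a minimal sketch of the two calls (here `mconn`, `MyChannelID`, and `msg` are placeholders supplied by the caller):
```go
// Blocks until msg is queued on the channel (or the send times out).
if ok := mconn.Send(MyChannelID, msg); !ok {
	// unknown channel, stopped connection, or timeout
}

// Never blocks; returns false immediately if the channel's queue is full.
if ok := mconn.TrySend(MyChannelID, msg); !ok {
	// message was not queued -- drop it or retry later
}
```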
## Switch/Reactor
The `Switch` handles peer connections and exposes an API to receive incoming messages
on `Reactors`. Each `Reactor` is responsible for handling incoming messages of one
or more `Channels`. So while sending outgoing messages is typically performed on the peer,
incoming messages are received on the reactor.
```go
// Declare a MyReactor reactor that handles messages on MyChannelID.
type MyReactor struct{}
func (reactor MyReactor) GetChannels() []*ChannelDescriptor {
return []*ChannelDescriptor{&ChannelDescriptor{ID: MyChannelID, Priority: 1}}
}
func (reactor MyReactor) Receive(chID byte, peer *Peer, msgBytes []byte) {
r, n, err := bytes.NewBuffer(msgBytes), new(int64), new(error)
msgString := ReadString(r, n, err)
fmt.Println(msgString)
}
// Other Reactor methods omitted for brevity
...
sw := NewSwitch([]Reactor{MyReactor{}})
...
// Send a random message to all outbound connections
for _, peer := range sw.Peers().List() {
if peer.IsOutbound() {
peer.Send(MyChannelID, "Here's a random message")
}
}
```
### PexReactor/AddrBook
A `PEXReactor` reactor implementation is provided to automate peer discovery.
```go
book := p2p.NewAddrBook(addrBookFilePath, true) // true = strict routability
pexReactor := p2p.NewPEXReactor(book)
...
sw := NewSwitch([]Reactor{pexReactor, myReactor, ...})
```
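The address book can also be driven directly; a rough sketch based on the API above (the file path and bias value are arbitrary examples):
```go
book := p2p.NewAddrBook("/tmp/addrbook.json", true) // true = strict routability
book.Start()                      // loads any saved addresses and starts the save routine
book.AddAddress(addr, src)        // addr, src are *p2p.NetAddress values
candidate := book.PickAddress(30) // 30% bias towards newer addresses
book.Save()
book.Stop()
```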

841
p2p/addrbook.go Normal file

@@ -0,0 +1,841 @@
// Modified for Tendermint
// Originally Copyright (c) 2013-2014 Conformal Systems LLC.
// https://github.com/conformal/btcd/blob/master/LICENSE
package p2p
import (
"encoding/binary"
"encoding/json"
"math"
"math/rand"
"net"
"os"
"sync"
"time"
crypto "github.com/tendermint/go-crypto"
cmn "github.com/tendermint/tmlibs/common"
)
const (
// threshold below which the address book will claim to need more addresses.
needAddressThreshold = 1000
// interval used to dump the address cache to disk for future use.
dumpAddressInterval = time.Minute * 2
// max addresses in each old address bucket.
oldBucketSize = 64
// buckets we split old addresses over.
oldBucketCount = 64
// max addresses in each new address bucket.
newBucketSize = 64
// buckets that we spread new addresses over.
newBucketCount = 256
// old buckets over which an address group will be spread.
oldBucketsPerGroup = 4
// new buckets over which a source address group will be spread.
newBucketsPerGroup = 32
// buckets a frequently seen new address may end up in.
maxNewBucketsPerAddress = 4
// assume an address has vanished if we have not seen it announced in this many days.
numMissingDays = 30
// tries without a single success before we assume an address is bad.
numRetries = 3
// max failures we will accept without a success before considering an address bad.
maxFailures = 10
// days since the last success before we will consider evicting an address.
minBadDays = 7
// % of total addresses known returned by GetSelection.
getSelectionPercent = 23
// min addresses that must be returned by GetSelection. Useful for bootstrapping.
minGetSelection = 32
// max addresses returned by GetSelection
// NOTE: this must match "maxPexMessageSize"
maxGetSelection = 250
// current version of the on-disk format.
serializationVersion = 1
)
const (
bucketTypeNew = 0x01
bucketTypeOld = 0x02
)
// AddrBook - concurrency safe peer address manager.
type AddrBook struct {
cmn.BaseService
mtx sync.Mutex
filePath string
routabilityStrict bool
rand *rand.Rand
key string
ourAddrs map[string]*NetAddress
addrLookup map[string]*knownAddress // new & old
addrNew []map[string]*knownAddress
addrOld []map[string]*knownAddress
wg sync.WaitGroup
nOld int
nNew int
}
// NewAddrBook creates a new address book.
// Use Start to begin processing asynchronous address updates.
func NewAddrBook(filePath string, routabilityStrict bool) *AddrBook {
am := &AddrBook{
rand: rand.New(rand.NewSource(time.Now().UnixNano())),
ourAddrs: make(map[string]*NetAddress),
addrLookup: make(map[string]*knownAddress),
filePath: filePath,
routabilityStrict: routabilityStrict,
}
am.init()
am.BaseService = *cmn.NewBaseService(nil, "AddrBook", am)
return am
}
// When modifying this, don't forget to update loadFromFile()
func (a *AddrBook) init() {
a.key = crypto.CRandHex(24) // 24/2 * 8 = 96 bits
// New addr buckets
a.addrNew = make([]map[string]*knownAddress, newBucketCount)
for i := range a.addrNew {
a.addrNew[i] = make(map[string]*knownAddress)
}
// Old addr buckets
a.addrOld = make([]map[string]*knownAddress, oldBucketCount)
for i := range a.addrOld {
a.addrOld[i] = make(map[string]*knownAddress)
}
}
// OnStart implements Service.
func (a *AddrBook) OnStart() error {
a.BaseService.OnStart()
a.loadFromFile(a.filePath)
a.wg.Add(1)
go a.saveRoutine()
return nil
}
// OnStop implements Service.
func (a *AddrBook) OnStop() {
a.BaseService.OnStop()
}
func (a *AddrBook) Wait() {
a.wg.Wait()
}
func (a *AddrBook) AddOurAddress(addr *NetAddress) {
a.mtx.Lock()
defer a.mtx.Unlock()
a.Logger.Info("Add our address to book", "addr", addr)
a.ourAddrs[addr.String()] = addr
}
func (a *AddrBook) OurAddresses() []*NetAddress {
addrs := []*NetAddress{}
for _, addr := range a.ourAddrs {
addrs = append(addrs, addr)
}
return addrs
}
// NOTE: addr must not be nil
func (a *AddrBook) AddAddress(addr *NetAddress, src *NetAddress) {
a.mtx.Lock()
defer a.mtx.Unlock()
a.Logger.Info("Add address to book", "addr", addr, "src", src)
a.addAddress(addr, src)
}
func (a *AddrBook) NeedMoreAddrs() bool {
return a.Size() < needAddressThreshold
}
func (a *AddrBook) Size() int {
a.mtx.Lock()
defer a.mtx.Unlock()
return a.size()
}
func (a *AddrBook) size() int {
return a.nNew + a.nOld
}
// Pick an address to connect to with new/old bias.
func (a *AddrBook) PickAddress(newBias int) *NetAddress {
a.mtx.Lock()
defer a.mtx.Unlock()
if a.size() == 0 {
return nil
}
if newBias > 100 {
newBias = 100
}
if newBias < 0 {
newBias = 0
}
// Bias between new and old addresses.
oldCorrelation := math.Sqrt(float64(a.nOld)) * (100.0 - float64(newBias))
newCorrelation := math.Sqrt(float64(a.nNew)) * float64(newBias)
if (newCorrelation+oldCorrelation)*a.rand.Float64() < oldCorrelation {
// pick random Old bucket.
var bucket map[string]*knownAddress = nil
for len(bucket) == 0 {
bucket = a.addrOld[a.rand.Intn(len(a.addrOld))]
}
// pick a random ka from bucket.
randIndex := a.rand.Intn(len(bucket))
for _, ka := range bucket {
if randIndex == 0 {
return ka.Addr
}
randIndex--
}
cmn.PanicSanity("Should not happen")
} else {
// pick random New bucket.
var bucket map[string]*knownAddress = nil
for len(bucket) == 0 {
bucket = a.addrNew[a.rand.Intn(len(a.addrNew))]
}
// pick a random ka from bucket.
randIndex := a.rand.Intn(len(bucket))
for _, ka := range bucket {
if randIndex == 0 {
return ka.Addr
}
randIndex--
}
cmn.PanicSanity("Should not happen")
}
return nil
}
func (a *AddrBook) MarkGood(addr *NetAddress) {
a.mtx.Lock()
defer a.mtx.Unlock()
ka := a.addrLookup[addr.String()]
if ka == nil {
return
}
ka.markGood()
if ka.isNew() {
a.moveToOld(ka)
}
}
func (a *AddrBook) MarkAttempt(addr *NetAddress) {
a.mtx.Lock()
defer a.mtx.Unlock()
ka := a.addrLookup[addr.String()]
if ka == nil {
return
}
ka.markAttempt()
}
// MarkBad currently just ejects the address. In the future, consider
// blacklisting.
func (a *AddrBook) MarkBad(addr *NetAddress) {
a.RemoveAddress(addr)
}
// RemoveAddress removes the address from the book.
func (a *AddrBook) RemoveAddress(addr *NetAddress) {
a.mtx.Lock()
defer a.mtx.Unlock()
ka := a.addrLookup[addr.String()]
if ka == nil {
return
}
a.Logger.Info("Remove address from book", "addr", addr)
a.removeFromAllBuckets(ka)
}
/* Peer exchange */
// GetSelection randomly selects some addresses (old & new). Suitable for peer-exchange protocols.
func (a *AddrBook) GetSelection() []*NetAddress {
a.mtx.Lock()
defer a.mtx.Unlock()
if a.size() == 0 {
return nil
}
allAddr := make([]*NetAddress, a.size())
i := 0
for _, v := range a.addrLookup {
allAddr[i] = v.Addr
i++
}
numAddresses := cmn.MaxInt(
cmn.MinInt(minGetSelection, len(allAddr)),
len(allAddr)*getSelectionPercent/100)
numAddresses = cmn.MinInt(maxGetSelection, numAddresses)
// Fisher-Yates shuffle the array. We only need to do the first
// `numAddresses` since we are throwing away the rest.
for i := 0; i < numAddresses; i++ {
// pick a number between current index and the end
j := rand.Intn(len(allAddr)-i) + i
allAddr[i], allAddr[j] = allAddr[j], allAddr[i]
}
// slice off the limit we are willing to share.
return allAddr[:numAddresses]
}
/* Loading & Saving */
type addrBookJSON struct {
Key string
Addrs []*knownAddress
}
func (a *AddrBook) saveToFile(filePath string) {
a.Logger.Info("Saving AddrBook to file", "size", a.Size())
a.mtx.Lock()
defer a.mtx.Unlock()
// Compile Addrs
addrs := []*knownAddress{}
for _, ka := range a.addrLookup {
addrs = append(addrs, ka)
}
aJSON := &addrBookJSON{
Key: a.key,
Addrs: addrs,
}
jsonBytes, err := json.MarshalIndent(aJSON, "", "\t")
if err != nil {
a.Logger.Error("Failed to save AddrBook to file", "err", err)
return
}
err = cmn.WriteFileAtomic(filePath, jsonBytes, 0644)
if err != nil {
a.Logger.Error("Failed to save AddrBook to file", "file", filePath, "error", err)
}
}
// Returns false if the file does not exist.
// Panics if the file is corrupt.
func (a *AddrBook) loadFromFile(filePath string) bool {
// If doesn't exist, do nothing.
_, err := os.Stat(filePath)
if os.IsNotExist(err) {
return false
}
// Load addrBookJSON{}
r, err := os.Open(filePath)
if err != nil {
cmn.PanicCrisis(cmn.Fmt("Error opening file %s: %v", filePath, err))
}
defer r.Close()
aJSON := &addrBookJSON{}
dec := json.NewDecoder(r)
err = dec.Decode(aJSON)
if err != nil {
cmn.PanicCrisis(cmn.Fmt("Error reading file %s: %v", filePath, err))
}
// Restore all the fields...
// Restore the key
a.key = aJSON.Key
// Restore .addrNew & .addrOld
for _, ka := range aJSON.Addrs {
for _, bucketIndex := range ka.Buckets {
bucket := a.getBucket(ka.BucketType, bucketIndex)
bucket[ka.Addr.String()] = ka
}
a.addrLookup[ka.Addr.String()] = ka
if ka.BucketType == bucketTypeNew {
a.nNew++
} else {
a.nOld++
}
}
return true
}
// Save saves the book.
func (a *AddrBook) Save() {
a.Logger.Info("Saving AddrBook to file", "size", a.Size())
a.saveToFile(a.filePath)
}
/* Private methods */
func (a *AddrBook) saveRoutine() {
dumpAddressTicker := time.NewTicker(dumpAddressInterval)
out:
for {
select {
case <-dumpAddressTicker.C:
a.saveToFile(a.filePath)
case <-a.Quit:
break out
}
}
dumpAddressTicker.Stop()
a.saveToFile(a.filePath)
a.wg.Done()
a.Logger.Info("Address handler done")
}
func (a *AddrBook) getBucket(bucketType byte, bucketIdx int) map[string]*knownAddress {
switch bucketType {
case bucketTypeNew:
return a.addrNew[bucketIdx]
case bucketTypeOld:
return a.addrOld[bucketIdx]
default:
cmn.PanicSanity("Should not happen")
return nil
}
}
// Adds ka to a new bucket. Returns false if it can't because the bucket is full.
// NOTE: currently it always returns true.
func (a *AddrBook) addToNewBucket(ka *knownAddress, bucketIdx int) bool {
// Sanity check
if ka.isOld() {
a.Logger.Error(cmn.Fmt("Cannot add address already in old bucket to a new bucket: %v", ka))
return false
}
addrStr := ka.Addr.String()
bucket := a.getBucket(bucketTypeNew, bucketIdx)
// Already exists?
if _, ok := bucket[addrStr]; ok {
return true
}
// Enforce max addresses.
if len(bucket) > newBucketSize {
a.Logger.Info("new bucket is full, expiring old ")
a.expireNew(bucketIdx)
}
// Add to bucket.
bucket[addrStr] = ka
if ka.addBucketRef(bucketIdx) == 1 {
a.nNew++
}
// Ensure in addrLookup
a.addrLookup[addrStr] = ka
return true
}
// Adds ka to an old bucket. Returns false if it can't because the bucket is full.
func (a *AddrBook) addToOldBucket(ka *knownAddress, bucketIdx int) bool {
// Sanity check
if ka.isNew() {
a.Logger.Error(cmn.Fmt("Cannot add new address to old bucket: %v", ka))
return false
}
if len(ka.Buckets) != 0 {
a.Logger.Error(cmn.Fmt("Cannot add already old address to another old bucket: %v", ka))
return false
}
addrStr := ka.Addr.String()
bucket := a.getBucket(bucketTypeOld, bucketIdx)
// Already exists?
if _, ok := bucket[addrStr]; ok {
return true
}
// Enforce max addresses.
if len(bucket) > oldBucketSize {
return false
}
// Add to bucket.
bucket[addrStr] = ka
if ka.addBucketRef(bucketIdx) == 1 {
a.nOld++
}
// Ensure in addrLookup
a.addrLookup[addrStr] = ka
return true
}
func (a *AddrBook) removeFromBucket(ka *knownAddress, bucketType byte, bucketIdx int) {
if ka.BucketType != bucketType {
a.Logger.Error(cmn.Fmt("Bucket type mismatch: %v", ka))
return
}
bucket := a.getBucket(bucketType, bucketIdx)
delete(bucket, ka.Addr.String())
if ka.removeBucketRef(bucketIdx) == 0 {
if bucketType == bucketTypeNew {
a.nNew--
} else {
a.nOld--
}
delete(a.addrLookup, ka.Addr.String())
}
}
func (a *AddrBook) removeFromAllBuckets(ka *knownAddress) {
for _, bucketIdx := range ka.Buckets {
bucket := a.getBucket(ka.BucketType, bucketIdx)
delete(bucket, ka.Addr.String())
}
ka.Buckets = nil
if ka.BucketType == bucketTypeNew {
a.nNew--
} else {
a.nOld--
}
delete(a.addrLookup, ka.Addr.String())
}
func (a *AddrBook) pickOldest(bucketType byte, bucketIdx int) *knownAddress {
bucket := a.getBucket(bucketType, bucketIdx)
var oldest *knownAddress
for _, ka := range bucket {
if oldest == nil || ka.LastAttempt.Before(oldest.LastAttempt) {
oldest = ka
}
}
return oldest
}
func (a *AddrBook) addAddress(addr, src *NetAddress) {
if a.routabilityStrict && !addr.Routable() {
a.Logger.Error(cmn.Fmt("Cannot add non-routable address %v", addr))
return
}
if _, ok := a.ourAddrs[addr.String()]; ok {
// Ignore our own listener address.
return
}
ka := a.addrLookup[addr.String()]
if ka != nil {
// Already old.
if ka.isOld() {
return
}
// Already in max new buckets.
if len(ka.Buckets) == maxNewBucketsPerAddress {
return
}
// The more entries we have, the less likely we are to add more.
factor := int32(2 * len(ka.Buckets))
if a.rand.Int31n(factor) != 0 {
return
}
} else {
ka = newKnownAddress(addr, src)
}
bucket := a.calcNewBucket(addr, src)
a.addToNewBucket(ka, bucket)
a.Logger.Info("Added new address", "address", addr, "total", a.size())
}
// Make space in the new buckets by expiring the really bad entries.
// If no bad entries are available we remove the oldest.
func (a *AddrBook) expireNew(bucketIdx int) {
for addrStr, ka := range a.addrNew[bucketIdx] {
// If an entry is bad, throw it away
if ka.isBad() {
a.Logger.Info(cmn.Fmt("expiring bad address %v", addrStr))
a.removeFromBucket(ka, bucketTypeNew, bucketIdx)
return
}
}
// If we haven't thrown out a bad entry, throw out the oldest entry
oldest := a.pickOldest(bucketTypeNew, bucketIdx)
a.removeFromBucket(oldest, bucketTypeNew, bucketIdx)
}
// Promotes an address from new to old.
// TODO: Move to old probabilistically.
// The better a node is, the less likely it should be evicted from an old bucket.
func (a *AddrBook) moveToOld(ka *knownAddress) {
// Sanity check
if ka.isOld() {
a.Logger.Error(cmn.Fmt("Cannot promote address that is already old %v", ka))
return
}
if len(ka.Buckets) == 0 {
a.Logger.Error(cmn.Fmt("Cannot promote address that isn't in any new buckets %v", ka))
return
}
// Remember one of the buckets that ka is currently in.
freedBucket := ka.Buckets[0]
// Remove from all (new) buckets.
a.removeFromAllBuckets(ka)
// It's officially old now.
ka.BucketType = bucketTypeOld
// Try to add it to its oldBucket destination.
oldBucketIdx := a.calcOldBucket(ka.Addr)
added := a.addToOldBucket(ka, oldBucketIdx)
if !added {
// No room, must evict something
oldest := a.pickOldest(bucketTypeOld, oldBucketIdx)
a.removeFromBucket(oldest, bucketTypeOld, oldBucketIdx)
// Find new bucket to put oldest in
newBucketIdx := a.calcNewBucket(oldest.Addr, oldest.Src)
added := a.addToNewBucket(oldest, newBucketIdx)
// No space in newBucket either, just put it in freedBucket from above.
if !added {
added := a.addToNewBucket(oldest, freedBucket)
if !added {
a.Logger.Error(cmn.Fmt("Could not migrate oldest %v to freedBucket %v", oldest, freedBucket))
}
}
// Finally, add to bucket again.
added = a.addToOldBucket(ka, oldBucketIdx)
if !added {
a.Logger.Error(cmn.Fmt("Could not re-add ka %v to oldBucketIdx %v", ka, oldBucketIdx))
}
}
}
// doublesha256( key + sourcegroup +
// int64(doublesha256(key + group + sourcegroup))%bucket_per_group ) % num_new_buckets
func (a *AddrBook) calcNewBucket(addr, src *NetAddress) int {
data1 := []byte{}
data1 = append(data1, []byte(a.key)...)
data1 = append(data1, []byte(a.groupKey(addr))...)
data1 = append(data1, []byte(a.groupKey(src))...)
hash1 := doubleSha256(data1)
hash64 := binary.BigEndian.Uint64(hash1)
hash64 %= newBucketsPerGroup
var hashbuf [8]byte
binary.BigEndian.PutUint64(hashbuf[:], hash64)
data2 := []byte{}
data2 = append(data2, []byte(a.key)...)
data2 = append(data2, a.groupKey(src)...)
data2 = append(data2, hashbuf[:]...)
hash2 := doubleSha256(data2)
return int(binary.BigEndian.Uint64(hash2) % newBucketCount)
}
// doublesha256( key + group +
// int64(doublesha256(key + addr))%buckets_per_group ) % num_old_buckets
func (a *AddrBook) calcOldBucket(addr *NetAddress) int {
data1 := []byte{}
data1 = append(data1, []byte(a.key)...)
data1 = append(data1, []byte(addr.String())...)
hash1 := doubleSha256(data1)
hash64 := binary.BigEndian.Uint64(hash1)
hash64 %= oldBucketsPerGroup
var hashbuf [8]byte
binary.BigEndian.PutUint64(hashbuf[:], hash64)
data2 := []byte{}
data2 = append(data2, []byte(a.key)...)
data2 = append(data2, a.groupKey(addr)...)
data2 = append(data2, hashbuf[:]...)
hash2 := doubleSha256(data2)
return int(binary.BigEndian.Uint64(hash2) % oldBucketCount)
}
// Return a string representing the network group of this address.
// This is the /16 for IPv4, the /32 (/36 for he.net) for IPv6, the string
// "local" for a local address and the string "unroutable" for an unroutable
// address.
func (a *AddrBook) groupKey(na *NetAddress) string {
if a.routabilityStrict && na.Local() {
return "local"
}
if a.routabilityStrict && !na.Routable() {
return "unroutable"
}
if ipv4 := na.IP.To4(); ipv4 != nil {
return (&net.IPNet{IP: na.IP, Mask: net.CIDRMask(16, 32)}).String()
}
if na.RFC6145() || na.RFC6052() {
// last four bytes are the ip address
ip := net.IP(na.IP[12:16])
return (&net.IPNet{IP: ip, Mask: net.CIDRMask(16, 32)}).String()
}
if na.RFC3964() {
ip := net.IP(na.IP[2:6])
return (&net.IPNet{IP: ip, Mask: net.CIDRMask(16, 32)}).String()
}
if na.RFC4380() {
// teredo tunnels have the last 4 bytes as the v4 address XOR
// 0xff.
ip := net.IP(make([]byte, 4))
for i, byte := range na.IP[12:16] {
ip[i] = byte ^ 0xff
}
return (&net.IPNet{IP: ip, Mask: net.CIDRMask(16, 32)}).String()
}
// OK, so now we know ourselves to be an IPv6 address.
// bitcoind uses /32 for everything, except for Hurricane Electric's
// (he.net) IP range, which it uses /36 for.
bits := 32
heNet := &net.IPNet{IP: net.ParseIP("2001:470::"),
Mask: net.CIDRMask(32, 128)}
if heNet.Contains(na.IP) {
bits = 36
}
return (&net.IPNet{IP: na.IP, Mask: net.CIDRMask(bits, 128)}).String()
}
//-----------------------------------------------------------------------------
/*
knownAddress
tracks information about a known network address that is used
to determine how viable an address is.
*/
type knownAddress struct {
Addr *NetAddress
Src *NetAddress
Attempts int32
LastAttempt time.Time
LastSuccess time.Time
BucketType byte
Buckets []int
}
func newKnownAddress(addr *NetAddress, src *NetAddress) *knownAddress {
return &knownAddress{
Addr: addr,
Src: src,
Attempts: 0,
LastAttempt: time.Now(),
BucketType: bucketTypeNew,
Buckets: nil,
}
}
func (ka *knownAddress) isOld() bool {
return ka.BucketType == bucketTypeOld
}
func (ka *knownAddress) isNew() bool {
return ka.BucketType == bucketTypeNew
}
func (ka *knownAddress) markAttempt() {
now := time.Now()
ka.LastAttempt = now
ka.Attempts += 1
}
func (ka *knownAddress) markGood() {
now := time.Now()
ka.LastAttempt = now
ka.Attempts = 0
ka.LastSuccess = now
}
func (ka *knownAddress) addBucketRef(bucketIdx int) int {
for _, bucket := range ka.Buckets {
if bucket == bucketIdx {
// TODO refactor to return error?
// log.Warn(Fmt("Bucket already exists in ka.Buckets: %v", ka))
return -1
}
}
ka.Buckets = append(ka.Buckets, bucketIdx)
return len(ka.Buckets)
}
func (ka *knownAddress) removeBucketRef(bucketIdx int) int {
buckets := []int{}
for _, bucket := range ka.Buckets {
if bucket != bucketIdx {
buckets = append(buckets, bucket)
}
}
if len(buckets) != len(ka.Buckets)-1 {
// TODO refactor to return error?
// log.Warn(Fmt("bucketIdx not found in ka.Buckets: %v", ka))
return -1
}
ka.Buckets = buckets
return len(ka.Buckets)
}
/*
An address is bad if the address in question has not been tried in the last
minute and meets one of the following criteria:
1) It claims to be from the future
2) It hasn't been seen in over a month
3) It has failed at least three times and never succeeded
4) It has failed ten times in the last week
All addresses that meet these criteria are assumed to be worthless and not
worth keeping hold of.
*/
func (ka *knownAddress) isBad() bool {
// Attempted within the last minute --> not bad
if ka.LastAttempt.After(time.Now().Add(-1 * time.Minute)) {
return false
}
// Not seen in over a month --> bad
if ka.LastAttempt.Before(time.Now().Add(-1 * numMissingDays * time.Hour * 24)) {
return true
}
// Never succeeded?
if ka.LastSuccess.IsZero() && ka.Attempts >= numRetries {
return true
}
// Hasn't succeeded in too long?
if ka.LastSuccess.Before(time.Now().Add(-1*minBadDays*time.Hour*24)) &&
ka.Attempts >= maxFailures {
return true
}
return false
}

174
p2p/addrbook_test.go Normal file

@@ -0,0 +1,174 @@
package p2p
import (
"fmt"
"io/ioutil"
"math/rand"
"testing"
"github.com/stretchr/testify/assert"
"github.com/tendermint/tmlibs/log"
)
func createTempFileName(prefix string) string {
f, err := ioutil.TempFile("", prefix)
if err != nil {
panic(err)
}
fname := f.Name()
err = f.Close()
if err != nil {
panic(err)
}
return fname
}
func TestAddrBookSaveLoad(t *testing.T) {
fname := createTempFileName("addrbook_test")
// 0 addresses
book := NewAddrBook(fname, true)
book.SetLogger(log.TestingLogger())
book.saveToFile(fname)
book = NewAddrBook(fname, true)
book.SetLogger(log.TestingLogger())
book.loadFromFile(fname)
assert.Zero(t, book.Size())
// 100 addresses
randAddrs := randNetAddressPairs(t, 100)
for _, addrSrc := range randAddrs {
book.AddAddress(addrSrc.addr, addrSrc.src)
}
assert.Equal(t, 100, book.Size())
book.saveToFile(fname)
book = NewAddrBook(fname, true)
book.SetLogger(log.TestingLogger())
book.loadFromFile(fname)
assert.Equal(t, 100, book.Size())
}
func TestAddrBookLookup(t *testing.T) {
fname := createTempFileName("addrbook_test")
randAddrs := randNetAddressPairs(t, 100)
book := NewAddrBook(fname, true)
book.SetLogger(log.TestingLogger())
for _, addrSrc := range randAddrs {
addr := addrSrc.addr
src := addrSrc.src
book.AddAddress(addr, src)
ka := book.addrLookup[addr.String()]
assert.NotNil(t, ka, "Expected to find KnownAddress %v but wasn't there.", addr)
if !(ka.Addr.Equals(addr) && ka.Src.Equals(src)) {
t.Fatalf("KnownAddress doesn't match addr & src")
}
}
}
func TestAddrBookPromoteToOld(t *testing.T) {
fname := createTempFileName("addrbook_test")
randAddrs := randNetAddressPairs(t, 100)
book := NewAddrBook(fname, true)
book.SetLogger(log.TestingLogger())
for _, addrSrc := range randAddrs {
book.AddAddress(addrSrc.addr, addrSrc.src)
}
// Attempt all addresses.
for _, addrSrc := range randAddrs {
book.MarkAttempt(addrSrc.addr)
}
// Promote half of them
for i, addrSrc := range randAddrs {
if i%2 == 0 {
book.MarkGood(addrSrc.addr)
}
}
// TODO: do more testing :)
selection := book.GetSelection()
t.Logf("selection: %v", selection)
if len(selection) > book.Size() {
t.Errorf("selection could not be bigger than the book")
}
}
func TestAddrBookHandlesDuplicates(t *testing.T) {
fname := createTempFileName("addrbook_test")
book := NewAddrBook(fname, true)
book.SetLogger(log.TestingLogger())
randAddrs := randNetAddressPairs(t, 100)
differentSrc := randIPv4Address(t)
for _, addrSrc := range randAddrs {
book.AddAddress(addrSrc.addr, addrSrc.src)
book.AddAddress(addrSrc.addr, addrSrc.src) // duplicate
book.AddAddress(addrSrc.addr, differentSrc) // different src
}
assert.Equal(t, 100, book.Size())
}
type netAddressPair struct {
addr *NetAddress
src *NetAddress
}
func randNetAddressPairs(t *testing.T, n int) []netAddressPair {
randAddrs := make([]netAddressPair, n)
for i := 0; i < n; i++ {
randAddrs[i] = netAddressPair{addr: randIPv4Address(t), src: randIPv4Address(t)}
}
return randAddrs
}
func randIPv4Address(t *testing.T) *NetAddress {
for {
ip := fmt.Sprintf("%v.%v.%v.%v",
rand.Intn(254)+1,
rand.Intn(255),
rand.Intn(255),
rand.Intn(255),
)
port := rand.Intn(65535-1) + 1
addr, err := NewNetAddressString(fmt.Sprintf("%v:%v", ip, port))
assert.Nil(t, err, "error generating rand network address")
if addr.Routable() {
return addr
}
}
}
func TestAddrBookRemoveAddress(t *testing.T) {
fname := createTempFileName("addrbook_test")
book := NewAddrBook(fname, true)
book.SetLogger(log.TestingLogger())
addr := randIPv4Address(t)
book.AddAddress(addr, addr)
assert.Equal(t, 1, book.Size())
book.RemoveAddress(addr)
assert.Equal(t, 0, book.Size())
nonExistingAddr := randIPv4Address(t)
book.RemoveAddress(nonExistingAddr)
assert.Equal(t, 0, book.Size())
}

686
p2p/connection.go Normal file

@@ -0,0 +1,686 @@
package p2p
import (
"bufio"
"fmt"
"io"
"math"
"net"
"runtime/debug"
"sync/atomic"
"time"
wire "github.com/tendermint/go-wire"
cmn "github.com/tendermint/tmlibs/common"
flow "github.com/tendermint/tmlibs/flowrate"
)
const (
numBatchMsgPackets = 10
minReadBufferSize = 1024
minWriteBufferSize = 65536
updateState = 2 * time.Second
pingTimeout = 40 * time.Second
flushThrottle = 100 * time.Millisecond
defaultSendQueueCapacity = 1
defaultSendRate = int64(512000) // 500KB/s
defaultRecvBufferCapacity = 4096
defaultRecvMessageCapacity = 22020096 // 21MB
defaultRecvRate = int64(512000) // 500KB/s
defaultSendTimeout = 10 * time.Second
)
type receiveCbFunc func(chID byte, msgBytes []byte)
type errorCbFunc func(interface{})
/*
Each peer has one `MConnection` (multiplex connection) instance.
__multiplex__ *noun* a system or signal involving simultaneous transmission of
several messages along a single channel of communication.
Each `MConnection` handles message transmission on multiple abstract communication
`Channel`s. Each channel has a globally unique byte id.
The byte id and the relative priorities of each `Channel` are configured upon
initialization of the connection.
There are two methods for sending messages:
func (m MConnection) Send(chID byte, msg interface{}) bool {}
func (m MConnection) TrySend(chID byte, msg interface{}) bool {}
`Send(chID, msg)` is a blocking call that waits until `msg` is successfully queued
for the channel with the given id byte `chID`, or until the request times out.
The message `msg` is serialized using the `tendermint/wire` submodule's
`WriteBinary()` reflection routine.
`TrySend(chID, msg)` is a nonblocking call that returns false if the channel's
queue is full.
Inbound message bytes are handled with an onReceive callback function.
*/
type MConnection struct {
cmn.BaseService
conn net.Conn
bufReader *bufio.Reader
bufWriter *bufio.Writer
sendMonitor *flow.Monitor
recvMonitor *flow.Monitor
send chan struct{}
pong chan struct{}
channels []*Channel
channelsIdx map[byte]*Channel
onReceive receiveCbFunc
onError errorCbFunc
errored uint32
config *MConnConfig
quit chan struct{}
flushTimer *cmn.ThrottleTimer // flush writes as necessary but throttled.
pingTimer *cmn.RepeatTimer // send pings periodically
chStatsTimer *cmn.RepeatTimer // update channel stats periodically
LocalAddress *NetAddress
RemoteAddress *NetAddress
}
// MConnConfig is a MConnection configuration.
type MConnConfig struct {
SendRate int64 `mapstructure:"send_rate"`
RecvRate int64 `mapstructure:"recv_rate"`
}
// DefaultMConnConfig returns the default config.
func DefaultMConnConfig() *MConnConfig {
return &MConnConfig{
SendRate: defaultSendRate,
RecvRate: defaultRecvRate,
}
}
// NewMConnection wraps net.Conn and creates multiplex connection
func NewMConnection(conn net.Conn, chDescs []*ChannelDescriptor, onReceive receiveCbFunc, onError errorCbFunc) *MConnection {
return NewMConnectionWithConfig(
conn,
chDescs,
onReceive,
onError,
DefaultMConnConfig())
}
// NewMConnectionWithConfig wraps net.Conn and creates multiplex connection with a config
func NewMConnectionWithConfig(conn net.Conn, chDescs []*ChannelDescriptor, onReceive receiveCbFunc, onError errorCbFunc, config *MConnConfig) *MConnection {
mconn := &MConnection{
conn: conn,
bufReader: bufio.NewReaderSize(conn, minReadBufferSize),
bufWriter: bufio.NewWriterSize(conn, minWriteBufferSize),
sendMonitor: flow.New(0, 0),
recvMonitor: flow.New(0, 0),
send: make(chan struct{}, 1),
pong: make(chan struct{}),
onReceive: onReceive,
onError: onError,
config: config,
LocalAddress: NewNetAddress(conn.LocalAddr()),
RemoteAddress: NewNetAddress(conn.RemoteAddr()),
}
// Create channels
var channelsIdx = map[byte]*Channel{}
var channels = []*Channel{}
for _, desc := range chDescs {
descCopy := *desc // copy the desc else unsafe access across connections
channel := newChannel(mconn, &descCopy)
channelsIdx[channel.id] = channel
channels = append(channels, channel)
}
mconn.channels = channels
mconn.channelsIdx = channelsIdx
mconn.BaseService = *cmn.NewBaseService(nil, "MConnection", mconn)
return mconn
}
func (c *MConnection) OnStart() error {
c.BaseService.OnStart()
c.quit = make(chan struct{})
c.flushTimer = cmn.NewThrottleTimer("flush", flushThrottle)
c.pingTimer = cmn.NewRepeatTimer("ping", pingTimeout)
c.chStatsTimer = cmn.NewRepeatTimer("chStats", updateState)
go c.sendRoutine()
go c.recvRoutine()
return nil
}
func (c *MConnection) OnStop() {
c.BaseService.OnStop()
c.flushTimer.Stop()
c.pingTimer.Stop()
c.chStatsTimer.Stop()
if c.quit != nil {
close(c.quit)
}
c.conn.Close()
// We can't close pong safely here because
// recvRoutine may write to it after we've stopped.
// Though it doesn't need to get closed at all,
// we close it @ recvRoutine.
// close(c.pong)
}
func (c *MConnection) String() string {
return fmt.Sprintf("MConn{%v}", c.conn.RemoteAddr())
}
func (c *MConnection) flush() {
c.Logger.Debug("Flush", "conn", c)
err := c.bufWriter.Flush()
if err != nil {
c.Logger.Error("MConnection flush failed", "error", err)
}
}
// Catch panics, usually caused by remote disconnects.
func (c *MConnection) _recover() {
if r := recover(); r != nil {
stack := debug.Stack()
err := cmn.StackError{r, stack}
c.stopForError(err)
}
}
func (c *MConnection) stopForError(r interface{}) {
c.Stop()
if atomic.CompareAndSwapUint32(&c.errored, 0, 1) {
if c.onError != nil {
c.onError(r)
}
}
}
// Queues a message to be sent to channel.
func (c *MConnection) Send(chID byte, msg interface{}) bool {
if !c.IsRunning() {
return false
}
c.Logger.Debug("Send", "channel", chID, "conn", c, "msg", msg) //, "bytes", wire.BinaryBytes(msg))
// Send message to channel.
channel, ok := c.channelsIdx[chID]
if !ok {
c.Logger.Error(cmn.Fmt("Cannot send bytes, unknown channel %X", chID))
return false
}
success := channel.sendBytes(wire.BinaryBytes(msg))
if success {
// Wake up sendRoutine if necessary
select {
case c.send <- struct{}{}:
default:
}
} else {
c.Logger.Error("Send failed", "channel", chID, "conn", c, "msg", msg)
}
return success
}
// Queues a message to be sent to channel.
// Nonblocking, returns true if successful.
func (c *MConnection) TrySend(chID byte, msg interface{}) bool {
if !c.IsRunning() {
return false
}
c.Logger.Debug("TrySend", "channel", chID, "conn", c, "msg", msg)
// Send message to channel.
channel, ok := c.channelsIdx[chID]
if !ok {
c.Logger.Error(cmn.Fmt("Cannot send bytes, unknown channel %X", chID))
return false
}
ok = channel.trySendBytes(wire.BinaryBytes(msg))
if ok {
// Wake up sendRoutine if necessary
select {
case c.send <- struct{}{}:
default:
}
}
return ok
}
// CanSend returns true if you can send more data onto the chID, false
// otherwise. Use only as a heuristic.
func (c *MConnection) CanSend(chID byte) bool {
if !c.IsRunning() {
return false
}
channel, ok := c.channelsIdx[chID]
if !ok {
c.Logger.Error(cmn.Fmt("Unknown channel %X", chID))
return false
}
return channel.canSend()
}
// sendRoutine polls for packets to send from channels.
func (c *MConnection) sendRoutine() {
defer c._recover()
FOR_LOOP:
for {
var n int
var err error
select {
case <-c.flushTimer.Ch:
// NOTE: flushTimer.Set() must be called every time
// something is written to .bufWriter.
c.flush()
case <-c.chStatsTimer.Ch:
for _, channel := range c.channels {
channel.updateStats()
}
case <-c.pingTimer.Ch:
c.Logger.Debug("Send Ping")
wire.WriteByte(packetTypePing, c.bufWriter, &n, &err)
c.sendMonitor.Update(int(n))
c.flush()
case <-c.pong:
c.Logger.Debug("Send Pong")
wire.WriteByte(packetTypePong, c.bufWriter, &n, &err)
c.sendMonitor.Update(int(n))
c.flush()
case <-c.quit:
break FOR_LOOP
case <-c.send:
// Send some msgPackets
eof := c.sendSomeMsgPackets()
if !eof {
// Keep sendRoutine awake.
select {
case c.send <- struct{}{}:
default:
}
}
}
if !c.IsRunning() {
break FOR_LOOP
}
if err != nil {
c.Logger.Error("Connection failed @ sendRoutine", "conn", c, "error", err)
c.stopForError(err)
break FOR_LOOP
}
}
// Cleanup
}
// Returns true if messages from channels were exhausted.
// Blocks in accordance to .sendMonitor throttling.
func (c *MConnection) sendSomeMsgPackets() bool {
// Block until .sendMonitor says we can write.
// Once we're ready we send more than we asked for,
// but amortized it should even out.
c.sendMonitor.Limit(maxMsgPacketTotalSize, atomic.LoadInt64(&c.config.SendRate), true)
// Now send some msgPackets.
for i := 0; i < numBatchMsgPackets; i++ {
if c.sendMsgPacket() {
return true
}
}
return false
}
// Returns true if messages from channels were exhausted.
func (c *MConnection) sendMsgPacket() bool {
// Choose a channel to create a msgPacket from.
// The chosen channel will be the one whose recentlySent/priority is the least.
var leastRatio float32 = math.MaxFloat32
var leastChannel *Channel
for _, channel := range c.channels {
// If nothing to send, skip this channel
if !channel.isSendPending() {
continue
}
// Get ratio, and keep track of lowest ratio.
ratio := float32(channel.recentlySent) / float32(channel.priority)
if ratio < leastRatio {
leastRatio = ratio
leastChannel = channel
}
}
// Nothing to send?
if leastChannel == nil {
return true
} else {
// c.Logger.Info("Found a msgPacket to send")
}
// Make & send a msgPacket from this channel
n, err := leastChannel.writeMsgPacketTo(c.bufWriter)
if err != nil {
c.Logger.Error("Failed to write msgPacket", "error", err)
c.stopForError(err)
return true
}
c.sendMonitor.Update(int(n))
c.flushTimer.Set()
return false
}
// recvRoutine reads msgPackets and reconstructs the message using the channels' "recving" buffer.
// After a whole message has been assembled, it's pushed to onReceive().
// Blocks depending on how the connection is throttled.
func (c *MConnection) recvRoutine() {
defer c._recover()
FOR_LOOP:
for {
// Block until .recvMonitor says we can read.
c.recvMonitor.Limit(maxMsgPacketTotalSize, atomic.LoadInt64(&c.config.RecvRate), true)
/*
// Peek into bufReader for debugging
if numBytes := c.bufReader.Buffered(); numBytes > 0 {
log.Info("Peek connection buffer", "numBytes", numBytes, "bytes", log15.Lazy{func() []byte {
bytes, err := c.bufReader.Peek(MinInt(numBytes, 100))
if err == nil {
return bytes
} else {
log.Warn("Error peeking connection buffer", "error", err)
return nil
}
}})
}
*/
// Read packet type
var n int
var err error
pktType := wire.ReadByte(c.bufReader, &n, &err)
c.recvMonitor.Update(int(n))
if err != nil {
if c.IsRunning() {
c.Logger.Error("Connection failed @ recvRoutine (reading byte)", "conn", c, "error", err)
c.stopForError(err)
}
break FOR_LOOP
}
// Read more depending on packet type.
switch pktType {
case packetTypePing:
// TODO: prevent abuse, as they cause flush()'s.
c.Logger.Debug("Receive Ping")
c.pong <- struct{}{}
case packetTypePong:
// do nothing
c.Logger.Debug("Receive Pong")
case packetTypeMsg:
pkt, n, err := msgPacket{}, int(0), error(nil)
wire.ReadBinaryPtr(&pkt, c.bufReader, maxMsgPacketTotalSize, &n, &err)
c.recvMonitor.Update(int(n))
if err != nil {
if c.IsRunning() {
c.Logger.Error("Connection failed @ recvRoutine", "conn", c, "error", err)
c.stopForError(err)
}
break FOR_LOOP
}
channel, ok := c.channelsIdx[pkt.ChannelID]
if !ok || channel == nil {
cmn.PanicQ(cmn.Fmt("Unknown channel %X", pkt.ChannelID))
}
msgBytes, err := channel.recvMsgPacket(pkt)
if err != nil {
if c.IsRunning() {
c.Logger.Error("Connection failed @ recvRoutine", "conn", c, "error", err)
c.stopForError(err)
}
break FOR_LOOP
}
if msgBytes != nil {
c.Logger.Debug("Received bytes", "chID", pkt.ChannelID, "msgBytes", msgBytes)
c.onReceive(pkt.ChannelID, msgBytes)
}
default:
cmn.PanicSanity(cmn.Fmt("Unknown message type %X", pktType))
}
// TODO: shouldn't this go in the sendRoutine?
// Better to send a ping packet when *we* haven't sent anything for a while.
c.pingTimer.Reset()
}
// Cleanup
close(c.pong)
for _ = range c.pong {
// Drain
}
}
type ConnectionStatus struct {
SendMonitor flow.Status
RecvMonitor flow.Status
Channels []ChannelStatus
}
type ChannelStatus struct {
ID byte
SendQueueCapacity int
SendQueueSize int
Priority int
RecentlySent int64
}
func (c *MConnection) Status() ConnectionStatus {
var status ConnectionStatus
status.SendMonitor = c.sendMonitor.Status()
status.RecvMonitor = c.recvMonitor.Status()
status.Channels = make([]ChannelStatus, len(c.channels))
for i, channel := range c.channels {
status.Channels[i] = ChannelStatus{
ID: channel.id,
SendQueueCapacity: cap(channel.sendQueue),
SendQueueSize: int(channel.sendQueueSize), // TODO use atomic
Priority: channel.priority,
RecentlySent: channel.recentlySent,
}
}
return status
}
//-----------------------------------------------------------------------------
type ChannelDescriptor struct {
ID byte
Priority int
SendQueueCapacity int
RecvBufferCapacity int
RecvMessageCapacity int
}
func (chDesc *ChannelDescriptor) FillDefaults() {
if chDesc.SendQueueCapacity == 0 {
chDesc.SendQueueCapacity = defaultSendQueueCapacity
}
if chDesc.RecvBufferCapacity == 0 {
chDesc.RecvBufferCapacity = defaultRecvBufferCapacity
}
if chDesc.RecvMessageCapacity == 0 {
chDesc.RecvMessageCapacity = defaultRecvMessageCapacity
}
}
// TODO: lowercase.
// NOTE: not goroutine-safe.
type Channel struct {
conn *MConnection
desc *ChannelDescriptor
id byte
sendQueue chan []byte
sendQueueSize int32 // atomic.
recving []byte
sending []byte
priority int
recentlySent int64 // exponential moving average
}
func newChannel(conn *MConnection, desc *ChannelDescriptor) *Channel {
desc.FillDefaults()
if desc.Priority <= 0 {
cmn.PanicSanity("Channel default priority must be a postive integer")
}
return &Channel{
conn: conn,
desc: desc,
id: desc.ID,
sendQueue: make(chan []byte, desc.SendQueueCapacity),
recving: make([]byte, 0, desc.RecvBufferCapacity),
priority: desc.Priority,
}
}
// Queues message to send to this channel.
// Goroutine-safe
// Times out (and returns false) after defaultSendTimeout
func (ch *Channel) sendBytes(bytes []byte) bool {
select {
case ch.sendQueue <- bytes:
atomic.AddInt32(&ch.sendQueueSize, 1)
return true
case <-time.After(defaultSendTimeout):
return false
}
}
// Queues message to send to this channel.
// Nonblocking, returns true if successful.
// Goroutine-safe
func (ch *Channel) trySendBytes(bytes []byte) bool {
select {
case ch.sendQueue <- bytes:
atomic.AddInt32(&ch.sendQueueSize, 1)
return true
default:
return false
}
}
// Goroutine-safe
func (ch *Channel) loadSendQueueSize() (size int) {
return int(atomic.LoadInt32(&ch.sendQueueSize))
}
// Goroutine-safe
// Use only as a heuristic.
func (ch *Channel) canSend() bool {
return ch.loadSendQueueSize() < defaultSendQueueCapacity
}
// Returns true if any msgPackets are pending to be sent.
// Call before calling nextMsgPacket()
// Goroutine-safe
func (ch *Channel) isSendPending() bool {
if len(ch.sending) == 0 {
if len(ch.sendQueue) == 0 {
return false
}
ch.sending = <-ch.sendQueue
}
return true
}
// Creates a new msgPacket to send.
// Not goroutine-safe
func (ch *Channel) nextMsgPacket() msgPacket {
packet := msgPacket{}
packet.ChannelID = byte(ch.id)
packet.Bytes = ch.sending[:cmn.MinInt(maxMsgPacketPayloadSize, len(ch.sending))]
if len(ch.sending) <= maxMsgPacketPayloadSize {
packet.EOF = byte(0x01)
ch.sending = nil
atomic.AddInt32(&ch.sendQueueSize, -1) // decrement sendQueueSize
} else {
packet.EOF = byte(0x00)
ch.sending = ch.sending[cmn.MinInt(maxMsgPacketPayloadSize, len(ch.sending)):]
}
return packet
}
// Writes next msgPacket to w.
// Not goroutine-safe
func (ch *Channel) writeMsgPacketTo(w io.Writer) (n int, err error) {
packet := ch.nextMsgPacket()
// log.Debug("Write Msg Packet", "conn", ch.conn, "packet", packet)
wire.WriteByte(packetTypeMsg, w, &n, &err)
wire.WriteBinary(packet, w, &n, &err)
if err == nil {
ch.recentlySent += int64(n)
}
return
}
// Handles incoming msgPackets. Returns a msg bytes if msg is complete.
// Not goroutine-safe
func (ch *Channel) recvMsgPacket(packet msgPacket) ([]byte, error) {
// log.Debug("Read Msg Packet", "conn", ch.conn, "packet", packet)
if ch.desc.RecvMessageCapacity < len(ch.recving)+len(packet.Bytes) {
return nil, wire.ErrBinaryReadOverflow
}
ch.recving = append(ch.recving, packet.Bytes...)
if packet.EOF == byte(0x01) {
msgBytes := ch.recving
// clear the slice without re-allocating.
// http://stackoverflow.com/questions/16971741/how-do-you-clear-a-slice-in-go
// suggests this could be a memory leak, but we might as well keep the memory for the channel until it closes,
// at which point the recving slice stops being used and should be garbage collected
ch.recving = ch.recving[:0] // make([]byte, 0, ch.desc.RecvBufferCapacity)
return msgBytes, nil
}
return nil, nil
}
// Call this periodically to update stats for throttling purposes.
// Not goroutine-safe
func (ch *Channel) updateStats() {
// Exponential decay of stats.
// TODO: optimize.
ch.recentlySent = int64(float64(ch.recentlySent) * 0.8)
}
//-----------------------------------------------------------------------------
const (
maxMsgPacketPayloadSize = 1024
maxMsgPacketOverheadSize = 10 // It's actually lower but good enough
maxMsgPacketTotalSize = maxMsgPacketPayloadSize + maxMsgPacketOverheadSize
packetTypePing = byte(0x01)
packetTypePong = byte(0x02)
packetTypeMsg = byte(0x03)
)
// Messages in channels are chopped into smaller msgPackets for multiplexing.
type msgPacket struct {
ChannelID byte
EOF byte // 1 means message ends here.
Bytes []byte
}
func (p msgPacket) String() string {
return fmt.Sprintf("MsgPacket{%X:%X T:%X}", p.ChannelID, p.Bytes, p.EOF)
}
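For reference, a sketch of constructing a connection with a non-default `MConnConfig` (the helper name, rates, and channel descriptor are illustrative only, mirroring the existing tests):
```go
package p2p_test

import (
	"net"

	p2p "github.com/tendermint/tendermint/p2p"
	"github.com/tendermint/tmlibs/log"
)

// dialWithCustomRates is a hypothetical helper showing NewMConnectionWithConfig.
func dialWithCustomRates(conn net.Conn) (*p2p.MConnection, error) {
	chDescs := []*p2p.ChannelDescriptor{{ID: 0x01, Priority: 1, SendQueueCapacity: 1}}
	onReceive := func(chID byte, msgBytes []byte) {}
	onError := func(r interface{}) {}

	cfg := p2p.DefaultMConnConfig()
	cfg.SendRate = 1024000 // ~1 MB/s instead of the 500 KB/s default
	cfg.RecvRate = 1024000

	mconn := p2p.NewMConnectionWithConfig(conn, chDescs, onReceive, onError, cfg)
	mconn.SetLogger(log.TestingLogger())
	_, err := mconn.Start()
	return mconn, err
}
```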

144
p2p/connection_test.go Normal file

@@ -0,0 +1,144 @@
package p2p_test
import (
"net"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
p2p "github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tmlibs/log"
)
func createMConnection(conn net.Conn) *p2p.MConnection {
onReceive := func(chID byte, msgBytes []byte) {
}
onError := func(r interface{}) {
}
c := createMConnectionWithCallbacks(conn, onReceive, onError)
c.SetLogger(log.TestingLogger())
return c
}
func createMConnectionWithCallbacks(conn net.Conn, onReceive func(chID byte, msgBytes []byte), onError func(r interface{})) *p2p.MConnection {
chDescs := []*p2p.ChannelDescriptor{&p2p.ChannelDescriptor{ID: 0x01, Priority: 1, SendQueueCapacity: 1}}
c := p2p.NewMConnection(conn, chDescs, onReceive, onError)
c.SetLogger(log.TestingLogger())
return c
}
func TestMConnectionSend(t *testing.T) {
assert, require := assert.New(t), require.New(t)
server, client := net.Pipe()
defer server.Close()
defer client.Close()
mconn := createMConnection(client)
_, err := mconn.Start()
require.Nil(err)
defer mconn.Stop()
msg := "Ant-Man"
assert.True(mconn.Send(0x01, msg))
// Note: subsequent Send/TrySend calls could pass because we are reading from
// the send queue in a separate goroutine.
server.Read(make([]byte, len(msg)))
assert.True(mconn.CanSend(0x01))
msg = "Spider-Man"
assert.True(mconn.TrySend(0x01, msg))
server.Read(make([]byte, len(msg)))
assert.False(mconn.CanSend(0x05), "CanSend should return false because channel is unknown")
assert.False(mconn.Send(0x05, "Absorbing Man"), "Send should return false because channel is unknown")
}
func TestMConnectionReceive(t *testing.T) {
assert, require := assert.New(t), require.New(t)
server, client := net.Pipe()
defer server.Close()
defer client.Close()
receivedCh := make(chan []byte)
errorsCh := make(chan interface{})
onReceive := func(chID byte, msgBytes []byte) {
receivedCh <- msgBytes
}
onError := func(r interface{}) {
errorsCh <- r
}
mconn1 := createMConnectionWithCallbacks(client, onReceive, onError)
_, err := mconn1.Start()
require.Nil(err)
defer mconn1.Stop()
mconn2 := createMConnection(server)
_, err = mconn2.Start()
require.Nil(err)
defer mconn2.Stop()
msg := "Cyclops"
assert.True(mconn2.Send(0x01, msg))
select {
case receivedBytes := <-receivedCh:
assert.Equal([]byte(msg), receivedBytes[2:]) // strip the first two bytes (go-wire length prefix)
case err := <-errorsCh:
t.Fatalf("Expected %s, got %+v", msg, err)
case <-time.After(500 * time.Millisecond):
t.Fatalf("Did not receive %s message in 500ms", msg)
}
}
func TestMConnectionStatus(t *testing.T) {
assert, require := assert.New(t), require.New(t)
server, client := net.Pipe()
defer server.Close()
defer client.Close()
mconn := createMConnection(client)
_, err := mconn.Start()
require.Nil(err)
defer mconn.Stop()
status := mconn.Status()
assert.NotNil(status)
assert.Zero(status.Channels[0].SendQueueSize)
}
func TestMConnectionStopsAndReturnsError(t *testing.T) {
assert, require := assert.New(t), require.New(t)
server, client := net.Pipe()
defer server.Close()
defer client.Close()
receivedCh := make(chan []byte)
errorsCh := make(chan interface{})
onReceive := func(chID byte, msgBytes []byte) {
receivedCh <- msgBytes
}
onError := func(r interface{}) {
errorsCh <- r
}
mconn := createMConnectionWithCallbacks(client, onReceive, onError)
_, err := mconn.Start()
require.Nil(err)
defer mconn.Stop()
client.Close()
select {
case receivedBytes := <-receivedCh:
t.Fatalf("Expected error, got %v", receivedBytes)
case err := <-errorsCh:
assert.NotNil(err)
assert.False(mconn.IsRunning())
case <-time.After(500 * time.Millisecond):
t.Fatal("Did not receive error in 500ms")
}
}

173
p2p/fuzz.go Normal file

@@ -0,0 +1,173 @@
package p2p
import (
"math/rand"
"net"
"sync"
"time"
)
const (
// FuzzModeDrop is a mode in which we randomly drop reads/writes or connections, or randomly sleep
FuzzModeDrop = iota
// FuzzModeDelay is a mode in which we randomly sleep
FuzzModeDelay
)
// FuzzedConnection wraps any net.Conn and depending on the mode either delays
// reads/writes or randomly drops reads/writes/connections.
type FuzzedConnection struct {
conn net.Conn
mtx sync.Mutex
start <-chan time.Time
active bool
config *FuzzConnConfig
}
// FuzzConnConfig is a FuzzedConnection configuration.
type FuzzConnConfig struct {
Mode int
MaxDelay time.Duration
ProbDropRW float64
ProbDropConn float64
ProbSleep float64
}
// DefaultFuzzConnConfig returns the default config.
func DefaultFuzzConnConfig() *FuzzConnConfig {
return &FuzzConnConfig{
Mode: FuzzModeDrop,
MaxDelay: 3 * time.Second,
ProbDropRW: 0.2,
ProbDropConn: 0.00,
ProbSleep: 0.00,
}
}
// FuzzConn creates a new FuzzedConnection. Fuzzing starts immediately.
func FuzzConn(conn net.Conn) net.Conn {
return FuzzConnFromConfig(conn, DefaultFuzzConnConfig())
}
// FuzzConnFromConfig creates a new FuzzedConnection from a config. Fuzzing
// starts immediately.
func FuzzConnFromConfig(conn net.Conn, config *FuzzConnConfig) net.Conn {
return &FuzzedConnection{
conn: conn,
start: make(<-chan time.Time),
active: true,
config: config,
}
}
// FuzzConnAfter creates a new FuzzedConnection. Fuzzing starts when the
// duration elapses.
func FuzzConnAfter(conn net.Conn, d time.Duration) net.Conn {
return FuzzConnAfterFromConfig(conn, d, DefaultFuzzConnConfig())
}
// FuzzConnAfterFromConfig creates a new FuzzedConnection from a config.
// Fuzzing starts when the duration elapses.
func FuzzConnAfterFromConfig(conn net.Conn, d time.Duration, config *FuzzConnConfig) net.Conn {
return &FuzzedConnection{
conn: conn,
start: time.After(d),
active: false,
config: config,
}
}
// Config returns the connection's config.
func (fc *FuzzedConnection) Config() *FuzzConnConfig {
return fc.config
}
// Read implements net.Conn.
func (fc *FuzzedConnection) Read(data []byte) (n int, err error) {
if fc.fuzz() {
return 0, nil
}
return fc.conn.Read(data)
}
// Write implements net.Conn.
func (fc *FuzzedConnection) Write(data []byte) (n int, err error) {
if fc.fuzz() {
return 0, nil
}
return fc.conn.Write(data)
}
// Close implements net.Conn.
func (fc *FuzzedConnection) Close() error { return fc.conn.Close() }
// LocalAddr implements net.Conn.
func (fc *FuzzedConnection) LocalAddr() net.Addr { return fc.conn.LocalAddr() }
// RemoteAddr implements net.Conn.
func (fc *FuzzedConnection) RemoteAddr() net.Addr { return fc.conn.RemoteAddr() }
// SetDeadline implements net.Conn.
func (fc *FuzzedConnection) SetDeadline(t time.Time) error { return fc.conn.SetDeadline(t) }
// SetReadDeadline implements net.Conn.
func (fc *FuzzedConnection) SetReadDeadline(t time.Time) error {
return fc.conn.SetReadDeadline(t)
}
// SetWriteDeadline implements net.Conn.
func (fc *FuzzedConnection) SetWriteDeadline(t time.Time) error {
return fc.conn.SetWriteDeadline(t)
}
func (fc *FuzzedConnection) randomDuration() time.Duration {
maxDelayMillis := int(fc.config.MaxDelay.Nanoseconds() / int64(time.Millisecond))
return time.Millisecond * time.Duration(rand.Int()%maxDelayMillis)
}
// implements the fuzz (delay, kill conn)
// and returns whether or not the read/write should be ignored
func (fc *FuzzedConnection) fuzz() bool {
if !fc.shouldFuzz() {
return false
}
switch fc.config.Mode {
case FuzzModeDrop:
// randomly drop the r/w, drop the conn, or sleep
r := rand.Float64()
if r <= fc.config.ProbDropRW {
return true
} else if r < fc.config.ProbDropRW+fc.config.ProbDropConn {
// XXX: can't this fail because machine precision?
// XXX: do we need an error?
fc.Close()
return true
} else if r < fc.config.ProbDropRW+fc.config.ProbDropConn+fc.config.ProbSleep {
time.Sleep(fc.randomDuration())
}
case FuzzModeDelay:
// sleep a bit
time.Sleep(fc.randomDuration())
}
return false
}
func (fc *FuzzedConnection) shouldFuzz() bool {
if fc.active {
return true
}
fc.mtx.Lock()
defer fc.mtx.Unlock()
select {
case <-fc.start:
fc.active = true
return true
default:
return false
}
}
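As a usage note, here is a small sketch of wrapping a connection so fuzzing only starts after a grace period (the helper name and durations are made up for illustration):
```go
package p2p

import (
	"net"
	"time"
)

// fuzzAfterHandshake is a hypothetical helper: delay-mode fuzzing that only
// kicks in 10 seconds after the connection is established.
func fuzzAfterHandshake(conn net.Conn) net.Conn {
	cfg := DefaultFuzzConnConfig()
	cfg.Mode = FuzzModeDelay
	cfg.MaxDelay = 500 * time.Millisecond
	return FuzzConnAfterFromConfig(conn, 10*time.Second, cfg)
}
```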

29
p2p/ip_range_counter.go Normal file

@@ -0,0 +1,29 @@
package p2p
import (
"strings"
)
// TODO Test
func AddToIPRangeCounts(counts map[string]int, ip string) map[string]int {
changes := make(map[string]int)
ipParts := strings.Split(ip, ":")
for i := 1; i < len(ipParts); i++ {
prefix := strings.Join(ipParts[:i], ":")
counts[prefix] += 1
changes[prefix] = counts[prefix]
}
return changes
}
// TODO Test
func CheckIPRangeCounts(counts map[string]int, limits []int) bool {
for prefix, count := range counts {
ipParts := strings.Split(prefix, ":")
numParts := len(ipParts)
if limits[numParts] < count {
return false
}
}
return true
}
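Since both helpers above are still marked `TODO Test`, here is a minimal sketch of what such a test could look like (values are arbitrary; this test is not part of the package):
```go
package p2p

import "testing"

func TestIPRangeCounts(t *testing.T) {
	counts := make(map[string]int)

	// "1:2:3:4" yields the prefixes "1", "1:2" and "1:2:3".
	changes := AddToIPRangeCounts(counts, "1:2:3:4")
	if changes["1:2"] != 1 || counts["1:2:3"] != 1 {
		t.Fatalf("unexpected counts: %v", counts)
	}

	// limits are indexed by the number of prefix parts.
	limits := []int{0, 10, 5, 1}
	if !CheckIPRangeCounts(counts, limits) {
		t.Fatal("counts should be within limits")
	}
}
```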

218
p2p/listener.go Normal file

@@ -0,0 +1,218 @@
package p2p
import (
"fmt"
"net"
"strconv"
"time"
"github.com/tendermint/tendermint/p2p/upnp"
cmn "github.com/tendermint/tmlibs/common"
"github.com/tendermint/tmlibs/log"
)
type Listener interface {
Connections() <-chan net.Conn
InternalAddress() *NetAddress
ExternalAddress() *NetAddress
String() string
Stop() bool
}
// Implements Listener
type DefaultListener struct {
cmn.BaseService
listener net.Listener
intAddr *NetAddress
extAddr *NetAddress
connections chan net.Conn
}
const (
numBufferedConnections = 10
defaultExternalPort = 8770
tryListenSeconds = 5
)
func splitHostPort(addr string) (host string, port int) {
host, portStr, err := net.SplitHostPort(addr)
if err != nil {
cmn.PanicSanity(err)
}
port, err = strconv.Atoi(portStr)
if err != nil {
cmn.PanicSanity(err)
}
return host, port
}
// skipUPNP: If true, does not try getUPNPExternalAddress()
func NewDefaultListener(protocol string, lAddr string, skipUPNP bool, logger log.Logger) Listener {
// Local listen IP & port
lAddrIP, lAddrPort := splitHostPort(lAddr)
// Create listener
var listener net.Listener
var err error
for i := 0; i < tryListenSeconds; i++ {
listener, err = net.Listen(protocol, lAddr)
if err == nil {
break
} else if i < tryListenSeconds-1 {
time.Sleep(time.Second * 1)
}
}
if err != nil {
cmn.PanicCrisis(err)
}
// Actual listener local IP & port
listenerIP, listenerPort := splitHostPort(listener.Addr().String())
logger.Info("Local listener", "ip", listenerIP, "port", listenerPort)
// Determine internal address...
var intAddr *NetAddress
intAddr, err = NewNetAddressString(lAddr)
if err != nil {
cmn.PanicCrisis(err)
}
// Determine external address...
var extAddr *NetAddress
if !skipUPNP {
// If the lAddrIP is INADDR_ANY, try UPnP
if lAddrIP == "" || lAddrIP == "0.0.0.0" {
extAddr = getUPNPExternalAddress(lAddrPort, listenerPort, logger)
}
}
// Otherwise just use the local address...
if extAddr == nil {
extAddr = getNaiveExternalAddress(listenerPort)
}
if extAddr == nil {
cmn.PanicCrisis("Could not determine external address!")
}
dl := &DefaultListener{
listener: listener,
intAddr: intAddr,
extAddr: extAddr,
connections: make(chan net.Conn, numBufferedConnections),
}
dl.BaseService = *cmn.NewBaseService(logger, "DefaultListener", dl)
dl.Start() // Started upon construction
return dl
}
func (l *DefaultListener) OnStart() error {
l.BaseService.OnStart()
go l.listenRoutine()
return nil
}
func (l *DefaultListener) OnStop() {
l.BaseService.OnStop()
l.listener.Close()
}
// Accept connections and pass on the channel
func (l *DefaultListener) listenRoutine() {
for {
conn, err := l.listener.Accept()
if !l.IsRunning() {
break // Go to cleanup
}
// listener wasn't stopped,
// yet we encountered an error.
if err != nil {
cmn.PanicCrisis(err)
}
l.connections <- conn
}
// Cleanup
close(l.connections)
for _ = range l.connections {
// Drain
}
}
// A channel of inbound connections.
// It gets closed when the listener closes.
func (l *DefaultListener) Connections() <-chan net.Conn {
return l.connections
}
func (l *DefaultListener) InternalAddress() *NetAddress {
return l.intAddr
}
func (l *DefaultListener) ExternalAddress() *NetAddress {
return l.extAddr
}
// NOTE: The returned listener is already Accept()'ing.
// So it's not suitable to pass into http.Serve().
func (l *DefaultListener) NetListener() net.Listener {
return l.listener
}
func (l *DefaultListener) String() string {
return fmt.Sprintf("Listener(@%v)", l.extAddr)
}
/* external address helpers */
// UPNP external address discovery & port mapping
func getUPNPExternalAddress(externalPort, internalPort int, logger log.Logger) *NetAddress {
logger.Info("Getting UPNP external address")
nat, err := upnp.Discover()
if err != nil {
logger.Info("Could not perform UPNP discover", "error", err)
return nil
}
ext, err := nat.GetExternalAddress()
if err != nil {
logger.Info("Could not get UPNP external address", "error", err)
return nil
}
// UPnP can't seem to get the external port, so let's just be explicit.
if externalPort == 0 {
externalPort = defaultExternalPort
}
externalPort, err = nat.AddPortMapping("tcp", externalPort, internalPort, "tendermint", 0)
if err != nil {
logger.Info("Could not add UPNP port mapping", "error", err)
return nil
}
logger.Info("Got UPNP external address", "address", ext)
return NewNetAddressIPPort(ext, uint16(externalPort))
}
// TODO: use syscalls: http://pastebin.com/9exZG4rh
func getNaiveExternalAddress(port int) *NetAddress {
addrs, err := net.InterfaceAddrs()
if err != nil {
cmn.PanicCrisis(cmn.Fmt("Could not fetch interface addresses: %v", err))
}
for _, a := range addrs {
ipnet, ok := a.(*net.IPNet)
if !ok {
continue
}
v4 := ipnet.IP.To4()
if v4 == nil || v4[0] == 127 {
continue // loopback
}
return NewNetAddressIPPort(ipnet.IP, uint16(port))
}
return nil
}

42
p2p/listener_test.go Normal file

@@ -0,0 +1,42 @@
package p2p
import (
"bytes"
"testing"
"github.com/tendermint/tmlibs/log"
)
func TestListener(t *testing.T) {
// Create a listener
l := NewDefaultListener("tcp", ":8001", true, log.TestingLogger())
// Dial the listener
lAddr := l.ExternalAddress()
connOut, err := lAddr.Dial()
if err != nil {
t.Fatalf("Could not connect to listener address %v", lAddr)
} else {
t.Logf("Created a connection to listener address %v", lAddr)
}
connIn, ok := <-l.Connections()
if !ok {
t.Fatalf("Could not get inbound connection from listener")
}
msg := []byte("hi!")
go connIn.Write(msg)
b := make([]byte, 32)
n, err := connOut.Read(b)
if err != nil {
t.Fatalf("Error reading off connection: %v", err)
}
b = b[:n]
if !bytes.Equal(msg, b) {
t.Fatalf("Got %s, expected %s", b, msg)
}
// Close the server, no longer needed.
l.Stop()
}

253
p2p/netaddress.go Normal file

@@ -0,0 +1,253 @@
// Modified for Tendermint
// Originally Copyright (c) 2013-2014 Conformal Systems LLC.
// https://github.com/conformal/btcd/blob/master/LICENSE
package p2p
import (
"errors"
"flag"
"net"
"strconv"
"time"
cmn "github.com/tendermint/tmlibs/common"
)
// NetAddress defines information about a peer on the network
// including its IP address and port.
type NetAddress struct {
IP net.IP
Port uint16
str string
}
// NewNetAddress returns a new NetAddress using the provided TCP
// address. In tests, a non-TCP net.Addr results in 0.0.0.0:0; in a normal
// run, a non-TCP net.Addr causes a panic.
// TODO: socks proxies?
func NewNetAddress(addr net.Addr) *NetAddress {
tcpAddr, ok := addr.(*net.TCPAddr)
if !ok {
if flag.Lookup("test.v") == nil { // normal run
cmn.PanicSanity(cmn.Fmt("Only TCPAddrs are supported. Got: %v", addr))
} else { // in testing
return NewNetAddressIPPort(net.ParseIP("0.0.0.0"), 0)
}
}
ip := tcpAddr.IP
port := uint16(tcpAddr.Port)
return NewNetAddressIPPort(ip, port)
}
// NewNetAddressString returns a new NetAddress using the provided
// address in the form of "IP:Port". Also resolves the host if host
// is not an IP.
func NewNetAddressString(addr string) (*NetAddress, error) {
host, portStr, err := net.SplitHostPort(addr)
if err != nil {
return nil, err
}
ip := net.ParseIP(host)
if ip == nil {
if len(host) > 0 {
ips, err := net.LookupIP(host)
if err != nil {
return nil, err
}
ip = ips[0]
}
}
port, err := strconv.ParseUint(portStr, 10, 16)
if err != nil {
return nil, err
}
na := NewNetAddressIPPort(ip, uint16(port))
return na, nil
}
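// Examples (illustrative): "127.0.0.1:46656" parses directly; a hostname such
// as "example.com:46656" is resolved via net.LookupIP and the first returned
// IP is used; "127.0.0.1:foo" fails because the port is not a number.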
// NewNetAddressStrings returns a slice of NetAddresses built from
// the provided strings.
func NewNetAddressStrings(addrs []string) ([]*NetAddress, error) {
netAddrs := make([]*NetAddress, len(addrs))
for i, addr := range addrs {
netAddr, err := NewNetAddressString(addr)
if err != nil {
return nil, errors.New(cmn.Fmt("Error in address %s: %v", addr, err))
}
netAddrs[i] = netAddr
}
return netAddrs, nil
}
// NewNetAddressIPPort returns a new NetAddress using the provided IP
// and port number.
func NewNetAddressIPPort(ip net.IP, port uint16) *NetAddress {
na := &NetAddress{
IP: ip,
Port: port,
str: net.JoinHostPort(
ip.String(),
strconv.FormatUint(uint64(port), 10),
),
}
return na
}
// Equals reports whether na and other are the same addresses.
func (na *NetAddress) Equals(other interface{}) bool {
if o, ok := other.(*NetAddress); ok {
return na.String() == o.String()
}
return false
}
func (na *NetAddress) Less(other interface{}) bool {
if o, ok := other.(*NetAddress); ok {
return na.String() < o.String()
}
cmn.PanicSanity("Cannot compare unequal types")
return false
}
// String representation.
func (na *NetAddress) String() string {
if na.str == "" {
na.str = net.JoinHostPort(
na.IP.String(),
strconv.FormatUint(uint64(na.Port), 10),
)
}
return na.str
}
// Dial calls net.Dial on the address.
func (na *NetAddress) Dial() (net.Conn, error) {
conn, err := net.Dial("tcp", na.String())
if err != nil {
return nil, err
}
return conn, nil
}
// DialTimeout calls net.DialTimeout on the address.
func (na *NetAddress) DialTimeout(timeout time.Duration) (net.Conn, error) {
conn, err := net.DialTimeout("tcp", na.String(), timeout)
if err != nil {
return nil, err
}
return conn, nil
}
// Routable returns true if the address is routable.
func (na *NetAddress) Routable() bool {
// TODO(oga) bitcoind doesn't include RFC3849 here, but should we?
return na.Valid() && !(na.RFC1918() || na.RFC3927() || na.RFC4862() ||
na.RFC4193() || na.RFC4843() || na.Local())
}
// Valid returns false for invalid addresses: for IPv4, the unspecified
// (all-zero) or broadcast (all-ones) address; for IPv6, the zero address or
// an RFC3849 documentation address.
func (na *NetAddress) Valid() bool {
return na.IP != nil && !(na.IP.IsUnspecified() || na.RFC3849() ||
na.IP.Equal(net.IPv4bcast))
}
// Local returns true if it is a local address.
func (na *NetAddress) Local() bool {
return na.IP.IsLoopback() || zero4.Contains(na.IP)
}
// ReachabilityTo reports whether (and how well) o can be reached from na.
func (na *NetAddress) ReachabilityTo(o *NetAddress) int {
const (
Unreachable = 0
Default = iota
Teredo
Ipv6_weak
Ipv4
Ipv6_strong
Private
)
if !na.Routable() {
return Unreachable
} else if na.RFC4380() {
if !o.Routable() {
return Default
} else if o.RFC4380() {
return Teredo
} else if o.IP.To4() != nil {
return Ipv4
} else { // ipv6
return Ipv6_weak
}
} else if na.IP.To4() != nil {
if o.Routable() && o.IP.To4() != nil {
return Ipv4
}
return Default
} else /* ipv6 */ {
var tunnelled bool
// Is our v6 tunnelled?
if o.RFC3964() || o.RFC6052() || o.RFC6145() {
tunnelled = true
}
if !o.Routable() {
return Default
} else if o.RFC4380() {
return Teredo
} else if o.IP.To4() != nil {
return Ipv4
} else if tunnelled {
// only prioritise ipv6 if we aren't tunnelling it.
return Ipv6_weak
}
return Ipv6_strong
}
}
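// With the const block above the scores are: Unreachable=0, Default=1,
// Teredo=2, Ipv6_weak=3, Ipv4=4, Ipv6_strong=5, Private=6. For example, a
// routable IPv4 address scoring a non-routable target yields Default (1),
// while two routable IPv4 addresses yield Ipv4 (4).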
// RFC1918: IPv4 Private networks (10.0.0.0/8, 192.168.0.0/16, 172.16.0.0/12)
// RFC3849: IPv6 Documentation address (2001:0DB8::/32)
// RFC3927: IPv4 Autoconfig (169.254.0.0/16)
// RFC3964: IPv6 6to4 (2002::/16)
// RFC4193: IPv6 unique local (FC00::/7)
// RFC4380: IPv6 Teredo tunneling (2001::/32)
// RFC4843: IPv6 ORCHID: (2001:10::/28)
// RFC4862: IPv6 Autoconfig (FE80::/64)
// RFC6052: IPv6 well known prefix (64:FF9B::/96)
// RFC6145: IPv6 IPv4 translated address ::FFFF:0:0:0/96
var rfc1918_10 = net.IPNet{IP: net.ParseIP("10.0.0.0"), Mask: net.CIDRMask(8, 32)}
var rfc1918_192 = net.IPNet{IP: net.ParseIP("192.168.0.0"), Mask: net.CIDRMask(16, 32)}
var rfc1918_172 = net.IPNet{IP: net.ParseIP("172.16.0.0"), Mask: net.CIDRMask(12, 32)}
var rfc3849 = net.IPNet{IP: net.ParseIP("2001:0DB8::"), Mask: net.CIDRMask(32, 128)}
var rfc3927 = net.IPNet{IP: net.ParseIP("169.254.0.0"), Mask: net.CIDRMask(16, 32)}
var rfc3964 = net.IPNet{IP: net.ParseIP("2002::"), Mask: net.CIDRMask(16, 128)}
var rfc4193 = net.IPNet{IP: net.ParseIP("FC00::"), Mask: net.CIDRMask(7, 128)}
var rfc4380 = net.IPNet{IP: net.ParseIP("2001::"), Mask: net.CIDRMask(32, 128)}
var rfc4843 = net.IPNet{IP: net.ParseIP("2001:10::"), Mask: net.CIDRMask(28, 128)}
var rfc4862 = net.IPNet{IP: net.ParseIP("FE80::"), Mask: net.CIDRMask(64, 128)}
var rfc6052 = net.IPNet{IP: net.ParseIP("64:FF9B::"), Mask: net.CIDRMask(96, 128)}
var rfc6145 = net.IPNet{IP: net.ParseIP("::FFFF:0:0:0"), Mask: net.CIDRMask(96, 128)}
var zero4 = net.IPNet{IP: net.ParseIP("0.0.0.0"), Mask: net.CIDRMask(8, 32)}
func (na *NetAddress) RFC1918() bool {
return rfc1918_10.Contains(na.IP) ||
rfc1918_192.Contains(na.IP) ||
rfc1918_172.Contains(na.IP)
}
func (na *NetAddress) RFC3849() bool { return rfc3849.Contains(na.IP) }
func (na *NetAddress) RFC3927() bool { return rfc3927.Contains(na.IP) }
func (na *NetAddress) RFC3964() bool { return rfc3964.Contains(na.IP) }
func (na *NetAddress) RFC4193() bool { return rfc4193.Contains(na.IP) }
func (na *NetAddress) RFC4380() bool { return rfc4380.Contains(na.IP) }
func (na *NetAddress) RFC4843() bool { return rfc4843.Contains(na.IP) }
func (na *NetAddress) RFC4862() bool { return rfc4862.Contains(na.IP) }
func (na *NetAddress) RFC6052() bool { return rfc6052.Contains(na.IP) }
func (na *NetAddress) RFC6145() bool { return rfc6145.Contains(na.IP) }

114
p2p/netaddress_test.go Normal file

@@ -0,0 +1,114 @@
package p2p
import (
"net"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestNewNetAddress(t *testing.T) {
assert, require := assert.New(t), require.New(t)
tcpAddr, err := net.ResolveTCPAddr("tcp", "127.0.0.1:8080")
require.Nil(err)
addr := NewNetAddress(tcpAddr)
assert.Equal("127.0.0.1:8080", addr.String())
assert.NotPanics(func() {
NewNetAddress(&net.UDPAddr{IP: net.ParseIP("127.0.0.1"), Port: 8000})
}, "Calling NewNetAddress with UDPAddr should not panic in testing")
}
func TestNewNetAddressString(t *testing.T) {
assert := assert.New(t)
tests := []struct {
addr string
correct bool
}{
{"127.0.0.1:8080", true},
// {"127.0.0:8080", false},
{"a", false},
{"127.0.0.1:a", false},
{"a:8080", false},
{"8082", false},
{"127.0.0:8080000", false},
}
for _, t := range tests {
addr, err := NewNetAddressString(t.addr)
if t.correct {
if assert.Nil(err, t.addr) {
assert.Equal(t.addr, addr.String())
}
} else {
assert.NotNil(err, t.addr)
}
}
}
func TestNewNetAddressStrings(t *testing.T) {
assert, require := assert.New(t), require.New(t)
addrs, err := NewNetAddressStrings([]string{"127.0.0.1:8080", "127.0.0.2:8080"})
require.Nil(err)
assert.Equal(2, len(addrs))
}
func TestNewNetAddressIPPort(t *testing.T) {
assert := assert.New(t)
addr := NewNetAddressIPPort(net.ParseIP("127.0.0.1"), 8080)
assert.Equal("127.0.0.1:8080", addr.String())
}
func TestNetAddressProperties(t *testing.T) {
assert, require := assert.New(t), require.New(t)
// TODO add more test cases
tests := []struct {
addr string
valid bool
local bool
routable bool
}{
{"127.0.0.1:8080", true, true, false},
{"ya.ru:80", true, false, true},
}
for _, t := range tests {
addr, err := NewNetAddressString(t.addr)
require.Nil(err)
assert.Equal(t.valid, addr.Valid())
assert.Equal(t.local, addr.Local())
assert.Equal(t.routable, addr.Routable())
}
}
func TestNetAddressReachabilityTo(t *testing.T) {
assert, require := assert.New(t), require.New(t)
// TODO add more test cases
tests := []struct {
addr string
other string
reachability int
}{
{"127.0.0.1:8080", "127.0.0.1:8081", 0},
{"ya.ru:80", "127.0.0.1:8080", 1},
}
for _, t := range tests {
addr, err := NewNetAddressString(t.addr)
require.Nil(err)
other, err := NewNetAddressString(t.other)
require.Nil(err)
assert.Equal(t.reachability, addr.ReachabilityTo(other))
}
}

303
p2p/peer.go Normal file

@@ -0,0 +1,303 @@
package p2p
import (
"fmt"
"io"
"net"
"time"
"github.com/pkg/errors"
crypto "github.com/tendermint/go-crypto"
wire "github.com/tendermint/go-wire"
cmn "github.com/tendermint/tmlibs/common"
)
// A Peer can be marked as persistent, in which case you can use the
// Redial function to reconnect. Note that inbound peers can't be
// made persistent; they should be made persistent on the other end.
//
// Before using a peer, you will need to perform a handshake on the connection.
type Peer struct {
cmn.BaseService
outbound bool
conn net.Conn // source connection
mconn *MConnection // multiplex connection
persistent bool
config *PeerConfig
*NodeInfo
Key string
Data *cmn.CMap // User data.
}
// PeerConfig is a Peer configuration.
type PeerConfig struct {
AuthEnc bool `mapstructure:"auth_enc"` // authenticated encryption
// times are in seconds
HandshakeTimeout time.Duration `mapstructure:"handshake_timeout"`
DialTimeout time.Duration `mapstructure:"dial_timeout"`
MConfig *MConnConfig `mapstructure:"connection"`
Fuzz bool `mapstructure:"fuzz"` // fuzz connection (for testing)
FuzzConfig *FuzzConnConfig `mapstructure:"fuzz_config"`
}
// DefaultPeerConfig returns the default config.
func DefaultPeerConfig() *PeerConfig {
return &PeerConfig{
AuthEnc: true,
HandshakeTimeout: 20, // * time.Second,
DialTimeout: 3, // * time.Second,
MConfig: DefaultMConnConfig(),
Fuzz: false,
FuzzConfig: DefaultFuzzConnConfig(),
}
}
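// Note on units: HandshakeTimeout and DialTimeout above are stored as bare
// second counts and multiplied by time.Second at their call sites, e.g.
// conn.SetDeadline(time.Now().Add(config.HandshakeTimeout * time.Second)) in
// newPeerFromConnAndConfig and addr.DialTimeout(config.DialTimeout * time.Second)
// in dial.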
func newOutboundPeer(addr *NetAddress, reactorsByCh map[byte]Reactor, chDescs []*ChannelDescriptor, onPeerError func(*Peer, interface{}), ourNodePrivKey crypto.PrivKeyEd25519) (*Peer, error) {
return newOutboundPeerWithConfig(addr, reactorsByCh, chDescs, onPeerError, ourNodePrivKey, DefaultPeerConfig())
}
func newOutboundPeerWithConfig(addr *NetAddress, reactorsByCh map[byte]Reactor, chDescs []*ChannelDescriptor, onPeerError func(*Peer, interface{}), ourNodePrivKey crypto.PrivKeyEd25519, config *PeerConfig) (*Peer, error) {
conn, err := dial(addr, config)
if err != nil {
return nil, errors.Wrap(err, "Error creating peer")
}
peer, err := newPeerFromConnAndConfig(conn, true, reactorsByCh, chDescs, onPeerError, ourNodePrivKey, config)
if err != nil {
conn.Close()
return nil, err
}
return peer, nil
}
func newInboundPeer(conn net.Conn, reactorsByCh map[byte]Reactor, chDescs []*ChannelDescriptor, onPeerError func(*Peer, interface{}), ourNodePrivKey crypto.PrivKeyEd25519) (*Peer, error) {
return newInboundPeerWithConfig(conn, reactorsByCh, chDescs, onPeerError, ourNodePrivKey, DefaultPeerConfig())
}
func newInboundPeerWithConfig(conn net.Conn, reactorsByCh map[byte]Reactor, chDescs []*ChannelDescriptor, onPeerError func(*Peer, interface{}), ourNodePrivKey crypto.PrivKeyEd25519, config *PeerConfig) (*Peer, error) {
return newPeerFromConnAndConfig(conn, false, reactorsByCh, chDescs, onPeerError, ourNodePrivKey, config)
}
func newPeerFromConnAndConfig(rawConn net.Conn, outbound bool, reactorsByCh map[byte]Reactor, chDescs []*ChannelDescriptor, onPeerError func(*Peer, interface{}), ourNodePrivKey crypto.PrivKeyEd25519, config *PeerConfig) (*Peer, error) {
conn := rawConn
// Fuzz connection
if config.Fuzz {
// so we have time to do peer handshakes and get set up
conn = FuzzConnAfterFromConfig(conn, 10*time.Second, config.FuzzConfig)
}
// Encrypt connection
if config.AuthEnc {
conn.SetDeadline(time.Now().Add(config.HandshakeTimeout * time.Second))
var err error
conn, err = MakeSecretConnection(conn, ourNodePrivKey)
if err != nil {
return nil, errors.Wrap(err, "Error creating peer")
}
}
// Key and NodeInfo are set after Handshake
p := &Peer{
outbound: outbound,
conn: conn,
config: config,
Data: cmn.NewCMap(),
}
p.mconn = createMConnection(conn, p, reactorsByCh, chDescs, onPeerError, config.MConfig)
p.BaseService = *cmn.NewBaseService(nil, "Peer", p)
return p, nil
}
// CloseConn should be used when the peer was created, but never started.
func (p *Peer) CloseConn() {
p.conn.Close()
}
// makePersistent marks the peer as persistent.
func (p *Peer) makePersistent() {
if !p.outbound {
panic("inbound peers can't be made persistent")
}
p.persistent = true
}
// IsPersistent returns true if the peer is persistent, false otherwise.
func (p *Peer) IsPersistent() bool {
return p.persistent
}
// HandshakeTimeout performs a handshake between a given node and the peer.
// NOTE: blocking
func (p *Peer) HandshakeTimeout(ourNodeInfo *NodeInfo, timeout time.Duration) error {
// Set deadline for handshake so we don't block forever on conn.ReadFull
p.conn.SetDeadline(time.Now().Add(timeout))
var peerNodeInfo = new(NodeInfo)
var err1 error
var err2 error
cmn.Parallel(
func() {
var n int
wire.WriteBinary(ourNodeInfo, p.conn, &n, &err1)
},
func() {
var n int
wire.ReadBinary(peerNodeInfo, p.conn, maxNodeInfoSize, &n, &err2)
p.Logger.Info("Peer handshake", "peerNodeInfo", peerNodeInfo)
})
if err1 != nil {
return errors.Wrap(err1, "Error during handshake/write")
}
if err2 != nil {
return errors.Wrap(err2, "Error during handshake/read")
}
if p.config.AuthEnc {
// Check that the professed PubKey matches the sconn's.
if !peerNodeInfo.PubKey.Equals(p.PubKey().Wrap()) {
return fmt.Errorf("Ignoring connection with unmatching pubkey: %v vs %v",
peerNodeInfo.PubKey, p.PubKey())
}
}
// Remove deadline
p.conn.SetDeadline(time.Time{})
peerNodeInfo.RemoteAddr = p.Addr().String()
p.NodeInfo = peerNodeInfo
p.Key = peerNodeInfo.PubKey.KeyString()
return nil
}
// Addr returns peer's remote network address.
func (p *Peer) Addr() net.Addr {
return p.conn.RemoteAddr()
}
// PubKey returns peer's public key.
func (p *Peer) PubKey() crypto.PubKeyEd25519 {
if p.config.AuthEnc {
return p.conn.(*SecretConnection).RemotePubKey()
}
if p.NodeInfo == nil {
panic("Attempt to get peer's PubKey before calling Handshake")
}
// Without auth-enc, the only source of the key is the handshaked NodeInfo.
return p.NodeInfo.PubKey
}
// OnStart implements BaseService.
func (p *Peer) OnStart() error {
p.BaseService.OnStart()
_, err := p.mconn.Start()
return err
}
// OnStop implements BaseService.
func (p *Peer) OnStop() {
p.BaseService.OnStop()
p.mconn.Stop()
}
// Connection returns underlying MConnection.
func (p *Peer) Connection() *MConnection {
return p.mconn
}
// IsOutbound returns true if the connection is outbound, false otherwise.
func (p *Peer) IsOutbound() bool {
return p.outbound
}
// Send msg to the channel identified by chID byte. Returns false if the send
// queue is full after timeout, specified by MConnection.
func (p *Peer) Send(chID byte, msg interface{}) bool {
if !p.IsRunning() {
// see Switch#Broadcast, where we fetch the list of peers and loop over
// them - while we're looping, one peer may be removed and stopped.
return false
}
return p.mconn.Send(chID, msg)
}
// TrySend msg to the channel identified by chID byte. Immediately returns
// false if the send queue is full.
func (p *Peer) TrySend(chID byte, msg interface{}) bool {
if !p.IsRunning() {
return false
}
return p.mconn.TrySend(chID, msg)
}
// CanSend returns true if the send queue is not full, false otherwise.
func (p *Peer) CanSend(chID byte) bool {
if !p.IsRunning() {
return false
}
return p.mconn.CanSend(chID)
}
// WriteTo writes the peer's public key to w.
func (p *Peer) WriteTo(w io.Writer) (n int64, err error) {
var n_ int
wire.WriteString(p.Key, w, &n_, &err)
n += int64(n_)
return
}
// String representation.
func (p *Peer) String() string {
if p.outbound {
return fmt.Sprintf("Peer{%v %v out}", p.mconn, p.Key[:12])
}
return fmt.Sprintf("Peer{%v %v in}", p.mconn, p.Key[:12])
}
// Equals reports whether two peers represent the same node.
func (p *Peer) Equals(other *Peer) bool {
return p.Key == other.Key
}
// Get the data for a given key.
func (p *Peer) Get(key string) interface{} {
return p.Data.Get(key)
}
func dial(addr *NetAddress, config *PeerConfig) (net.Conn, error) {
conn, err := addr.DialTimeout(config.DialTimeout * time.Second)
if err != nil {
return nil, err
}
return conn, nil
}
func createMConnection(conn net.Conn, p *Peer, reactorsByCh map[byte]Reactor, chDescs []*ChannelDescriptor, onPeerError func(*Peer, interface{}), config *MConnConfig) *MConnection {
onReceive := func(chID byte, msgBytes []byte) {
reactor := reactorsByCh[chID]
if reactor == nil {
cmn.PanicSanity(cmn.Fmt("Unknown channel %X", chID))
}
reactor.Receive(chID, p, msgBytes)
}
onError := func(r interface{}) {
onPeerError(p, r)
}
return NewMConnectionWithConfig(conn, chDescs, onReceive, onError, config)
}
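// Illustrative lifecycle sketch (hypothetical caller code): addr, reactorsByCh,
// chDescs, onPeerError, ourNodeInfo and ourPrivKey are assumed to be prepared
// by the caller; chID and msg are placeholders.
//
//	p, err := newOutboundPeer(addr, reactorsByCh, chDescs, onPeerError, ourPrivKey)
//	if err != nil {
//		return err
//	}
//	if err := p.HandshakeTimeout(ourNodeInfo, 20*time.Second); err != nil {
//		return err
//	}
//	p.Start()
//	p.Send(chID, msg)
//	p.Stop()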

113
p2p/peer_set.go Normal file

@@ -0,0 +1,113 @@
package p2p
import (
"sync"
)
// IPeerSet has an (immutable) subset of the methods of PeerSet.
type IPeerSet interface {
Has(key string) bool
Get(key string) *Peer
List() []*Peer
Size() int
}
//-----------------------------------------------------------------------------
// PeerSet is a special structure for keeping a table of peers.
// Iteration over the peers is fast and thread-safe.
type PeerSet struct {
mtx sync.Mutex
lookup map[string]*peerSetItem
list []*Peer
}
type peerSetItem struct {
peer *Peer
index int
}
func NewPeerSet() *PeerSet {
return &PeerSet{
lookup: make(map[string]*peerSetItem),
list: make([]*Peer, 0, 256),
}
}
// Add adds the peer to the set. It returns ErrSwitchDuplicatePeer if a peer
// with the same key (derived from its PubKeyEd25519) is already present.
func (ps *PeerSet) Add(peer *Peer) error {
ps.mtx.Lock()
defer ps.mtx.Unlock()
if ps.lookup[peer.Key] != nil {
return ErrSwitchDuplicatePeer
}
index := len(ps.list)
// Appending is safe even with other goroutines
// iterating over the ps.list slice.
ps.list = append(ps.list, peer)
ps.lookup[peer.Key] = &peerSetItem{peer, index}
return nil
}
func (ps *PeerSet) Has(peerKey string) bool {
ps.mtx.Lock()
defer ps.mtx.Unlock()
_, ok := ps.lookup[peerKey]
return ok
}
func (ps *PeerSet) Get(peerKey string) *Peer {
ps.mtx.Lock()
defer ps.mtx.Unlock()
item, ok := ps.lookup[peerKey]
if ok {
return item.peer
} else {
return nil
}
}
func (ps *PeerSet) Remove(peer *Peer) {
ps.mtx.Lock()
defer ps.mtx.Unlock()
item := ps.lookup[peer.Key]
if item == nil {
return
}
index := item.index
// Copy the list but without the last element.
// (we must copy because we're mutating the list)
newList := make([]*Peer, len(ps.list)-1)
copy(newList, ps.list)
// If it's the last peer, that's an easy special case.
if index == len(ps.list)-1 {
ps.list = newList
delete(ps.lookup, peer.Key)
return
}
// Move the last item from ps.list to "index" in list.
lastPeer := ps.list[len(ps.list)-1]
lastPeerKey := lastPeer.Key
lastPeerItem := ps.lookup[lastPeerKey]
newList[index] = lastPeer
lastPeerItem.index = index
ps.list = newList
delete(ps.lookup, peer.Key)
}
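// Note on the copy above: List returns ps.list directly, so Remove must
// replace the slice rather than mutate it in place; goroutines still holding
// a previously returned slice keep seeing a consistent snapshot, and only the
// moved item's index in the lookup map is updated.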
func (ps *PeerSet) Size() int {
ps.mtx.Lock()
defer ps.mtx.Unlock()
return len(ps.list)
}
// List returns a threadsafe list of peers.
func (ps *PeerSet) List() []*Peer {
ps.mtx.Lock()
defer ps.mtx.Unlock()
return ps.list
}

67
p2p/peer_set_test.go Normal file

@@ -0,0 +1,67 @@
package p2p
import (
"math/rand"
"testing"
cmn "github.com/tendermint/tmlibs/common"
)
// Returns an empty dummy peer
func randPeer() *Peer {
return &Peer{
Key: cmn.RandStr(12),
NodeInfo: &NodeInfo{
RemoteAddr: cmn.Fmt("%v.%v.%v.%v:46656", rand.Int()%256, rand.Int()%256, rand.Int()%256, rand.Int()%256),
ListenAddr: cmn.Fmt("%v.%v.%v.%v:46656", rand.Int()%256, rand.Int()%256, rand.Int()%256, rand.Int()%256),
},
}
}
func TestAddRemoveOne(t *testing.T) {
peerSet := NewPeerSet()
peer := randPeer()
err := peerSet.Add(peer)
if err != nil {
t.Errorf("Failed to add new peer")
}
if peerSet.Size() != 1 {
t.Errorf("Failed to add new peer and increment size")
}
peerSet.Remove(peer)
if peerSet.Has(peer.Key) {
t.Errorf("Failed to remove peer")
}
if peerSet.Size() != 0 {
t.Errorf("Failed to remove peer and decrement size")
}
}
func TestAddRemoveMany(t *testing.T) {
peerSet := NewPeerSet()
peers := []*Peer{}
N := 100
for i := 0; i < N; i++ {
peer := randPeer()
if err := peerSet.Add(peer); err != nil {
t.Errorf("Failed to add new peer")
}
if peerSet.Size() != i+1 {
t.Errorf("Failed to add new peer and increment size")
}
peers = append(peers, peer)
}
for i, peer := range peers {
peerSet.Remove(peer)
if peerSet.Has(peer.Key) {
t.Errorf("Failed to remove peer")
}
if peerSet.Size() != len(peers)-i-1 {
t.Errorf("Failed to remove peer and decrement size")
}
}
}

156
p2p/peer_test.go Normal file

@@ -0,0 +1,156 @@
package p2p
import (
golog "log"
"net"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
crypto "github.com/tendermint/go-crypto"
)
func TestPeerBasic(t *testing.T) {
assert, require := assert.New(t), require.New(t)
// simulate remote peer
rp := &remotePeer{PrivKey: crypto.GenPrivKeyEd25519(), Config: DefaultPeerConfig()}
rp.Start()
defer rp.Stop()
p, err := createOutboundPeerAndPerformHandshake(rp.Addr(), DefaultPeerConfig())
require.Nil(err)
p.Start()
defer p.Stop()
assert.True(p.IsRunning())
assert.True(p.IsOutbound())
assert.False(p.IsPersistent())
p.makePersistent()
assert.True(p.IsPersistent())
assert.Equal(rp.Addr().String(), p.Addr().String())
assert.Equal(rp.PubKey(), p.PubKey())
}
func TestPeerWithoutAuthEnc(t *testing.T) {
assert, require := assert.New(t), require.New(t)
config := DefaultPeerConfig()
config.AuthEnc = false
// simulate remote peer
rp := &remotePeer{PrivKey: crypto.GenPrivKeyEd25519(), Config: config}
rp.Start()
defer rp.Stop()
p, err := createOutboundPeerAndPerformHandshake(rp.Addr(), config)
require.Nil(err)
p.Start()
defer p.Stop()
assert.True(p.IsRunning())
}
func TestPeerSend(t *testing.T) {
assert, require := assert.New(t), require.New(t)
config := DefaultPeerConfig()
config.AuthEnc = false
// simulate remote peer
rp := &remotePeer{PrivKey: crypto.GenPrivKeyEd25519(), Config: config}
rp.Start()
defer rp.Stop()
p, err := createOutboundPeerAndPerformHandshake(rp.Addr(), config)
require.Nil(err)
p.Start()
defer p.Stop()
assert.True(p.CanSend(0x01))
assert.True(p.Send(0x01, "Asylum"))
}
func createOutboundPeerAndPerformHandshake(addr *NetAddress, config *PeerConfig) (*Peer, error) {
chDescs := []*ChannelDescriptor{
&ChannelDescriptor{ID: 0x01, Priority: 1},
}
reactorsByCh := map[byte]Reactor{0x01: NewTestReactor(chDescs, true)}
pk := crypto.GenPrivKeyEd25519()
p, err := newOutboundPeerWithConfig(addr, reactorsByCh, chDescs, func(p *Peer, r interface{}) {}, pk, config)
if err != nil {
return nil, err
}
err = p.HandshakeTimeout(&NodeInfo{
PubKey: pk.PubKey().Unwrap().(crypto.PubKeyEd25519),
Moniker: "host_peer",
Network: "testing",
Version: "123.123.123",
}, 1*time.Second)
if err != nil {
return nil, err
}
return p, nil
}
type remotePeer struct {
PrivKey crypto.PrivKeyEd25519
Config *PeerConfig
addr *NetAddress
quit chan struct{}
}
func (p *remotePeer) Addr() *NetAddress {
return p.addr
}
func (p *remotePeer) PubKey() crypto.PubKeyEd25519 {
return p.PrivKey.PubKey().Unwrap().(crypto.PubKeyEd25519)
}
func (p *remotePeer) Start() {
l, e := net.Listen("tcp", "127.0.0.1:0") // any available address
if e != nil {
golog.Fatalf("net.Listen tcp :0: %+v", e)
}
p.addr = NewNetAddress(l.Addr())
p.quit = make(chan struct{})
go p.accept(l)
}
func (p *remotePeer) Stop() {
close(p.quit)
}
func (p *remotePeer) accept(l net.Listener) {
for {
conn, err := l.Accept()
if err != nil {
golog.Fatalf("Failed to accept conn: %+v", err)
}
peer, err := newInboundPeerWithConfig(conn, make(map[byte]Reactor), make([]*ChannelDescriptor, 0), func(p *Peer, r interface{}) {}, p.PrivKey, p.Config)
if err != nil {
golog.Fatalf("Failed to create a peer: %+v", err)
}
err = peer.HandshakeTimeout(&NodeInfo{
PubKey: p.PrivKey.PubKey().Unwrap().(crypto.PubKeyEd25519),
Moniker: "remote_peer",
Network: "testing",
Version: "123.123.123",
}, 1*time.Second)
if err != nil {
golog.Fatalf("Failed to perform handshake: %+v", err)
}
select {
case <-p.quit:
conn.Close()
return
default:
}
}
}

358
p2p/pex_reactor.go Normal file

@@ -0,0 +1,358 @@
package p2p
import (
"bytes"
"fmt"
"math/rand"
"reflect"
"time"
wire "github.com/tendermint/go-wire"
cmn "github.com/tendermint/tmlibs/common"
)
const (
// PexChannel is a channel for PEX messages
PexChannel = byte(0x00)
// period to ensure peers connected
defaultEnsurePeersPeriod = 30 * time.Second
minNumOutboundPeers = 10
maxPexMessageSize = 1048576 // 1MB
// maximum messages one peer can send to us during `msgCountByPeerFlushInterval`
defaultMaxMsgCountByPeer = 1000
msgCountByPeerFlushInterval = 1 * time.Hour
)
// PEXReactor handles PEX (peer exchange) and ensures that an
// adequate number of peers are connected to the switch.
//
// It uses `AddrBook` (address book) to store `NetAddress`es of the peers.
//
// ## Preventing abuse
//
// For now, it just limits the number of messages from one peer to
// `defaultMaxMsgCountByPeer` messages per `msgCountByPeerFlushInterval` (1000
// msg/hour).
//
// NOTE [2017-01-17]:
// Limiting is fine for now. Maybe down the road we want to keep track of the
// quality of peer messages so if peerA keeps telling us about peers we can't
// connect to then maybe we should care less about peerA. But I don't think
// that kind of complexity is priority right now.
type PEXReactor struct {
BaseReactor
sw *Switch
book *AddrBook
ensurePeersPeriod time.Duration
// tracks message count by peer, so we can prevent abuse
msgCountByPeer *cmn.CMap
maxMsgCountByPeer uint16
}
// NewPEXReactor creates new PEX reactor.
func NewPEXReactor(b *AddrBook) *PEXReactor {
r := &PEXReactor{
book: b,
ensurePeersPeriod: defaultEnsurePeersPeriod,
msgCountByPeer: cmn.NewCMap(),
maxMsgCountByPeer: defaultMaxMsgCountByPeer,
}
r.BaseReactor = *NewBaseReactor("PEXReactor", r)
return r
}
// OnStart implements BaseService
func (r *PEXReactor) OnStart() error {
r.BaseReactor.OnStart()
r.book.Start()
go r.ensurePeersRoutine()
go r.flushMsgCountByPeer()
return nil
}
// OnStop implements BaseService
func (r *PEXReactor) OnStop() {
r.BaseReactor.OnStop()
r.book.Stop()
}
// GetChannels implements Reactor
func (r *PEXReactor) GetChannels() []*ChannelDescriptor {
return []*ChannelDescriptor{
&ChannelDescriptor{
ID: PexChannel,
Priority: 1,
SendQueueCapacity: 10,
},
}
}
// AddPeer implements Reactor by adding peer to the address book (if inbound)
// or by requesting more addresses (if outbound).
func (r *PEXReactor) AddPeer(p *Peer) {
if p.IsOutbound() {
// For outbound peers, the address is already in the books.
// Either it was added in DialSeeds or when we
// received the peer's address in r.Receive
if r.book.NeedMoreAddrs() {
r.RequestPEX(p)
}
} else { // For inbound connections, the peer is its own source
addr, err := NewNetAddressString(p.ListenAddr)
if err != nil {
// this should never happen
r.Logger.Error("Error in AddPeer: invalid peer address", "addr", p.ListenAddr, "error", err)
return
}
r.book.AddAddress(addr, addr)
}
}
// RemovePeer implements Reactor.
func (r *PEXReactor) RemovePeer(p *Peer, reason interface{}) {
// If we aren't keeping track of local temp data for each peer here, then we
// don't have to do anything.
}
// Receive implements Reactor by handling incoming PEX messages.
func (r *PEXReactor) Receive(chID byte, src *Peer, msgBytes []byte) {
srcAddr := src.Connection().RemoteAddress
srcAddrStr := srcAddr.String()
r.IncrementMsgCountForPeer(srcAddrStr)
if r.ReachedMaxMsgCountForPeer(srcAddrStr) {
r.Logger.Error("Maximum number of messages reached for peer", "peer", srcAddrStr)
// TODO remove src from peers?
return
}
_, msg, err := DecodeMessage(msgBytes)
if err != nil {
r.Logger.Error("Error decoding message", "error", err)
return
}
r.Logger.Info("Received message", "msg", msg)
switch msg := msg.(type) {
case *pexRequestMessage:
// src requested some peers.
r.SendAddrs(src, r.book.GetSelection())
case *pexAddrsMessage:
// We received some peer addresses from src.
// (We don't want to get spammed with bad peers)
for _, addr := range msg.Addrs {
if addr != nil {
r.book.AddAddress(addr, srcAddr)
}
}
default:
r.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
}
}
// RequestPEX asks peer for more addresses.
func (r *PEXReactor) RequestPEX(p *Peer) {
p.Send(PexChannel, struct{ PexMessage }{&pexRequestMessage{}})
}
// SendAddrs sends addrs to the peer.
func (r *PEXReactor) SendAddrs(p *Peer, addrs []*NetAddress) {
p.Send(PexChannel, struct{ PexMessage }{&pexAddrsMessage{Addrs: addrs}})
}
// SetEnsurePeersPeriod sets period to ensure peers connected.
func (r *PEXReactor) SetEnsurePeersPeriod(d time.Duration) {
r.ensurePeersPeriod = d
}
// SetMaxMsgCountByPeer sets maximum messages one peer can send to us during 'msgCountByPeerFlushInterval'.
func (r *PEXReactor) SetMaxMsgCountByPeer(v uint16) {
r.maxMsgCountByPeer = v
}
// ReachedMaxMsgCountForPeer returns true if we received too many
// messages from peer with address `addr`.
// NOTE: assumes the value in the CMap is non-nil
func (r *PEXReactor) ReachedMaxMsgCountForPeer(addr string) bool {
return r.msgCountByPeer.Get(addr).(uint16) >= r.maxMsgCountByPeer
}
// Increment or initialize the msg count for the peer in the CMap
func (r *PEXReactor) IncrementMsgCountForPeer(addr string) {
var count uint16
countI := r.msgCountByPeer.Get(addr)
if countI != nil {
count = countI.(uint16)
}
count++
r.msgCountByPeer.Set(addr, count)
}
// Ensures that sufficient peers are connected. (continuous)
func (r *PEXReactor) ensurePeersRoutine() {
// Randomize when routine starts
ensurePeersPeriodMs := r.ensurePeersPeriod.Nanoseconds() / 1e6
time.Sleep(time.Duration(rand.Int63n(ensurePeersPeriodMs)) * time.Millisecond)
// fire once immediately.
r.ensurePeers()
// fire periodically
ticker := time.NewTicker(r.ensurePeersPeriod)
for {
select {
case <-ticker.C:
r.ensurePeers()
case <-r.Quit:
ticker.Stop()
return
}
}
}
// ensurePeers ensures that sufficient peers are connected. (once)
//
// Old bucket / New bucket are arbitrary categories to denote whether an
// address is vetted or not, and this needs to be determined over time via a
// heuristic that we haven't perfected yet, or, perhaps is manually edited by
// the node operator. It should not be used to compute what addresses are
// already connected or not.
//
// TODO Basically, we need to work harder on our good-peer/bad-peer marking.
// What we're currently doing in terms of marking good/bad peers is just a
// placeholder. It should not be the case that an address becomes old/vetted
// upon a single successful connection.
func (r *PEXReactor) ensurePeers() {
numOutPeers, _, numDialing := r.Switch.NumPeers()
numToDial := minNumOutboundPeers - (numOutPeers + numDialing)
r.Logger.Info("Ensure peers", "numOutPeers", numOutPeers, "numDialing", numDialing, "numToDial", numToDial)
if numToDial <= 0 {
return
}
toDial := make(map[string]*NetAddress)
// Try to pick numToDial addresses to dial.
for i := 0; i < numToDial; i++ {
// The purpose of newBias is to first prioritize old (more vetted) peers
// when we have few connections, but to allow for new (less vetted) peers
// if we already have many connections. This algorithm isn't perfect, but
// it somewhat ensures that we prioritize connecting to more-vetted
// peers.
newBias := cmn.MinInt(numOutPeers, 8)*10 + 10
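// Concretely: with 0 outbound peers the bias is 10, with 4 it is 50, and
// with 8 or more it caps at 90. How the bias is interpreted is up to
// AddrBook.PickAddress (roughly, the preference given to new addresses).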
var picked *NetAddress
// Try to fetch a new peer 3 times.
// This caps the maximum number of tries to 3 * numToDial.
for j := 0; j < 3; j++ {
try := r.book.PickAddress(newBias)
if try == nil {
break
}
_, alreadySelected := toDial[try.IP.String()]
alreadyDialing := r.Switch.IsDialing(try)
alreadyConnected := r.Switch.Peers().Has(try.IP.String())
if alreadySelected || alreadyDialing || alreadyConnected {
// r.Logger.Info("Cannot dial address", "addr", try,
// "alreadySelected", alreadySelected,
// "alreadyDialing", alreadyDialing,
// "alreadyConnected", alreadyConnected)
continue
} else {
r.Logger.Info("Will dial address", "addr", try)
picked = try
break
}
}
if picked == nil {
continue
}
toDial[picked.IP.String()] = picked
}
// Dial picked addresses
for _, item := range toDial {
go func(picked *NetAddress) {
_, err := r.Switch.DialPeerWithAddress(picked, false)
if err != nil {
r.book.MarkAttempt(picked)
}
}(item)
}
// If we need more addresses, pick a random peer and ask for more.
if r.book.NeedMoreAddrs() {
if peers := r.Switch.Peers().List(); len(peers) > 0 {
i := rand.Int() % len(peers)
peer := peers[i]
r.Logger.Info("No addresses to dial. Sending pexRequest to random peer", "peer", peer)
r.RequestPEX(peer)
}
}
}
func (r *PEXReactor) flushMsgCountByPeer() {
ticker := time.NewTicker(msgCountByPeerFlushInterval)
for {
select {
case <-ticker.C:
r.msgCountByPeer.Clear()
case <-r.Quit:
ticker.Stop()
return
}
}
}
//-----------------------------------------------------------------------------
// Messages
const (
msgTypeRequest = byte(0x01)
msgTypeAddrs = byte(0x02)
)
// PexMessage is a primary type for PEX messages. Underneath, it could contain
// either pexRequestMessage, or pexAddrsMessage messages.
type PexMessage interface{}
var _ = wire.RegisterInterface(
struct{ PexMessage }{},
wire.ConcreteType{&pexRequestMessage{}, msgTypeRequest},
wire.ConcreteType{&pexAddrsMessage{}, msgTypeAddrs},
)
// DecodeMessage decodes a byte slice into a PexMessage using the interface registered above.
func DecodeMessage(bz []byte) (msgType byte, msg PexMessage, err error) {
msgType = bz[0]
n := new(int)
r := bytes.NewReader(bz)
msg = wire.ReadBinary(struct{ PexMessage }{}, r, maxPexMessageSize, n, &err).(struct{ PexMessage }).PexMessage
return
}
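// Illustrative round trip (mirrors the wire usage in the tests):
//
//	bz := wire.BinaryBytes(struct{ PexMessage }{&pexRequestMessage{}})
//	msgType, msg, err := DecodeMessage(bz)
//	// msgType == msgTypeRequest, msg is a *pexRequestMessage, err == nil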
/*
A pexRequestMessage requests additional peer addresses.
*/
type pexRequestMessage struct {
}
func (m *pexRequestMessage) String() string {
return "[pexRequest]"
}
/*
A message with announced peer addresses.
*/
type pexAddrsMessage struct {
Addrs []*NetAddress
}
func (m *pexAddrsMessage) String() string {
return fmt.Sprintf("[pexAddrs %v]", m.Addrs)
}

178
p2p/pex_reactor_test.go Normal file

@@ -0,0 +1,178 @@
package p2p
import (
"io/ioutil"
"math/rand"
"os"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
wire "github.com/tendermint/go-wire"
cmn "github.com/tendermint/tmlibs/common"
"github.com/tendermint/tmlibs/log"
)
func TestPEXReactorBasic(t *testing.T) {
assert, require := assert.New(t), require.New(t)
dir, err := ioutil.TempDir("", "pex_reactor")
require.Nil(err)
defer os.RemoveAll(dir)
book := NewAddrBook(dir+"addrbook.json", true)
book.SetLogger(log.TestingLogger())
r := NewPEXReactor(book)
r.SetLogger(log.TestingLogger())
assert.NotNil(r)
assert.NotEmpty(r.GetChannels())
}
func TestPEXReactorAddRemovePeer(t *testing.T) {
assert, require := assert.New(t), require.New(t)
dir, err := ioutil.TempDir("", "pex_reactor")
require.Nil(err)
defer os.RemoveAll(dir)
book := NewAddrBook(dir+"addrbook.json", true)
book.SetLogger(log.TestingLogger())
r := NewPEXReactor(book)
r.SetLogger(log.TestingLogger())
size := book.Size()
peer := createRandomPeer(false)
r.AddPeer(peer)
assert.Equal(size+1, book.Size())
r.RemovePeer(peer, "peer not available")
assert.Equal(size+1, book.Size())
outboundPeer := createRandomPeer(true)
r.AddPeer(outboundPeer)
assert.Equal(size+1, book.Size(), "outbound peers should not be added to the address book")
r.RemovePeer(outboundPeer, "peer not available")
assert.Equal(size+1, book.Size())
}
func TestPEXReactorRunning(t *testing.T) {
require := require.New(t)
N := 3
switches := make([]*Switch, N)
dir, err := ioutil.TempDir("", "pex_reactor")
require.Nil(err)
defer os.RemoveAll(dir)
book := NewAddrBook(dir+"addrbook.json", false)
book.SetLogger(log.TestingLogger())
// create switches
for i := 0; i < N; i++ {
switches[i] = makeSwitch(config, i, "127.0.0.1", "123.123.123", func(i int, sw *Switch) *Switch {
sw.SetLogger(log.TestingLogger().With("switch", i))
r := NewPEXReactor(book)
r.SetLogger(log.TestingLogger())
r.SetEnsurePeersPeriod(250 * time.Millisecond)
sw.AddReactor("pex", r)
return sw
})
}
// fill the address book and add listeners
for _, s := range switches {
addr, _ := NewNetAddressString(s.NodeInfo().ListenAddr)
book.AddAddress(addr, addr)
s.AddListener(NewDefaultListener("tcp", s.NodeInfo().ListenAddr, true, log.TestingLogger()))
}
// start switches
for _, s := range switches {
_, err := s.Start() // start switch and reactors
require.Nil(err)
}
time.Sleep(1 * time.Second)
// check peers are connected after some time
for _, s := range switches {
outbound, inbound, _ := s.NumPeers()
if outbound+inbound == 0 {
t.Errorf("%v expected to be connected to at least one peer", s.NodeInfo().ListenAddr)
}
}
// stop them
for _, s := range switches {
s.Stop()
}
}
func TestPEXReactorReceive(t *testing.T) {
assert, require := assert.New(t), require.New(t)
dir, err := ioutil.TempDir("", "pex_reactor")
require.Nil(err)
defer os.RemoveAll(dir)
book := NewAddrBook(dir+"addrbook.json", true)
book.SetLogger(log.TestingLogger())
r := NewPEXReactor(book)
r.SetLogger(log.TestingLogger())
peer := createRandomPeer(false)
size := book.Size()
netAddr, _ := NewNetAddressString(peer.ListenAddr)
addrs := []*NetAddress{netAddr}
msg := wire.BinaryBytes(struct{ PexMessage }{&pexAddrsMessage{Addrs: addrs}})
r.Receive(PexChannel, peer, msg)
assert.Equal(size+1, book.Size())
msg = wire.BinaryBytes(struct{ PexMessage }{&pexRequestMessage{}})
r.Receive(PexChannel, peer, msg)
}
func TestPEXReactorAbuseFromPeer(t *testing.T) {
assert, require := assert.New(t), require.New(t)
dir, err := ioutil.TempDir("", "pex_reactor")
require.Nil(err)
defer os.RemoveAll(dir)
book := NewAddrBook(dir+"addrbook.json", true)
book.SetLogger(log.TestingLogger())
r := NewPEXReactor(book)
r.SetLogger(log.TestingLogger())
r.SetMaxMsgCountByPeer(5)
peer := createRandomPeer(false)
msg := wire.BinaryBytes(struct{ PexMessage }{&pexRequestMessage{}})
for i := 0; i < 10; i++ {
r.Receive(PexChannel, peer, msg)
}
assert.True(r.ReachedMaxMsgCountForPeer(peer.ListenAddr))
}
func createRandomPeer(outbound bool) *Peer {
addr := cmn.Fmt("%v.%v.%v.%v:46656", rand.Int()%256, rand.Int()%256, rand.Int()%256, rand.Int()%256)
netAddr, _ := NewNetAddressString(addr)
p := &Peer{
Key: cmn.RandStr(12),
NodeInfo: &NodeInfo{
ListenAddr: addr,
},
outbound: outbound,
mconn: &MConnection{RemoteAddress: netAddr},
}
p.SetLogger(log.TestingLogger().With("peer", addr))
return p
}

346
p2p/secret_connection.go Normal file

@@ -0,0 +1,346 @@
// Uses nacl's secret_box to encrypt a net.Conn.
// It is (meant to be) an implementation of the STS protocol.
// Note we do not (yet) assume that a remote peer's pubkey
// is known ahead of time, and thus we are technically
// still vulnerable to MITM. (TODO!)
// See docs/sts-final.pdf for more info
package p2p
import (
"bytes"
crand "crypto/rand"
"crypto/sha256"
"encoding/binary"
"errors"
"io"
"net"
"time"
"golang.org/x/crypto/nacl/box"
"golang.org/x/crypto/nacl/secretbox"
"golang.org/x/crypto/ripemd160"
"github.com/tendermint/go-crypto"
"github.com/tendermint/go-wire"
cmn "github.com/tendermint/tmlibs/common"
)
// 2 + 1024 == 1026 total frame size
const dataLenSize = 2 // uint16 to describe the length, is <= dataMaxSize
const dataMaxSize = 1024
const totalFrameSize = dataMaxSize + dataLenSize
const sealedFrameSize = totalFrameSize + secretbox.Overhead
const authSigMsgSize = (32 + 1) + (64 + 1) // fixed size (length prefixed) byte arrays
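// Size arithmetic: totalFrameSize = 1024 + 2 = 1026 bytes of plaintext per
// frame; sealedFrameSize = 1026 + secretbox.Overhead (16) = 1042 bytes on the
// wire; authSigMsgSize = (32 + 1) + (64 + 1) = 98 bytes for the
// length-prefixed pubkey and signature.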
// Implements net.Conn
type SecretConnection struct {
conn io.ReadWriteCloser
recvBuffer []byte
recvNonce *[24]byte
sendNonce *[24]byte
remPubKey crypto.PubKeyEd25519
shrSecret *[32]byte // shared secret
}
// MakeSecretConnection performs a handshake and returns a new authenticated
// SecretConnection. It returns nil and an error if the handshake fails.
// The caller should call conn.Close().
// See docs/sts-final.pdf for more information.
func MakeSecretConnection(conn io.ReadWriteCloser, locPrivKey crypto.PrivKeyEd25519) (*SecretConnection, error) {
locPubKey := locPrivKey.PubKey().Unwrap().(crypto.PubKeyEd25519)
// Generate ephemeral keys for perfect forward secrecy.
locEphPub, locEphPriv := genEphKeys()
// Write local ephemeral pubkey and receive one too.
// NOTE: every 32-byte string is accepted as a Curve25519 public key
// (see DJB's Curve25519 paper: http://cr.yp.to/ecdh/curve25519-20060209.pdf)
remEphPub, err := shareEphPubKey(conn, locEphPub)
if err != nil {
return nil, err
}
// Compute common shared secret.
shrSecret := computeSharedSecret(remEphPub, locEphPriv)
// Sort by lexical order.
loEphPub, hiEphPub := sort32(locEphPub, remEphPub)
// Generate nonces to use for secretbox.
recvNonce, sendNonce := genNonces(loEphPub, hiEphPub, locEphPub == loEphPub)
// Generate common challenge to sign.
challenge := genChallenge(loEphPub, hiEphPub)
// Construct SecretConnection.
sc := &SecretConnection{
conn: conn,
recvBuffer: nil,
recvNonce: recvNonce,
sendNonce: sendNonce,
shrSecret: shrSecret,
}
// Sign the challenge bytes for authentication.
locSignature := signChallenge(challenge, locPrivKey)
// Share (in secret) each other's pubkey & challenge signature
authSigMsg, err := shareAuthSignature(sc, locPubKey, locSignature)
if err != nil {
return nil, err
}
remPubKey, remSignature := authSigMsg.Key, authSigMsg.Sig
if !remPubKey.VerifyBytes(challenge[:], remSignature) {
return nil, errors.New("Challenge verification failed")
}
// We've authenticated the remote peer.
sc.remPubKey = remPubKey.Unwrap().(crypto.PubKeyEd25519)
return sc, nil
}
// Returns authenticated remote pubkey
func (sc *SecretConnection) RemotePubKey() crypto.PubKeyEd25519 {
return sc.remPubKey
}
// Writes encrypted frames of `sealedFrameSize`
// CONTRACT: data smaller than dataMaxSize is written atomically.
func (sc *SecretConnection) Write(data []byte) (n int, err error) {
for 0 < len(data) {
var frame []byte = make([]byte, totalFrameSize)
var chunk []byte
if dataMaxSize < len(data) {
chunk = data[:dataMaxSize]
data = data[dataMaxSize:]
} else {
chunk = data
data = nil
}
chunkLength := len(chunk)
binary.BigEndian.PutUint16(frame, uint16(chunkLength))
copy(frame[dataLenSize:], chunk)
// encrypt the frame
var sealedFrame = make([]byte, sealedFrameSize)
secretbox.Seal(sealedFrame[:0], frame, sc.sendNonce, sc.shrSecret)
// fmt.Printf("secretbox.Seal(sealed:%X,sendNonce:%X,shrSecret:%X\n", sealedFrame, sc.sendNonce, sc.shrSecret)
incr2Nonce(sc.sendNonce)
// end encryption
_, err := sc.conn.Write(sealedFrame)
if err != nil {
return n, err
} else {
n += len(chunk)
}
}
return
}
// CONTRACT: data smaller than dataMaxSize is read atomically.
func (sc *SecretConnection) Read(data []byte) (n int, err error) {
if 0 < len(sc.recvBuffer) {
n = copy(data, sc.recvBuffer)
sc.recvBuffer = sc.recvBuffer[n:]
return
}
sealedFrame := make([]byte, sealedFrameSize)
_, err = io.ReadFull(sc.conn, sealedFrame)
if err != nil {
return
}
// decrypt the frame
var frame = make([]byte, totalFrameSize)
// fmt.Printf("secretbox.Open(sealed:%X,recvNonce:%X,shrSecret:%X\n", sealedFrame, sc.recvNonce, sc.shrSecret)
_, ok := secretbox.Open(frame[:0], sealedFrame, sc.recvNonce, sc.shrSecret)
if !ok {
return n, errors.New("Failed to decrypt SecretConnection")
}
incr2Nonce(sc.recvNonce)
// end decryption
var chunkLength = binary.BigEndian.Uint16(frame) // read the first two bytes
if chunkLength > dataMaxSize {
return 0, errors.New("chunkLength is greater than dataMaxSize")
}
var chunk = frame[dataLenSize : dataLenSize+chunkLength]
n = copy(data, chunk)
sc.recvBuffer = chunk[n:]
return
}
// Implements net.Conn
func (sc *SecretConnection) Close() error { return sc.conn.Close() }
func (sc *SecretConnection) LocalAddr() net.Addr { return sc.conn.(net.Conn).LocalAddr() }
func (sc *SecretConnection) RemoteAddr() net.Addr { return sc.conn.(net.Conn).RemoteAddr() }
func (sc *SecretConnection) SetDeadline(t time.Time) error { return sc.conn.(net.Conn).SetDeadline(t) }
func (sc *SecretConnection) SetReadDeadline(t time.Time) error {
return sc.conn.(net.Conn).SetReadDeadline(t)
}
func (sc *SecretConnection) SetWriteDeadline(t time.Time) error {
return sc.conn.(net.Conn).SetWriteDeadline(t)
}
func genEphKeys() (ephPub, ephPriv *[32]byte) {
var err error
ephPub, ephPriv, err = box.GenerateKey(crand.Reader)
if err != nil {
cmn.PanicCrisis("Could not generate ephemeral keypairs")
}
return
}
func shareEphPubKey(conn io.ReadWriteCloser, locEphPub *[32]byte) (remEphPub *[32]byte, err error) {
var err1, err2 error
cmn.Parallel(
func() {
_, err1 = conn.Write(locEphPub[:])
},
func() {
remEphPub = new([32]byte)
_, err2 = io.ReadFull(conn, remEphPub[:])
},
)
if err1 != nil {
return nil, err1
}
if err2 != nil {
return nil, err2
}
return remEphPub, nil
}
func computeSharedSecret(remPubKey, locPrivKey *[32]byte) (shrSecret *[32]byte) {
shrSecret = new([32]byte)
box.Precompute(shrSecret, remPubKey, locPrivKey)
return
}
func sort32(foo, bar *[32]byte) (lo, hi *[32]byte) {
if bytes.Compare(foo[:], bar[:]) < 0 {
lo = foo
hi = bar
} else {
lo = bar
hi = foo
}
return
}
func genNonces(loPubKey, hiPubKey *[32]byte, locIsLo bool) (recvNonce, sendNonce *[24]byte) {
nonce1 := hash24(append(loPubKey[:], hiPubKey[:]...))
nonce2 := new([24]byte)
copy(nonce2[:], nonce1[:])
nonce2[len(nonce2)-1] ^= 0x01
if locIsLo {
recvNonce = nonce1
sendNonce = nonce2
} else {
recvNonce = nonce2
sendNonce = nonce1
}
return
}
func genChallenge(loPubKey, hiPubKey *[32]byte) (challenge *[32]byte) {
return hash32(append(loPubKey[:], hiPubKey[:]...))
}
func signChallenge(challenge *[32]byte, locPrivKey crypto.PrivKeyEd25519) (signature crypto.SignatureEd25519) {
signature = locPrivKey.Sign(challenge[:]).Unwrap().(crypto.SignatureEd25519)
return
}
type authSigMessage struct {
Key crypto.PubKey
Sig crypto.Signature
}
func shareAuthSignature(sc *SecretConnection, pubKey crypto.PubKeyEd25519, signature crypto.SignatureEd25519) (*authSigMessage, error) {
var recvMsg authSigMessage
var err1, err2 error
cmn.Parallel(
func() {
msgBytes := wire.BinaryBytes(authSigMessage{pubKey.Wrap(), signature.Wrap()})
_, err1 = sc.Write(msgBytes)
},
func() {
readBuffer := make([]byte, authSigMsgSize)
_, err2 = io.ReadFull(sc, readBuffer)
if err2 != nil {
return
}
n := int(0) // not used.
recvMsg = wire.ReadBinary(authSigMessage{}, bytes.NewBuffer(readBuffer), authSigMsgSize, &n, &err2).(authSigMessage)
})
if err1 != nil {
return nil, err1
}
if err2 != nil {
return nil, err2
}
return &recvMsg, nil
}
func verifyChallengeSignature(challenge *[32]byte, remPubKey crypto.PubKeyEd25519, remSignature crypto.SignatureEd25519) bool {
return remPubKey.VerifyBytes(challenge[:], remSignature.Wrap())
}
//--------------------------------------------------------------------------------
// sha256
func hash32(input []byte) (res *[32]byte) {
hasher := sha256.New()
hasher.Write(input) // does not error
resSlice := hasher.Sum(nil)
res = new([32]byte)
copy(res[:], resSlice)
return
}
// We only fill in the first 20 bytes with ripemd160
func hash24(input []byte) (res *[24]byte) {
hasher := ripemd160.New()
hasher.Write(input) // does not error
resSlice := hasher.Sum(nil)
res = new([24]byte)
copy(res[:], resSlice)
return
}
// ripemd160
func hash20(input []byte) (res *[20]byte) {
hasher := ripemd160.New()
hasher.Write(input) // does not error
resSlice := hasher.Sum(nil)
res = new([20]byte)
copy(res[:], resSlice)
return
}
// increment nonce big-endian by 2 with wraparound.
func incr2Nonce(nonce *[24]byte) {
incrNonce(nonce)
incrNonce(nonce)
}
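// Rationale: genNonces gives the two sides nonces that differ only in the low
// bit of the final byte, and advancing by 2 preserves that bit, so the nonce
// sequence used for sending never collides with the one used for receiving
// under the same shared secret.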
// increment nonce big-endian by 1 with wraparound.
func incrNonce(nonce *[24]byte) {
for i := 23; 0 <= i; i-- {
nonce[i] += 1
if nonce[i] != 0 {
return
}
}
}

p2p/secret_connection_test.go Normal file

@@ -0,0 +1,202 @@
package p2p
import (
"bytes"
"io"
"testing"
"github.com/tendermint/go-crypto"
cmn "github.com/tendermint/tmlibs/common"
)
type dummyConn struct {
*io.PipeReader
*io.PipeWriter
}
func (drw dummyConn) Close() (err error) {
err2 := drw.PipeWriter.CloseWithError(io.EOF)
err1 := drw.PipeReader.Close()
if err2 != nil {
return err2
}
return err1
}
// Each returned ReadWriteCloser is akin to a net.Conn
func makeDummyConnPair() (fooConn, barConn dummyConn) {
barReader, fooWriter := io.Pipe()
fooReader, barWriter := io.Pipe()
return dummyConn{fooReader, fooWriter}, dummyConn{barReader, barWriter}
}
func makeSecretConnPair(tb testing.TB) (fooSecConn, barSecConn *SecretConnection) {
fooConn, barConn := makeDummyConnPair()
fooPrvKey := crypto.GenPrivKeyEd25519()
fooPubKey := fooPrvKey.PubKey().Unwrap().(crypto.PubKeyEd25519)
barPrvKey := crypto.GenPrivKeyEd25519()
barPubKey := barPrvKey.PubKey().Unwrap().(crypto.PubKeyEd25519)
cmn.Parallel(
func() {
var err error
fooSecConn, err = MakeSecretConnection(fooConn, fooPrvKey)
if err != nil {
tb.Errorf("Failed to establish SecretConnection for foo: %v", err)
return
}
remotePubBytes := fooSecConn.RemotePubKey()
if !bytes.Equal(remotePubBytes[:], barPubKey[:]) {
tb.Errorf("Unexpected fooSecConn.RemotePubKey. Expected %v, got %v",
barPubKey, fooSecConn.RemotePubKey())
}
},
func() {
var err error
barSecConn, err = MakeSecretConnection(barConn, barPrvKey)
if barSecConn == nil {
tb.Errorf("Failed to establish SecretConnection for bar: %v", err)
return
}
remotePubBytes := barSecConn.RemotePubKey()
if !bytes.Equal(remotePubBytes[:], fooPubKey[:]) {
tb.Errorf("Unexpected barSecConn.RemotePubKey. Expected %v, got %v",
fooPubKey, barSecConn.RemotePubKey())
}
})
return
}
func TestSecretConnectionHandshake(t *testing.T) {
fooSecConn, barSecConn := makeSecretConnPair(t)
fooSecConn.Close()
barSecConn.Close()
}
func TestSecretConnectionReadWrite(t *testing.T) {
fooConn, barConn := makeDummyConnPair()
fooWrites, barWrites := []string{}, []string{}
fooReads, barReads := []string{}, []string{}
// Pre-generate the things to write (for foo & bar)
for i := 0; i < 100; i++ {
fooWrites = append(fooWrites, cmn.RandStr((cmn.RandInt()%(dataMaxSize*5))+1))
barWrites = append(barWrites, cmn.RandStr((cmn.RandInt()%(dataMaxSize*5))+1))
}
// A helper that will run with (fooConn, fooWrites, fooReads) and vice versa
genNodeRunner := func(nodeConn dummyConn, nodeWrites []string, nodeReads *[]string) func() {
return func() {
// Node handshake
nodePrvKey := crypto.GenPrivKeyEd25519()
nodeSecretConn, err := MakeSecretConnection(nodeConn, nodePrvKey)
if err != nil {
t.Errorf("Failed to establish SecretConnection for node: %v", err)
return
}
// In parallel, handle reads and writes
cmn.Parallel(
func() {
// Node writes
for _, nodeWrite := range nodeWrites {
n, err := nodeSecretConn.Write([]byte(nodeWrite))
if err != nil {
t.Errorf("Failed to write to nodeSecretConn: %v", err)
return
}
if n != len(nodeWrite) {
t.Errorf("Failed to write all bytes. Expected %v, wrote %v", len(nodeWrite), n)
return
}
}
nodeConn.PipeWriter.Close()
},
func() {
// Node reads
readBuffer := make([]byte, dataMaxSize)
for {
n, err := nodeSecretConn.Read(readBuffer)
if err == io.EOF {
return
} else if err != nil {
t.Errorf("Failed to read from nodeSecretConn: %v", err)
return
}
*nodeReads = append(*nodeReads, string(readBuffer[:n]))
}
nodeConn.PipeReader.Close()
})
}
}
// Run foo & bar in parallel
cmn.Parallel(
genNodeRunner(fooConn, fooWrites, &fooReads),
genNodeRunner(barConn, barWrites, &barReads),
)
// A helper to ensure that the writes and reads match.
// Additionally, small writes (<= dataMaxSize) must be atomically read.
compareWritesReads := func(writes []string, reads []string) {
for {
// Pop next write & corresponding reads
var read, write string = "", writes[0]
var readCount = 0
for _, readChunk := range reads {
read += readChunk
readCount += 1
if len(write) <= len(read) {
break
}
if len(write) <= dataMaxSize {
break // atomicity of small writes
}
}
// Compare
if write != read {
t.Errorf("Expected to read %X, got %X", write, read)
}
// Iterate
writes = writes[1:]
reads = reads[readCount:]
if len(writes) == 0 {
break
}
}
}
compareWritesReads(fooWrites, barReads)
compareWritesReads(barWrites, fooReads)
}
func BenchmarkSecretConnection(b *testing.B) {
b.StopTimer()
fooSecConn, barSecConn := makeSecretConnPair(b)
fooWriteText := cmn.RandStr(dataMaxSize)
// Consume reads from bar's reader
go func() {
readBuffer := make([]byte, dataMaxSize)
for {
_, err := barSecConn.Read(readBuffer)
if err == io.EOF {
return
} else if err != nil {
b.Fatalf("Failed to read from barSecConn: %v", err)
}
}
}()
b.StartTimer()
for i := 0; i < b.N; i++ {
_, err := fooSecConn.Write([]byte(fooWriteText))
if err != nil {
b.Fatalf("Failed to write to fooSecConn: %v", err)
}
}
b.StopTimer()
fooSecConn.Close()
//barSecConn.Close() race condition
}

577
p2p/switch.go Normal file

@@ -0,0 +1,577 @@
package p2p
import (
"errors"
"fmt"
"math/rand"
"net"
"time"
crypto "github.com/tendermint/go-crypto"
cfg "github.com/tendermint/tendermint/config"
cmn "github.com/tendermint/tmlibs/common"
)
const (
reconnectAttempts = 30
reconnectInterval = 3 * time.Second
)
type Reactor interface {
cmn.Service // Start, Stop
SetSwitch(*Switch)
GetChannels() []*ChannelDescriptor
AddPeer(peer *Peer)
RemovePeer(peer *Peer, reason interface{})
Receive(chID byte, peer *Peer, msgBytes []byte)
}
//--------------------------------------
type BaseReactor struct {
cmn.BaseService // Provides Start, Stop, .Quit
Switch *Switch
}
func NewBaseReactor(name string, impl Reactor) *BaseReactor {
return &BaseReactor{
BaseService: *cmn.NewBaseService(nil, name, impl),
Switch: nil,
}
}
func (br *BaseReactor) SetSwitch(sw *Switch) {
br.Switch = sw
}
func (_ *BaseReactor) GetChannels() []*ChannelDescriptor { return nil }
func (_ *BaseReactor) AddPeer(peer *Peer) {}
func (_ *BaseReactor) RemovePeer(peer *Peer, reason interface{}) {}
func (_ *BaseReactor) Receive(chID byte, peer *Peer, msgBytes []byte) {}
//-----------------------------------------------------------------------------
/*
The `Switch` handles peer connections and exposes an API to receive incoming messages
on `Reactors`. Each `Reactor` is responsible for handling incoming messages of one
or more `Channels`. So while sending outgoing messages is typically performed on the peer,
incoming messages are received on the reactor.
*/
type Switch struct {
cmn.BaseService
config *cfg.P2PConfig
peerConfig *PeerConfig
listeners []Listener
reactors map[string]Reactor
chDescs []*ChannelDescriptor
reactorsByCh map[byte]Reactor
peers *PeerSet
dialing *cmn.CMap
nodeInfo *NodeInfo // our node info
nodePrivKey crypto.PrivKeyEd25519 // our node privkey
filterConnByAddr func(net.Addr) error
filterConnByPubKey func(crypto.PubKeyEd25519) error
}
var (
ErrSwitchDuplicatePeer = errors.New("Duplicate peer")
)
func NewSwitch(config *cfg.P2PConfig) *Switch {
sw := &Switch{
config: config,
peerConfig: DefaultPeerConfig(),
reactors: make(map[string]Reactor),
chDescs: make([]*ChannelDescriptor, 0),
reactorsByCh: make(map[byte]Reactor),
peers: NewPeerSet(),
dialing: cmn.NewCMap(),
nodeInfo: nil,
}
sw.BaseService = *cmn.NewBaseService(nil, "P2P Switch", sw)
return sw
}
// Not goroutine safe.
func (sw *Switch) AddReactor(name string, reactor Reactor) Reactor {
// Validate the reactor.
// No two reactors can share the same channel.
reactorChannels := reactor.GetChannels()
for _, chDesc := range reactorChannels {
chID := chDesc.ID
if sw.reactorsByCh[chID] != nil {
cmn.PanicSanity(fmt.Sprintf("Channel %X has multiple reactors %v & %v", chID, sw.reactorsByCh[chID], reactor))
}
sw.chDescs = append(sw.chDescs, chDesc)
sw.reactorsByCh[chID] = reactor
}
sw.reactors[name] = reactor
reactor.SetSwitch(sw)
return reactor
}
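// Illustrative sketch, not part of this changeset: the doc comment above splits
// responsibilities between the Switch and its Reactors. A minimal reactor that
// echoes every message back on the channel it arrived on might look like this;
// the EchoReactor name and channel ID 0x42 are hypothetical.
type EchoReactor struct {
	BaseReactor
}

func NewEchoReactor() *EchoReactor {
	r := &EchoReactor{}
	r.BaseReactor = *NewBaseReactor("EchoReactor", r)
	return r
}

// GetChannels claims channel 0x42; AddReactor panics if two reactors claim the same ID.
func (r *EchoReactor) GetChannels() []*ChannelDescriptor {
	return []*ChannelDescriptor{{ID: 0x42, Priority: 1}}
}

// Receive echoes each inbound message back to the sending peer.
func (r *EchoReactor) Receive(chID byte, peer *Peer, msgBytes []byte) {
	peer.Send(chID, msgBytes)
}

// exampleSwitch wires the hypothetical reactor into a new Switch and starts it.
func exampleSwitch(config *cfg.P2PConfig) *Switch {
	sw := NewSwitch(config)
	sw.AddReactor("echo", NewEchoReactor())
	sw.Start() // starts reactors and any listeners added via AddListener
	return sw
}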
// Not goroutine safe.
func (sw *Switch) Reactors() map[string]Reactor {
return sw.reactors
}
// Not goroutine safe.
func (sw *Switch) Reactor(name string) Reactor {
return sw.reactors[name]
}
// Not goroutine safe.
func (sw *Switch) AddListener(l Listener) {
sw.listeners = append(sw.listeners, l)
}
// Not goroutine safe.
func (sw *Switch) Listeners() []Listener {
return sw.listeners
}
// Not goroutine safe.
func (sw *Switch) IsListening() bool {
return len(sw.listeners) > 0
}
// Not goroutine safe.
func (sw *Switch) SetNodeInfo(nodeInfo *NodeInfo) {
sw.nodeInfo = nodeInfo
}
// Not goroutine safe.
func (sw *Switch) NodeInfo() *NodeInfo {
return sw.nodeInfo
}
// Not goroutine safe.
// NOTE: Overwrites sw.nodeInfo.PubKey
func (sw *Switch) SetNodePrivKey(nodePrivKey crypto.PrivKeyEd25519) {
sw.nodePrivKey = nodePrivKey
if sw.nodeInfo != nil {
sw.nodeInfo.PubKey = nodePrivKey.PubKey().Unwrap().(crypto.PubKeyEd25519)
}
}
// Switch.Start() starts all the reactors, peers, and listeners.
func (sw *Switch) OnStart() error {
sw.BaseService.OnStart()
// Start reactors
for _, reactor := range sw.reactors {
_, err := reactor.Start()
if err != nil {
return err
}
}
// Start peers
for _, peer := range sw.peers.List() {
sw.startInitPeer(peer)
}
// Start listeners
for _, listener := range sw.listeners {
go sw.listenerRoutine(listener)
}
return nil
}
func (sw *Switch) OnStop() {
sw.BaseService.OnStop()
// Stop listeners
for _, listener := range sw.listeners {
listener.Stop()
}
sw.listeners = nil
// Stop peers
for _, peer := range sw.peers.List() {
peer.Stop()
sw.peers.Remove(peer)
}
// Stop reactors
for _, reactor := range sw.reactors {
reactor.Stop()
}
}
// NOTE: This performs a blocking handshake before the peer is added.
// CONTRACT: If error is returned, peer is nil, and conn is immediately closed.
func (sw *Switch) AddPeer(peer *Peer) error {
if err := sw.FilterConnByAddr(peer.Addr()); err != nil {
return err
}
if err := sw.FilterConnByPubKey(peer.PubKey()); err != nil {
return err
}
if err := peer.HandshakeTimeout(sw.nodeInfo, time.Duration(sw.peerConfig.HandshakeTimeout*time.Second)); err != nil {
return err
}
// Avoid self
if sw.nodeInfo.PubKey.Equals(peer.PubKey().Wrap()) {
return errors.New("Ignoring connection from self")
}
// Check version, chain id
if err := sw.nodeInfo.CompatibleWith(peer.NodeInfo); err != nil {
return err
}
// Check for duplicate peer
if sw.peers.Has(peer.Key) {
return ErrSwitchDuplicatePeer
}
// Start peer
if sw.IsRunning() {
sw.startInitPeer(peer)
}
// Add the peer to .peers.
// We start it first so that a peer in the list is safe to Stop.
// It should not err since we already checked peers.Has()
if err := sw.peers.Add(peer); err != nil {
return err
}
sw.Logger.Info("Added peer", "peer", peer)
return nil
}
func (sw *Switch) FilterConnByAddr(addr net.Addr) error {
if sw.filterConnByAddr != nil {
return sw.filterConnByAddr(addr)
}
return nil
}
func (sw *Switch) FilterConnByPubKey(pubkey crypto.PubKeyEd25519) error {
if sw.filterConnByPubKey != nil {
return sw.filterConnByPubKey(pubkey)
}
return nil
}
func (sw *Switch) SetAddrFilter(f func(net.Addr) error) {
sw.filterConnByAddr = f
}
func (sw *Switch) SetPubKeyFilter(f func(crypto.PubKeyEd25519) error) {
sw.filterConnByPubKey = f
}
func (sw *Switch) startInitPeer(peer *Peer) {
peer.Start() // spawn send/recv routines
for _, reactor := range sw.reactors {
reactor.AddPeer(peer)
}
}
// Dial a list of seeds asynchronously in random order
func (sw *Switch) DialSeeds(addrBook *AddrBook, seeds []string) error {
netAddrs, err := NewNetAddressStrings(seeds)
if err != nil {
return err
}
if addrBook != nil {
// add seeds to `addrBook`
ourAddrS := sw.nodeInfo.ListenAddr
ourAddr, _ := NewNetAddressString(ourAddrS)
for _, netAddr := range netAddrs {
// do not add ourselves
if netAddr.Equals(ourAddr) {
continue
}
addrBook.AddAddress(netAddr, ourAddr)
}
addrBook.Save()
}
// permute the list, dial them in random order.
perm := rand.Perm(len(netAddrs))
for i := 0; i < len(perm); i++ {
go func(i int) {
time.Sleep(time.Duration(rand.Int63n(3000)) * time.Millisecond)
j := perm[i]
sw.dialSeed(netAddrs[j])
}(i)
}
return nil
}
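// Illustrative sketch, not part of this changeset: the seed addresses are
// hypothetical. Passing a nil address book skips recording the seeds and simply
// dials them asynchronously in random order.
func exampleDialSeeds(sw *Switch) error {
	return sw.DialSeeds(nil, []string{"1.2.3.4:46656", "5.6.7.8:46656"})
}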
func (sw *Switch) dialSeed(addr *NetAddress) {
peer, err := sw.DialPeerWithAddress(addr, true)
if err != nil {
sw.Logger.Error("Error dialing seed", "error", err)
} else {
sw.Logger.Info("Connected to seed", "peer", peer)
}
}
func (sw *Switch) DialPeerWithAddress(addr *NetAddress, persistent bool) (*Peer, error) {
sw.dialing.Set(addr.IP.String(), addr)
defer sw.dialing.Delete(addr.IP.String())
sw.Logger.Info("Dialing peer", "address", addr)
peer, err := newOutboundPeerWithConfig(addr, sw.reactorsByCh, sw.chDescs, sw.StopPeerForError, sw.nodePrivKey, sw.peerConfig)
if err != nil {
sw.Logger.Error("Failed to dial peer", "address", addr, "error", err)
return nil, err
}
peer.SetLogger(sw.Logger.With("peer", addr))
if persistent {
peer.makePersistent()
}
err = sw.AddPeer(peer)
if err != nil {
sw.Logger.Error("Failed to add peer", "address", addr, "error", err)
peer.CloseConn()
return nil, err
}
sw.Logger.Info("Dialed and added peer", "address", addr, "peer", peer)
return peer, nil
}
func (sw *Switch) IsDialing(addr *NetAddress) bool {
return sw.dialing.Has(addr.IP.String())
}
// Broadcast runs a go routine for each attempted send, which will block
// trying to send for defaultSendTimeoutSeconds. Returns a channel
// which receives success values for each attempted send (false if times out)
// NOTE: Broadcast uses goroutines, so order of broadcast may not be preserved.
func (sw *Switch) Broadcast(chID byte, msg interface{}) chan bool {
successChan := make(chan bool, len(sw.peers.List()))
sw.Logger.Debug("Broadcast", "channel", chID, "msg", msg)
for _, peer := range sw.peers.List() {
go func(peer *Peer) {
success := peer.Send(chID, msg)
successChan <- success
}(peer)
}
return successChan
}
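// Illustrative sketch, not part of this changeset: Broadcast returns a buffered
// channel with one result per peer, so a caller can count how many sends were
// accepted. The helper name is hypothetical.
func broadcastAndCount(sw *Switch, chID byte, msg interface{}) (accepted int) {
	numPeers := sw.Peers().Size()
	successChan := sw.Broadcast(chID, msg)
	for i := 0; i < numPeers; i++ {
		if <-successChan {
			accepted++
		}
	}
	return accepted
}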
// Returns the count of outbound/inbound and outbound-dialing peers.
func (sw *Switch) NumPeers() (outbound, inbound, dialing int) {
peers := sw.peers.List()
for _, peer := range peers {
if peer.outbound {
outbound++
} else {
inbound++
}
}
dialing = sw.dialing.Size()
return
}
func (sw *Switch) Peers() IPeerSet {
return sw.peers
}
// Disconnect from a peer due to external error, retry if it is a persistent peer.
// TODO: make record depending on reason.
func (sw *Switch) StopPeerForError(peer *Peer, reason interface{}) {
addr := NewNetAddress(peer.Addr())
sw.Logger.Info("Stopping peer for error", "peer", peer, "error", reason)
sw.stopAndRemovePeer(peer, reason)
if peer.IsPersistent() {
go func() {
sw.Logger.Info("Reconnecting to peer", "peer", peer)
for i := 1; i < reconnectAttempts; i++ {
if !sw.IsRunning() {
return
}
peer, err := sw.DialPeerWithAddress(addr, true)
if err != nil {
if i == reconnectAttempts {
sw.Logger.Info("Error reconnecting to peer. Giving up", "tries", i, "error", err)
return
}
sw.Logger.Info("Error reconnecting to peer. Trying again", "tries", i, "error", err)
time.Sleep(reconnectInterval)
continue
}
sw.Logger.Info("Reconnected to peer", "peer", peer)
return
}
}()
}
}
// Disconnect from a peer gracefully.
// TODO: handle graceful disconnects.
func (sw *Switch) StopPeerGracefully(peer *Peer) {
sw.Logger.Info("Stopping peer gracefully")
sw.stopAndRemovePeer(peer, nil)
}
func (sw *Switch) stopAndRemovePeer(peer *Peer, reason interface{}) {
sw.peers.Remove(peer)
peer.Stop()
for _, reactor := range sw.reactors {
reactor.RemovePeer(peer, reason)
}
}
func (sw *Switch) listenerRoutine(l Listener) {
for {
inConn, ok := <-l.Connections()
if !ok {
break
}
// ignore connection if we already have enough
maxPeers := sw.config.MaxNumPeers
if maxPeers <= sw.peers.Size() {
sw.Logger.Info("Ignoring inbound connection: already have enough peers", "address", inConn.RemoteAddr().String(), "numPeers", sw.peers.Size(), "max", maxPeers)
continue
}
// New inbound connection!
err := sw.addPeerWithConnectionAndConfig(inConn, sw.peerConfig)
if err != nil {
sw.Logger.Info("Ignoring inbound connection: error while adding peer", "address", inConn.RemoteAddr().String(), "error", err)
continue
}
// NOTE: We don't yet have the listening port of the
// remote (if they have a listener at all).
// The peerHandshake will handle that
}
// cleanup
}
//-----------------------------------------------------------------------------
type SwitchEventNewPeer struct {
Peer *Peer
}
type SwitchEventDonePeer struct {
Peer *Peer
Error interface{}
}
//------------------------------------------------------------------
// Switches connected via arbitrary net.Conn; useful for testing
// Returns n switches, connected according to the connect func.
// If connect==Connect2Switches, the switches will be fully connected.
// initSwitch defines how the ith switch should be initialized (ie. with what reactors).
// NOTE: panics if any switch fails to start.
func MakeConnectedSwitches(cfg *cfg.P2PConfig, n int, initSwitch func(int, *Switch) *Switch, connect func([]*Switch, int, int)) []*Switch {
switches := make([]*Switch, n)
for i := 0; i < n; i++ {
switches[i] = makeSwitch(cfg, i, "testing", "123.123.123", initSwitch)
}
if err := StartSwitches(switches); err != nil {
panic(err)
}
for i := 0; i < n; i++ {
for j := i; j < n; j++ {
connect(switches, i, j)
}
}
return switches
}
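// Illustrative sketch, not part of this changeset: three switches in one process,
// fully connected over net.Pipe, each initialized without any reactors.
func exampleConnectedSwitches(config *cfg.P2PConfig) []*Switch {
	noReactors := func(i int, sw *Switch) *Switch { return sw }
	return MakeConnectedSwitches(config, 3, noReactors, Connect2Switches)
}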
var PanicOnAddPeerErr = false
// Will connect switches i and j via net.Pipe()
// Blocks until a connection is established.
// NOTE: caller ensures i and j are within bounds
func Connect2Switches(switches []*Switch, i, j int) {
switchI := switches[i]
switchJ := switches[j]
c1, c2 := net.Pipe()
doneCh := make(chan struct{})
go func() {
err := switchI.addPeerWithConnection(c1)
if PanicOnAddPeerErr && err != nil {
panic(err)
}
doneCh <- struct{}{}
}()
go func() {
err := switchJ.addPeerWithConnection(c2)
if PanicOnAddPeerErr && err != nil {
panic(err)
}
doneCh <- struct{}{}
}()
<-doneCh
<-doneCh
}
func StartSwitches(switches []*Switch) error {
for _, s := range switches {
_, err := s.Start() // start switch and reactors
if err != nil {
return err
}
}
return nil
}
func makeSwitch(cfg *cfg.P2PConfig, i int, network, version string, initSwitch func(int, *Switch) *Switch) *Switch {
privKey := crypto.GenPrivKeyEd25519()
// new switch, add reactors
// TODO: let the config be passed in?
s := initSwitch(i, NewSwitch(cfg))
s.SetNodeInfo(&NodeInfo{
PubKey: privKey.PubKey().Unwrap().(crypto.PubKeyEd25519),
Moniker: cmn.Fmt("switch%d", i),
Network: network,
Version: version,
RemoteAddr: cmn.Fmt("%v:%v", network, rand.Intn(64512)+1023),
ListenAddr: cmn.Fmt("%v:%v", network, rand.Intn(64512)+1023),
})
s.SetNodePrivKey(privKey)
return s
}
func (sw *Switch) addPeerWithConnection(conn net.Conn) error {
peer, err := newInboundPeer(conn, sw.reactorsByCh, sw.chDescs, sw.StopPeerForError, sw.nodePrivKey)
if err != nil {
conn.Close()
return err
}
peer.SetLogger(sw.Logger.With("peer", conn.RemoteAddr()))
if err = sw.AddPeer(peer); err != nil {
conn.Close()
return err
}
return nil
}
func (sw *Switch) addPeerWithConnectionAndConfig(conn net.Conn, config *PeerConfig) error {
peer, err := newInboundPeerWithConfig(conn, sw.reactorsByCh, sw.chDescs, sw.StopPeerForError, sw.nodePrivKey, config)
if err != nil {
conn.Close()
return err
}
peer.SetLogger(sw.Logger.With("peer", conn.RemoteAddr()))
if err = sw.AddPeer(peer); err != nil {
conn.Close()
return err
}
return nil
}

331
p2p/switch_test.go Normal file
View File

@@ -0,0 +1,331 @@
package p2p
import (
"bytes"
"fmt"
"net"
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
crypto "github.com/tendermint/go-crypto"
wire "github.com/tendermint/go-wire"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tmlibs/log"
)
var (
config *cfg.P2PConfig
)
func init() {
config = cfg.DefaultP2PConfig()
config.PexReactor = true
}
type PeerMessage struct {
PeerKey string
Bytes []byte
Counter int
}
type TestReactor struct {
BaseReactor
mtx sync.Mutex
channels []*ChannelDescriptor
peersAdded []*Peer
peersRemoved []*Peer
logMessages bool
msgsCounter int
msgsReceived map[byte][]PeerMessage
}
func NewTestReactor(channels []*ChannelDescriptor, logMessages bool) *TestReactor {
tr := &TestReactor{
channels: channels,
logMessages: logMessages,
msgsReceived: make(map[byte][]PeerMessage),
}
tr.BaseReactor = *NewBaseReactor("TestReactor", tr)
tr.SetLogger(log.TestingLogger())
return tr
}
func (tr *TestReactor) GetChannels() []*ChannelDescriptor {
return tr.channels
}
func (tr *TestReactor) AddPeer(peer *Peer) {
tr.mtx.Lock()
defer tr.mtx.Unlock()
tr.peersAdded = append(tr.peersAdded, peer)
}
func (tr *TestReactor) RemovePeer(peer *Peer, reason interface{}) {
tr.mtx.Lock()
defer tr.mtx.Unlock()
tr.peersRemoved = append(tr.peersRemoved, peer)
}
func (tr *TestReactor) Receive(chID byte, peer *Peer, msgBytes []byte) {
if tr.logMessages {
tr.mtx.Lock()
defer tr.mtx.Unlock()
//fmt.Printf("Received: %X, %X\n", chID, msgBytes)
tr.msgsReceived[chID] = append(tr.msgsReceived[chID], PeerMessage{peer.Key, msgBytes, tr.msgsCounter})
tr.msgsCounter++
}
}
func (tr *TestReactor) getMsgs(chID byte) []PeerMessage {
tr.mtx.Lock()
defer tr.mtx.Unlock()
return tr.msgsReceived[chID]
}
//-----------------------------------------------------------------------------
// convenience method for creating two switches connected to each other.
// XXX: note this uses net.Pipe and not a proper TCP conn
func makeSwitchPair(t testing.TB, initSwitch func(int, *Switch) *Switch) (*Switch, *Switch) {
// Create two switches that will be interconnected.
switches := MakeConnectedSwitches(config, 2, initSwitch, Connect2Switches)
return switches[0], switches[1]
}
func initSwitchFunc(i int, sw *Switch) *Switch {
// Make two reactors of two channels each
sw.AddReactor("foo", NewTestReactor([]*ChannelDescriptor{
&ChannelDescriptor{ID: byte(0x00), Priority: 10},
&ChannelDescriptor{ID: byte(0x01), Priority: 10},
}, true))
sw.AddReactor("bar", NewTestReactor([]*ChannelDescriptor{
&ChannelDescriptor{ID: byte(0x02), Priority: 10},
&ChannelDescriptor{ID: byte(0x03), Priority: 10},
}, true))
return sw
}
func TestSwitches(t *testing.T) {
s1, s2 := makeSwitchPair(t, initSwitchFunc)
defer s1.Stop()
defer s2.Stop()
if s1.Peers().Size() != 1 {
t.Errorf("Expected exactly 1 peer in s1, got %v", s1.Peers().Size())
}
if s2.Peers().Size() != 1 {
t.Errorf("Expected exactly 1 peer in s2, got %v", s2.Peers().Size())
}
// Let's send some messages
ch0Msg := "channel zero"
ch1Msg := "channel foo"
ch2Msg := "channel bar"
s1.Broadcast(byte(0x00), ch0Msg)
s1.Broadcast(byte(0x01), ch1Msg)
s1.Broadcast(byte(0x02), ch2Msg)
// Wait for things to settle...
time.Sleep(5000 * time.Millisecond)
// Check message on ch0
ch0Msgs := s2.Reactor("foo").(*TestReactor).getMsgs(byte(0x00))
if len(ch0Msgs) != 1 {
t.Errorf("Expected to have received 1 message in ch0")
}
if !bytes.Equal(ch0Msgs[0].Bytes, wire.BinaryBytes(ch0Msg)) {
t.Errorf("Unexpected message bytes. Wanted: %X, Got: %X", wire.BinaryBytes(ch0Msg), ch0Msgs[0].Bytes)
}
// Check message on ch1
ch1Msgs := s2.Reactor("foo").(*TestReactor).getMsgs(byte(0x01))
if len(ch1Msgs) != 1 {
t.Errorf("Expected to have received 1 message in ch1")
}
if !bytes.Equal(ch1Msgs[0].Bytes, wire.BinaryBytes(ch1Msg)) {
t.Errorf("Unexpected message bytes. Wanted: %X, Got: %X", wire.BinaryBytes(ch1Msg), ch1Msgs[0].Bytes)
}
// Check message on ch2
ch2Msgs := s2.Reactor("bar").(*TestReactor).getMsgs(byte(0x02))
if len(ch2Msgs) != 1 {
t.Errorf("Expected to have received 1 message in ch2")
}
if !bytes.Equal(ch2Msgs[0].Bytes, wire.BinaryBytes(ch2Msg)) {
t.Errorf("Unexpected message bytes. Wanted: %X, Got: %X", wire.BinaryBytes(ch2Msg), ch2Msgs[0].Bytes)
}
}
func TestConnAddrFilter(t *testing.T) {
s1 := makeSwitch(config, 1, "testing", "123.123.123", initSwitchFunc)
s2 := makeSwitch(config, 1, "testing", "123.123.123", initSwitchFunc)
c1, c2 := net.Pipe()
s1.SetAddrFilter(func(addr net.Addr) error {
if addr.String() == c1.RemoteAddr().String() {
return fmt.Errorf("Error: pipe is blacklisted")
}
return nil
})
// connect to good peer
go func() {
s1.addPeerWithConnection(c1)
}()
go func() {
s2.addPeerWithConnection(c2)
}()
// Wait for things to happen, peers to get added...
time.Sleep(100 * time.Millisecond * time.Duration(4))
defer s1.Stop()
defer s2.Stop()
if s1.Peers().Size() != 0 {
t.Errorf("Expected s1 not to connect to peers, got %d", s1.Peers().Size())
}
if s2.Peers().Size() != 0 {
t.Errorf("Expected s2 not to connect to peers, got %d", s2.Peers().Size())
}
}
func TestConnPubKeyFilter(t *testing.T) {
s1 := makeSwitch(config, 1, "testing", "123.123.123", initSwitchFunc)
s2 := makeSwitch(config, 1, "testing", "123.123.123", initSwitchFunc)
c1, c2 := net.Pipe()
// set pubkey filter
s1.SetPubKeyFilter(func(pubkey crypto.PubKeyEd25519) error {
if bytes.Equal(pubkey.Bytes(), s2.nodeInfo.PubKey.Bytes()) {
return fmt.Errorf("Error: pipe is blacklisted")
}
return nil
})
// connect to good peer
go func() {
s1.addPeerWithConnection(c1)
}()
go func() {
s2.addPeerWithConnection(c2)
}()
// Wait for things to happen, peers to get added...
time.Sleep(100 * time.Millisecond * time.Duration(4))
defer s1.Stop()
defer s2.Stop()
if s1.Peers().Size() != 0 {
t.Errorf("Expected s1 not to connect to peers, got %d", s1.Peers().Size())
}
if s2.Peers().Size() != 0 {
t.Errorf("Expected s2 not to connect to peers, got %d", s2.Peers().Size())
}
}
func TestSwitchStopsNonPersistentPeerOnError(t *testing.T) {
assert, require := assert.New(t), require.New(t)
sw := makeSwitch(config, 1, "testing", "123.123.123", initSwitchFunc)
sw.Start()
defer sw.Stop()
// simulate remote peer
rp := &remotePeer{PrivKey: crypto.GenPrivKeyEd25519(), Config: DefaultPeerConfig()}
rp.Start()
defer rp.Stop()
peer, err := newOutboundPeer(rp.Addr(), sw.reactorsByCh, sw.chDescs, sw.StopPeerForError, sw.nodePrivKey)
require.Nil(err)
err = sw.AddPeer(peer)
require.Nil(err)
// simulate failure by closing connection
peer.CloseConn()
time.Sleep(100 * time.Millisecond)
assert.Zero(sw.Peers().Size())
assert.False(peer.IsRunning())
}
func TestSwitchReconnectsToPersistentPeer(t *testing.T) {
assert, require := assert.New(t), require.New(t)
sw := makeSwitch(config, 1, "testing", "123.123.123", initSwitchFunc)
sw.Start()
defer sw.Stop()
// simulate remote peer
rp := &remotePeer{PrivKey: crypto.GenPrivKeyEd25519(), Config: DefaultPeerConfig()}
rp.Start()
defer rp.Stop()
peer, err := newOutboundPeer(rp.Addr(), sw.reactorsByCh, sw.chDescs, sw.StopPeerForError, sw.nodePrivKey)
peer.makePersistent()
require.Nil(err)
err = sw.AddPeer(peer)
require.Nil(err)
// simulate failure by closing connection
peer.CloseConn()
// TODO: actually detect the disconnection and wait for reconnect
time.Sleep(100 * time.Millisecond)
assert.NotZero(sw.Peers().Size())
assert.False(peer.IsRunning())
}
func BenchmarkSwitches(b *testing.B) {
b.StopTimer()
s1, s2 := makeSwitchPair(b, func(i int, sw *Switch) *Switch {
// Make two reactors of two channels each
sw.AddReactor("foo", NewTestReactor([]*ChannelDescriptor{
&ChannelDescriptor{ID: byte(0x00), Priority: 10},
&ChannelDescriptor{ID: byte(0x01), Priority: 10},
}, false))
sw.AddReactor("bar", NewTestReactor([]*ChannelDescriptor{
&ChannelDescriptor{ID: byte(0x02), Priority: 10},
&ChannelDescriptor{ID: byte(0x03), Priority: 10},
}, false))
return sw
})
defer s1.Stop()
defer s2.Stop()
// Allow time for goroutines to boot up
time.Sleep(1000 * time.Millisecond)
b.StartTimer()
numSuccess, numFailure := 0, 0
// Send random message from foo channel to another
for i := 0; i < b.N; i++ {
chID := byte(i % 4)
successChan := s1.Broadcast(chID, "test data")
for s := range successChan {
if s {
numSuccess++
} else {
numFailure++
}
}
}
b.Logf("success: %v, failure: %v", numSuccess, numFailure)
// Allow everything to flush before stopping switches & closing connections.
b.StopTimer()
time.Sleep(1000 * time.Millisecond)
}

81
p2p/types.go Normal file
View File

@@ -0,0 +1,81 @@
package p2p
import (
"fmt"
"net"
"strconv"
"strings"
crypto "github.com/tendermint/go-crypto"
)
const maxNodeInfoSize = 10240 // 10Kb
type NodeInfo struct {
PubKey crypto.PubKeyEd25519 `json:"pub_key"`
Moniker string `json:"moniker"`
Network string `json:"network"`
RemoteAddr string `json:"remote_addr"`
ListenAddr string `json:"listen_addr"`
Version string `json:"version"` // major.minor.revision
Other []string `json:"other"` // other application specific data
}
// CONTRACT: two nodes are compatible if the major/minor versions match and network match
func (info *NodeInfo) CompatibleWith(other *NodeInfo) error {
iMajor, iMinor, _, iErr := splitVersion(info.Version)
oMajor, oMinor, _, oErr := splitVersion(other.Version)
// if our own version number is not formatted right, we messed up
if iErr != nil {
return iErr
}
// version number must be formatted correctly ("x.x.x")
if oErr != nil {
return oErr
}
// major version must match
if iMajor != oMajor {
return fmt.Errorf("Peer is on a different major version. Got %v, expected %v", oMajor, iMajor)
}
// minor version must match
if iMinor != oMinor {
return fmt.Errorf("Peer is on a different minor version. Got %v, expected %v", oMinor, iMinor)
}
// nodes must be on the same network
if info.Network != other.Network {
return fmt.Errorf("Peer is on a different network. Got %v, expected %v", other.Network, info.Network)
}
return nil
}
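// Illustrative sketch, not part of this changeset: per the contract above, only the
// major and minor version components and the network name are compared; the values
// below are hypothetical.
func exampleCompatibility() error {
	a := &NodeInfo{Version: "0.10.0", Network: "chain-A"}
	b := &NodeInfo{Version: "0.10.2", Network: "chain-A"}
	// Same major.minor and same network: compatible even though the patch differs.
	if err := a.CompatibleWith(b); err != nil {
		return err
	}
	// Different minor version: rejected with a "different minor version" error.
	c := &NodeInfo{Version: "0.9.9", Network: "chain-A"}
	return a.CompatibleWith(c)
}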
func (info *NodeInfo) ListenHost() string {
host, _, _ := net.SplitHostPort(info.ListenAddr)
return host
}
func (info *NodeInfo) ListenPort() int {
_, port, _ := net.SplitHostPort(info.ListenAddr)
port_i, err := strconv.Atoi(port)
if err != nil {
return -1
}
return port_i
}
func (info NodeInfo) String() string {
return fmt.Sprintf("NodeInfo{pk: %v, moniker: %v, network: %v [remote %v, listen %v], version: %v (%v)}", info.PubKey, info.Moniker, info.Network, info.RemoteAddr, info.ListenAddr, info.Version, info.Other)
}
func splitVersion(version string) (string, string, string, error) {
spl := strings.Split(version, ".")
if len(spl) != 3 {
return "", "", "", fmt.Errorf("Invalid version format %v", version)
}
return spl[0], spl[1], spl[2], nil
}

5
p2p/upnp/README.md Normal file
View File

@@ -0,0 +1,5 @@
# `tendermint/p2p/upnp`
## Resources
* http://www.upnp-hacks.org/upnp.html

112
p2p/upnp/probe.go Normal file
View File

@@ -0,0 +1,112 @@
package upnp
import (
"errors"
"fmt"
"net"
"time"
cmn "github.com/tendermint/tmlibs/common"
"github.com/tendermint/tmlibs/log"
)
type UPNPCapabilities struct {
PortMapping bool
Hairpin bool
}
func makeUPNPListener(intPort int, extPort int, logger log.Logger) (NAT, net.Listener, net.IP, error) {
nat, err := Discover()
if err != nil {
return nil, nil, nil, errors.New(fmt.Sprintf("NAT upnp could not be discovered: %v", err))
}
logger.Info(cmn.Fmt("ourIP: %v", nat.(*upnpNAT).ourIP))
ext, err := nat.GetExternalAddress()
if err != nil {
return nat, nil, nil, errors.New(fmt.Sprintf("External address error: %v", err))
}
logger.Info(cmn.Fmt("External address: %v", ext))
port, err := nat.AddPortMapping("tcp", extPort, intPort, "Tendermint UPnP Probe", 0)
if err != nil {
return nat, nil, ext, errors.New(fmt.Sprintf("Port mapping error: %v", err))
}
logger.Info(cmn.Fmt("Port mapping mapped: %v", port))
// also run the listener, open for all remote addresses.
listener, err := net.Listen("tcp", fmt.Sprintf(":%v", intPort))
if err != nil {
return nat, nil, ext, errors.New(fmt.Sprintf("Error establishing listener: %v", err))
}
return nat, listener, ext, nil
}
func testHairpin(listener net.Listener, extAddr string, logger log.Logger) (supportsHairpin bool) {
// Listener
go func() {
inConn, err := listener.Accept()
if err != nil {
logger.Info(cmn.Fmt("Listener.Accept() error: %v", err))
return
}
logger.Info(cmn.Fmt("Accepted incoming connection: %v -> %v", inConn.LocalAddr(), inConn.RemoteAddr()))
buf := make([]byte, 1024)
n, err := inConn.Read(buf)
if err != nil {
logger.Info(cmn.Fmt("Incoming connection read error: %v", err))
return
}
logger.Info(cmn.Fmt("Incoming connection read %v bytes: %X", n, buf))
if string(buf[:n]) == "test data" {
supportsHairpin = true
return
}
}()
// Establish outgoing
outConn, err := net.Dial("tcp", extAddr)
if err != nil {
logger.Info(cmn.Fmt("Outgoing connection dial error: %v", err))
return
}
n, err := outConn.Write([]byte("test data"))
if err != nil {
logger.Info(cmn.Fmt("Outgoing connection write error: %v", err))
return
}
logger.Info(cmn.Fmt("Outgoing connection wrote %v bytes", n))
// Wait for data receipt
time.Sleep(1 * time.Second)
return
}
func Probe(logger log.Logger) (caps UPNPCapabilities, err error) {
logger.Info("Probing for UPnP!")
intPort, extPort := 8001, 8001
nat, listener, ext, err := makeUPNPListener(intPort, extPort, logger)
if err != nil {
return
}
caps.PortMapping = true
// Deferred cleanup
defer func() {
err = nat.DeletePortMapping("tcp", intPort, extPort)
if err != nil {
logger.Error(cmn.Fmt("Port mapping delete error: %v", err))
}
listener.Close()
}()
supportsHairpin := testHairpin(listener, fmt.Sprintf("%v:%v", ext, extPort), logger)
if supportsHairpin {
caps.Hairpin = true
}
return
}

380
p2p/upnp/upnp.go Normal file
View File

@@ -0,0 +1,380 @@
/*
Taken from taipei-torrent
Just enough UPnP to be able to forward ports
*/
package upnp
// BUG(jae): TODO: use syscalls to get actual ourIP. http://pastebin.com/9exZG4rh
import (
"bytes"
"encoding/xml"
"errors"
"io/ioutil"
"net"
"net/http"
"strconv"
"strings"
"time"
)
type upnpNAT struct {
serviceURL string
ourIP string
urnDomain string
}
// protocol is either "udp" or "tcp"
type NAT interface {
GetExternalAddress() (addr net.IP, err error)
AddPortMapping(protocol string, externalPort, internalPort int, description string, timeout int) (mappedExternalPort int, err error)
DeletePortMapping(protocol string, externalPort, internalPort int) (err error)
}
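// Illustrative sketch, not part of this changeset: map external TCP port 46656 to
// the same local port with a one-hour lease, look up the router's external IP,
// then remove the mapping again. The port and description values are hypothetical.
func examplePortMapping() error {
	nat, err := Discover()
	if err != nil {
		return err
	}
	if _, err := nat.AddPortMapping("tcp", 46656, 46656, "tendermint p2p", 3600); err != nil {
		return err
	}
	ext, err := nat.GetExternalAddress()
	if err != nil {
		return err
	}
	_ = ext // a caller could advertise ext:46656 to peers here
	return nat.DeletePortMapping("tcp", 46656, 46656)
}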
func Discover() (nat NAT, err error) {
ssdp, err := net.ResolveUDPAddr("udp4", "239.255.255.250:1900")
if err != nil {
return
}
conn, err := net.ListenPacket("udp4", ":0")
if err != nil {
return
}
socket := conn.(*net.UDPConn)
defer socket.Close()
err = socket.SetDeadline(time.Now().Add(3 * time.Second))
if err != nil {
return
}
st := "InternetGatewayDevice:1"
buf := bytes.NewBufferString(
"M-SEARCH * HTTP/1.1\r\n" +
"HOST: 239.255.255.250:1900\r\n" +
"ST: ssdp:all\r\n" +
"MAN: \"ssdp:discover\"\r\n" +
"MX: 2\r\n\r\n")
message := buf.Bytes()
answerBytes := make([]byte, 1024)
for i := 0; i < 3; i++ {
_, err = socket.WriteToUDP(message, ssdp)
if err != nil {
return
}
var n int
n, _, err = socket.ReadFromUDP(answerBytes)
for {
n, _, err = socket.ReadFromUDP(answerBytes)
if err != nil {
break
}
answer := string(answerBytes[0:n])
if strings.Index(answer, st) < 0 {
continue
}
// HTTP header field names are case-insensitive.
// http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2
locString := "\r\nlocation:"
answer = strings.ToLower(answer)
locIndex := strings.Index(answer, locString)
if locIndex < 0 {
continue
}
loc := answer[locIndex+len(locString):]
endIndex := strings.Index(loc, "\r\n")
if endIndex < 0 {
continue
}
locURL := strings.TrimSpace(loc[0:endIndex])
var serviceURL, urnDomain string
serviceURL, urnDomain, err = getServiceURL(locURL)
if err != nil {
return
}
var ourIP net.IP
ourIP, err = localIPv4()
if err != nil {
return
}
nat = &upnpNAT{serviceURL: serviceURL, ourIP: ourIP.String(), urnDomain: urnDomain}
return
}
}
err = errors.New("UPnP port discovery failed.")
return
}
type Envelope struct {
XMLName xml.Name `xml:"http://schemas.xmlsoap.org/soap/envelope/ Envelope"`
Soap *SoapBody
}
type SoapBody struct {
XMLName xml.Name `xml:"http://schemas.xmlsoap.org/soap/envelope/ Body"`
ExternalIP *ExternalIPAddressResponse
}
type ExternalIPAddressResponse struct {
XMLName xml.Name `xml:"GetExternalIPAddressResponse"`
IPAddress string `xml:"NewExternalIPAddress"`
}
type ExternalIPAddress struct {
XMLName xml.Name `xml:"NewExternalIPAddress"`
IP string
}
type UPNPService struct {
ServiceType string `xml:"serviceType"`
ControlURL string `xml:"controlURL"`
}
type DeviceList struct {
Device []Device `xml:"device"`
}
type ServiceList struct {
Service []UPNPService `xml:"service"`
}
type Device struct {
XMLName xml.Name `xml:"device"`
DeviceType string `xml:"deviceType"`
DeviceList DeviceList `xml:"deviceList"`
ServiceList ServiceList `xml:"serviceList"`
}
type Root struct {
Device Device
}
func getChildDevice(d *Device, deviceType string) *Device {
dl := d.DeviceList.Device
for i := 0; i < len(dl); i++ {
if strings.Index(dl[i].DeviceType, deviceType) >= 0 {
return &dl[i]
}
}
return nil
}
func getChildService(d *Device, serviceType string) *UPNPService {
sl := d.ServiceList.Service
for i := 0; i < len(sl); i++ {
if strings.Index(sl[i].ServiceType, serviceType) >= 0 {
return &sl[i]
}
}
return nil
}
func localIPv4() (net.IP, error) {
tt, err := net.Interfaces()
if err != nil {
return nil, err
}
for _, t := range tt {
aa, err := t.Addrs()
if err != nil {
return nil, err
}
for _, a := range aa {
ipnet, ok := a.(*net.IPNet)
if !ok {
continue
}
v4 := ipnet.IP.To4()
if v4 == nil || v4[0] == 127 { // loopback address
continue
}
return v4, nil
}
}
return nil, errors.New("cannot find local IP address")
}
func getServiceURL(rootURL string) (url, urnDomain string, err error) {
r, err := http.Get(rootURL)
if err != nil {
return
}
defer r.Body.Close()
if r.StatusCode >= 400 {
err = errors.New(strconv.Itoa(r.StatusCode))
return
}
var root Root
err = xml.NewDecoder(r.Body).Decode(&root)
if err != nil {
return
}
a := &root.Device
if strings.Index(a.DeviceType, "InternetGatewayDevice:1") < 0 {
err = errors.New("No InternetGatewayDevice")
return
}
b := getChildDevice(a, "WANDevice:1")
if b == nil {
err = errors.New("No WANDevice")
return
}
c := getChildDevice(b, "WANConnectionDevice:1")
if c == nil {
err = errors.New("No WANConnectionDevice")
return
}
d := getChildService(c, "WANIPConnection:1")
if d == nil {
// Some routers don't follow the UPnP spec, and put WanIPConnection under WanDevice,
// instead of under WanConnectionDevice
d = getChildService(b, "WANIPConnection:1")
if d == nil {
err = errors.New("No WANIPConnection")
return
}
}
// Extract the domain name, which isn't always 'schemas-upnp-org'
urnDomain = strings.Split(d.ServiceType, ":")[1]
url = combineURL(rootURL, d.ControlURL)
return
}
func combineURL(rootURL, subURL string) string {
protocolEnd := "://"
protoEndIndex := strings.Index(rootURL, protocolEnd)
a := rootURL[protoEndIndex+len(protocolEnd):]
rootIndex := strings.Index(a, "/")
return rootURL[0:protoEndIndex+len(protocolEnd)+rootIndex] + subURL
}
func soapRequest(url, function, message, domain string) (r *http.Response, err error) {
fullMessage := "<?xml version=\"1.0\" ?>" +
"<s:Envelope xmlns:s=\"http://schemas.xmlsoap.org/soap/envelope/\" s:encodingStyle=\"http://schemas.xmlsoap.org/soap/encoding/\">\r\n" +
"<s:Body>" + message + "</s:Body></s:Envelope>"
req, err := http.NewRequest("POST", url, strings.NewReader(fullMessage))
if err != nil {
return nil, err
}
req.Header.Set("Content-Type", "text/xml ; charset=\"utf-8\"")
req.Header.Set("User-Agent", "Darwin/10.0.0, UPnP/1.0, MiniUPnPc/1.3")
//req.Header.Set("Transfer-Encoding", "chunked")
req.Header.Set("SOAPAction", "\"urn:"+domain+":service:WANIPConnection:1#"+function+"\"")
req.Header.Set("Connection", "Close")
req.Header.Set("Cache-Control", "no-cache")
req.Header.Set("Pragma", "no-cache")
// log.Stderr("soapRequest ", req)
r, err = http.DefaultClient.Do(req)
if err != nil {
return nil, err
}
/*if r.Body != nil {
defer r.Body.Close()
}*/
if r.StatusCode >= 400 {
// log.Stderr(function, r.StatusCode)
err = errors.New("Error " + strconv.Itoa(r.StatusCode) + " for " + function)
r = nil
return
}
return
}
type statusInfo struct {
externalIpAddress string
}
func (n *upnpNAT) getExternalIPAddress() (info statusInfo, err error) {
message := "<u:GetExternalIPAddress xmlns:u=\"urn:" + n.urnDomain + ":service:WANIPConnection:1\">\r\n" +
"</u:GetExternalIPAddress>"
var response *http.Response
response, err = soapRequest(n.serviceURL, "GetExternalIPAddress", message, n.urnDomain)
if response != nil {
defer response.Body.Close()
}
if err != nil {
return
}
var envelope Envelope
data, err := ioutil.ReadAll(response.Body)
reader := bytes.NewReader(data)
xml.NewDecoder(reader).Decode(&envelope)
info = statusInfo{envelope.Soap.ExternalIP.IPAddress}
if err != nil {
return
}
return
}
func (n *upnpNAT) GetExternalAddress() (addr net.IP, err error) {
info, err := n.getExternalIPAddress()
if err != nil {
return
}
addr = net.ParseIP(info.externalIpAddress)
return
}
func (n *upnpNAT) AddPortMapping(protocol string, externalPort, internalPort int, description string, timeout int) (mappedExternalPort int, err error) {
// A single concatenation would break ARM compilation.
message := "<u:AddPortMapping xmlns:u=\"urn:" + n.urnDomain + ":service:WANIPConnection:1\">\r\n" +
"<NewRemoteHost></NewRemoteHost><NewExternalPort>" + strconv.Itoa(externalPort)
message += "</NewExternalPort><NewProtocol>" + protocol + "</NewProtocol>"
message += "<NewInternalPort>" + strconv.Itoa(internalPort) + "</NewInternalPort>" +
"<NewInternalClient>" + n.ourIP + "</NewInternalClient>" +
"<NewEnabled>1</NewEnabled><NewPortMappingDescription>"
message += description +
"</NewPortMappingDescription><NewLeaseDuration>" + strconv.Itoa(timeout) +
"</NewLeaseDuration></u:AddPortMapping>"
var response *http.Response
response, err = soapRequest(n.serviceURL, "AddPortMapping", message, n.urnDomain)
if response != nil {
defer response.Body.Close()
}
if err != nil {
return
}
// TODO: check response to see if the port was forwarded
// log.Println(message, response)
// JAE:
// body, err := ioutil.ReadAll(response.Body)
// fmt.Println(string(body), err)
mappedExternalPort = externalPort
_ = response
return
}
func (n *upnpNAT) DeletePortMapping(protocol string, externalPort, internalPort int) (err error) {
message := "<u:DeletePortMapping xmlns:u=\"urn:" + n.urnDomain + ":service:WANIPConnection:1\">\r\n" +
"<NewRemoteHost></NewRemoteHost><NewExternalPort>" + strconv.Itoa(externalPort) +
"</NewExternalPort><NewProtocol>" + protocol + "</NewProtocol>" +
"</u:DeletePortMapping>"
var response *http.Response
response, err = soapRequest(n.serviceURL, "DeletePortMapping", message, n.urnDomain)
if response != nil {
defer response.Body.Close()
}
if err != nil {
return
}
// TODO: check response to see if the port was deleted
// log.Println(message, response)
_ = response
return
}

15
p2p/util.go Normal file
View File

@@ -0,0 +1,15 @@
package p2p
import (
"crypto/sha256"
)
// doubleSha256 calculates sha256(sha256(b)) and returns the resulting bytes.
func doubleSha256(b []byte) []byte {
hasher := sha256.New()
hasher.Write(b)
sum := hasher.Sum(nil)
hasher.Reset()
hasher.Write(sum)
return hasher.Sum(nil)
}

3
p2p/version.go Normal file
View File

@@ -0,0 +1,3 @@
package p2p
const Version = "0.5.0"

View File

@@ -4,11 +4,12 @@ import (
"strings"
"testing"
. "github.com/tendermint/go-common"
abcicli "github.com/tendermint/abci/client"
"github.com/tendermint/abci/example/dummy"
"github.com/tendermint/abci/server"
"github.com/tendermint/abci/types"
cmn "github.com/tendermint/tmlibs/common"
"github.com/tendermint/tmlibs/log"
)
//----------------------------------------
@@ -44,44 +45,59 @@ func (app *appConnTest) InfoSync() (types.ResponseInfo, error) {
var SOCKET = "socket"
func TestEcho(t *testing.T) {
sockPath := Fmt("unix:///tmp/echo_%v.sock", RandStr(6))
sockPath := cmn.Fmt("unix:///tmp/echo_%v.sock", cmn.RandStr(6))
clientCreator := NewRemoteClientCreator(sockPath, SOCKET, true)
// Start server
s, err := server.NewSocketServer(sockPath, dummy.NewDummyApplication())
if err != nil {
Exit(err.Error())
s := server.NewSocketServer(sockPath, dummy.NewDummyApplication())
s.SetLogger(log.TestingLogger().With("module", "abci-server"))
if _, err := s.Start(); err != nil {
t.Fatalf("Error starting socket server: %v", err.Error())
}
defer s.Stop()
// Start client
cli, err := clientCreator.NewABCIClient()
if err != nil {
Exit(err.Error())
t.Fatalf("Error creating ABCI client: %v", err.Error())
}
cli.SetLogger(log.TestingLogger().With("module", "abci-client"))
if _, err := cli.Start(); err != nil {
t.Fatalf("Error starting ABCI client: %v", err.Error())
}
proxy := NewAppConnTest(cli)
t.Log("Connected")
for i := 0; i < 1000; i++ {
proxy.EchoAsync(Fmt("echo-%v", i))
proxy.EchoAsync(cmn.Fmt("echo-%v", i))
}
proxy.FlushSync()
}
func BenchmarkEcho(b *testing.B) {
b.StopTimer() // Initialize
sockPath := Fmt("unix:///tmp/echo_%v.sock", RandStr(6))
sockPath := cmn.Fmt("unix:///tmp/echo_%v.sock", cmn.RandStr(6))
clientCreator := NewRemoteClientCreator(sockPath, SOCKET, true)
// Start server
s, err := server.NewSocketServer(sockPath, dummy.NewDummyApplication())
if err != nil {
Exit(err.Error())
s := server.NewSocketServer(sockPath, dummy.NewDummyApplication())
s.SetLogger(log.TestingLogger().With("module", "abci-server"))
if _, err := s.Start(); err != nil {
b.Fatalf("Error starting socket server: %v", err.Error())
}
defer s.Stop()
// Start client
cli, err := clientCreator.NewABCIClient()
if err != nil {
Exit(err.Error())
b.Fatalf("Error creating ABCI client: %v", err.Error())
}
cli.SetLogger(log.TestingLogger().With("module", "abci-client"))
if _, err := cli.Start(); err != nil {
b.Fatalf("Error starting ABCI client: %v", err.Error())
}
proxy := NewAppConnTest(cli)
b.Log("Connected")
echoString := strings.Repeat(" ", 200)
@@ -98,19 +114,27 @@ func BenchmarkEcho(b *testing.B) {
}
func TestInfo(t *testing.T) {
sockPath := Fmt("unix:///tmp/echo_%v.sock", RandStr(6))
sockPath := cmn.Fmt("unix:///tmp/echo_%v.sock", cmn.RandStr(6))
clientCreator := NewRemoteClientCreator(sockPath, SOCKET, true)
// Start server
s, err := server.NewSocketServer(sockPath, dummy.NewDummyApplication())
if err != nil {
Exit(err.Error())
s := server.NewSocketServer(sockPath, dummy.NewDummyApplication())
s.SetLogger(log.TestingLogger().With("module", "abci-server"))
if _, err := s.Start(); err != nil {
t.Fatalf("Error starting socket server: %v", err.Error())
}
defer s.Stop()
// Start client
cli, err := clientCreator.NewABCIClient()
if err != nil {
Exit(err.Error())
t.Fatalf("Error creating ABCI client: %v", err.Error())
}
cli.SetLogger(log.TestingLogger().With("module", "abci-client"))
if _, err := cli.Start(); err != nil {
t.Fatalf("Error starting ABCI client: %v", err.Error())
}
proxy := NewAppConnTest(cli)
t.Log("Connected")

Some files were not shown because too many files have changed in this diff.