Compare commits

...

507 Commits

Author SHA1 Message Date
Ethan Buchman
f6e28c4975 Merge pull request #461 from tendermint/release-v0.9.2
Release v0.9.2
2017-04-26 11:07:00 -04:00
Ethan Buchman
6bcd4242f1 CHANGELOG and version bump 2017-04-26 00:17:53 -04:00
Ethan Buchman
207798b09a Merge pull request #454 from tendermint/reset_fix
reset fix
2017-04-21 17:27:29 -04:00
rigel rozanski
c21cec002c reset fix 2017-04-21 16:39:50 -04:00
Ethan Buchman
f89279cf11 fix CHANGELOG 2017-04-21 13:39:02 -04:00
Ethan Buchman
44f25864d9 Merge pull request #451 from tendermint/release-v0.9.1
Release v0.9.1
2017-04-21 13:35:23 -04:00
Ethan Buchman
7f20eca892 update glide 2017-04-21 13:09:55 -04:00
Ethan Buchman
e8cad948e3 cli: ResetAll doesnt depend on cobra 2017-04-21 12:54:53 -04:00
Ethan Buchman
53e2b9693a export ResetAll cmd 2017-04-21 12:46:12 -04:00
Ethan Buchman
62860e4919 Merge pull request #453 from tendermint/feature/rename-txid-to-hash
Rename txid to hash
2017-04-21 12:16:08 -04:00
Anton Kaliaev
5e5fb37774 rename TxID to Hash 2017-04-21 18:39:02 +03:00
Ethan Buchman
7cf773e2d3 cli: testnet cmd inits files for testnet 2017-04-20 18:22:42 -04:00
Ethan Buchman
f5b77d50b5 fix setting log level 2017-04-20 17:56:49 -04:00
Ethan Buchman
757a548edf update glide 2017-04-20 17:56:35 -04:00
Ethan Buchman
a75353da6d version bump 2017-04-18 23:21:57 -04:00
Ethan Buchman
083fe959e2 Update README [ci skip] 2017-04-18 23:14:10 -04:00
Ethan Buchman
480b24d9f0 CHANGELOG [ci skip] 2017-04-18 23:05:48 -04:00
Ethan Buchman
aecf860b26 update glide 2017-04-18 22:27:24 -04:00
Ethan Buchman
bd369cc451 Merge pull request #450 from tendermint/fix-fastsync
blockpool: fix removePeer bug
2017-04-18 22:21:10 -04:00
Ethan Buchman
07143a4353 Merge pull request #412 from tendermint/feature/237-tx-indexing
tx indexing (Refs #237)
2017-04-18 22:20:48 -04:00
Ethan Buchman
bf7521a6ab Merge branch 'develop' into feature/237-tx-indexing 2017-04-18 22:20:13 -04:00
Ethan Buchman
fdeceb9a74 Merge pull request #449 from tendermint/fix-replay
Fix replay
2017-04-18 22:18:34 -04:00
Ethan Buchman
52d03d0071 post rebase fixes 2017-04-18 21:35:00 -04:00
Ethan Buchman
29a893b193 update comment 2017-04-18 21:28:11 -04:00
Jae Kwon
cf4074cc80 defer gr.Close() fixes 2017-04-18 21:28:10 -04:00
Ethan Buchman
cb2ed5bb7c fixes from review 2017-04-18 21:28:10 -04:00
Ethan Buchman
2ba3656ffd wal: gr.Close() 2017-04-18 21:28:10 -04:00
Jae Kwon
cd9e9e9f45 s/ExecBlock/ValExecBlock/g; s/sm.ApplyBlock/sm.ExecCommitBlock/g 2017-04-18 21:28:10 -04:00
Ethan Buchman
f9d0096744 support #HEIGHT based WAL 2017-04-18 21:28:10 -04:00
Ethan Buchman
935f70a346 comments and cleanup 2017-04-18 21:28:10 -04:00
Ethan Buchman
5109746b1c Handshake uses ApplyBlock, no ConsensuState 2017-04-18 21:28:10 -04:00
Ethan Buchman
1684ec163f ABCIResponses not needed as field in state 2017-04-18 21:27:50 -04:00
Ethan Buchman
3a973b80ac update glide 2017-04-18 21:27:31 -04:00
Ethan Buchman
ed03cb5c17 consensus/replay: remove timeout 2017-04-18 21:27:31 -04:00
Ethan Buchman
54b26869d5 consensus/wal: #HEIGHT -> #ENDHEIGHT 2017-04-18 21:27:31 -04:00
Ethan Buchman
cb279bf662 state: ABCIResponses, s.Save() in ApplyBlock 2017-04-18 21:27:31 -04:00
Ethan Buchman
9d2de2b756 tx_indexer -> tx_index 2017-04-18 20:55:40 -04:00
Ethan Buchman
45876d0828 NewBatch takes size, batch.Add doesn't use append 2017-04-18 20:23:58 -04:00
Ethan Buchman
b6a04a3456 more fixes from review 2017-04-18 20:11:53 -04:00
Ethan Buchman
f4d0076344 TxResult includes Tx. /tx only works if indexer active 2017-04-18 19:56:41 -04:00
Ethan Buchman
d572bb0c5d state/txindex and pkg per indexer impl 2017-04-18 19:29:02 -04:00
Ethan Buchman
00847cdc6b blockpool: fix removePeer bug 2017-04-15 02:22:03 -04:00
Ethan Buchman
16bffdf7ab rpc/test: restore txindexer after setting null 2017-04-13 16:38:44 -04:00
Ethan Frey
c648bd5b31 Test /tx with indexer disabled 2017-04-13 22:26:07 +02:00
Ethan Buchman
6e065affe5 rpc: /tx allows height+hash 2017-04-13 16:04:36 -04:00
Ethan Frey
4ee9acb8a7 Improve tx tests for both prove true/false 2017-04-13 16:04:35 -04:00
Ethan Frey
20458564b2 Expose Tx method in the clients 2017-04-13 21:49:21 +02:00
Ethan Buchman
c848056438 rpc: better arg validation for /tx 2017-04-13 15:18:58 -04:00
Ethan Frey
b33a7c46ce Break new Tx rpc endpoint 2017-04-13 20:49:06 +02:00
Ethan Frey
a4ee7d25d1 Add TxIndexEnabled method to ResultStatus 2017-04-13 20:21:40 +02:00
Ethan Buchman
58c860ba11 rpc/test: /tx 2017-04-13 14:18:35 -04:00
Ethan Buchman
90d1ed87fd add tx_indexer to NodeInfo 2017-04-13 14:18:20 -04:00
Ethan Buchman
df35989742 /tx can take height+index or hash 2017-04-13 13:47:48 -04:00
Ethan Buchman
23dbd0c8c3 enable tx_indexer by default 2017-04-13 13:36:12 -04:00
Ethan Buchman
257d81ddd1 rpc/core/types: uintX -> int 2017-04-13 13:35:16 -04:00
Ethan Buchman
585ce45a5e rpc: dial_seeds msg. addresses #403 2017-04-12 19:12:22 -04:00
Ethan Buchman
6899c91ebe add optional 'prove' flag to /tx 2017-04-12 18:55:00 -04:00
Ethan Buchman
7fb0e8b30b Merge branch 'feature/tx-proof' into feature/237-tx-indexing
Conflicts:
	types/tx.go
2017-04-12 18:48:39 -04:00
Ethan Buchman
a1387c7c17 remove expected panic in test 2017-04-12 18:45:37 -04:00
Ethan Buchman
ffe6d58a58 add Height to ResultBroadcastTxCommit and EventDataTx 2017-04-12 18:33:48 -04:00
Ethan Buchman
2a59cda77e /tx returns tx bytes 2017-04-12 18:18:17 -04:00
Ethan Buchman
2742831fb9 Merge pull request #443 from tendermint/draw_deps
Draw deps
2017-04-12 17:50:38 -04:00
Adrian Brink
6bcbd14d5a Add line counting badge 2017-04-12 17:39:47 -04:00
Adrian Brink
cf875a51fd Fix draw_deps in Makefile and add resulting dependency graph 2017-04-12 17:39:44 -04:00
Ethan Buchman
c34e136be9 update go-rpc to support map and array params 2017-04-12 13:44:10 -04:00
Ethan Frey
705e7bd577 Implemented and tested Txs.Index, hopefully better coverage 2017-04-12 15:18:09 +02:00
Ethan Frey
285a2a7061 More thorough testing of mutated bytes, use fixed go-wire 2017-04-12 15:18:09 +02:00
Ethan Frey
fd68bc7cfd Test Tx proofs secure 2017-04-12 15:16:46 +02:00
Ethan Frey
28307fd4c9 Add proof generation for one tx 2017-04-12 15:16:46 +02:00
Ethan Buchman
72b2be51ec Merge pull request #446 from tendermint/fix-develop
Fix develop
2017-04-11 16:16:10 -04:00
Ethan Buchman
9775ecde99 update glide 2017-04-11 15:45:21 -04:00
Ethan Frey
c0f026a9b3 update go-rpc to fix race condition 2017-04-11 15:45:16 -04:00
Ethan Buchman
ac86e664c7 Revert "Undo last two commits"
This reverts commit d1fc37ff9e.
2017-04-11 15:44:36 -04:00
Ethan Frey
d1fc37ff9e Undo last two commits 2017-04-11 12:57:06 +02:00
Ethan Buchman
7fbe8e47d4 fix tests 2017-04-10 17:32:48 -04:00
Ethan Buchman
d7c5690f17 index by bytes. add TxID to broadcast_tx responses 2017-04-10 17:21:37 -04:00
Ethan Frey
d3069b0f5b Update abci develop 2017-04-10 22:46:03 +02:00
Ethan Frey
e4e17a2c95 Play well with go-{rpc,crypto,data}:develop 2017-04-10 21:16:41 +02:00
Anton Kaliaev
3478de50a1 no need for map - tx responses should arrive in order (Refs #237)
```
me: so we are executing them in order and receiving them in order and there is no way we could receive them out of order (due to network or something else), correct?
ebuchman: if we receive them out of order, ABCI is broken
ebuchman: so it is possible, if the ABCI server we're talking to is not implementing the spec
ebuchman: but that shouldn't justify us building a map
```
2017-04-10 22:44:08 +04:00
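The guarantee in the quoted exchange is what lets the code drop the map: ABCI returns DeliverTx responses in the same order the transactions were sent, so position alone pairs a response with its transaction. A toy Go illustration of that idea (a sketch only, not the actual Tendermint code):

```go
package main

import "fmt"

// Tx and Response are toy stand-ins for the real ABCI types.
type Tx []byte

type Response struct {
	Code uint32
	Log  string
}

// deliverAll collects responses by position: because a correct ABCI server
// answers DeliverTx in send order, res[i] always belongs to txs[i], so no
// tx->response map is needed.
func deliverAll(txs []Tx, deliver func(Tx) Response) []Response {
	res := make([]Response, 0, len(txs))
	for _, tx := range txs {
		res = append(res, deliver(tx))
	}
	return res
}

func main() {
	txs := []Tx{Tx("a"), Tx("b")}
	res := deliverAll(txs, func(tx Tx) Response {
		return Response{Code: 0, Log: string(tx)}
	})
	fmt.Println(len(res), res[0].Log, res[1].Log) // 2 a b
}
```

If responses could arrive out of order, `res[i]` would no longer correspond to `txs[i]`, which is exactly the failure mode the quote attributes to a broken ABCI implementation.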
Anton Kaliaev
63704454a3 expose /tx?hash="XXXXXXXXXXXX" RPC call 2017-04-10 22:44:07 +04:00
Anton Kaliaev
b08f29cb71 add config option for tx indexing and disable it by default 2017-04-10 22:44:07 +04:00
Anton Kaliaev
d62e85757f execution test 2017-04-10 22:44:07 +04:00
Anton Kaliaev
c3f1b08b6a tx indexing (Refs #237)
save transactions to blockstore

move to a separate module

benchmark KVIndexer

batch write transactions

Benchmarks:

```
BenchmarkKVIndexerIndex-2         100000            516300 ns/op
PASS
ok      github.com/tendermint/tendermint/blockchain/tx  56.506s

5.16 s for 10000 transactions
1 s for 2000 transactions
```

```
BenchmarkKVIndexerIndex-2        3000000             8622 ns/op
PASS
ok      github.com/tendermint/tendermint/blockchain/tx  34.210s

86 ms for 10000 transactions
16 ms for 2000 transactions
```

```
BenchmarkKVIndexerIndex1-2               5000000              7160 ns/op
BenchmarkKVIndexerIndex500-2               20000           1750411 ns/op
BenchmarkKVIndexerIndex1000-2              10000           3573973 ns/op
BenchmarkKVIndexerIndex2000-2               5000           7836851 ns/op
BenchmarkKVIndexerIndex10000-2              1000          33438980 ns/op
PASS
ok      github.com/tendermint/tendermint/blockchain/tx  209.482s

7.8 ms for 2000 transactions
```

[state] write test for ApplyBlock

review comments

- move txindexer to state
- fix type

save Tx Index as well

do not store tx itself in the result
2017-04-10 22:44:07 +04:00
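For context on the figures above: ns/op is per benchmark iteration, and in the suffixed variants each iteration indexes a whole batch, so `BenchmarkKVIndexerIndex2000` at 7836851 ns/op is the quoted ~7.8 ms per 2000-tx batch. A self-contained sketch of that benchmark shape, with an in-memory map standing in for the real key-value store (names and structure assumed; not the actual Tendermint indexer):

```go
package kvindex

import (
	"crypto/sha256"
	"encoding/binary"
	"testing"
)

// KVIndexer is a stand-in indexer backed by an in-memory map;
// the real implementation writes to a key-value database.
type KVIndexer struct {
	store map[[sha256.Size]byte][]byte
}

func NewKVIndexer() *KVIndexer {
	return &KVIndexer{store: make(map[[sha256.Size]byte][]byte)}
}

// AddBatch indexes a whole batch of txs by hash in one call, mirroring
// the "batch write transactions" change described above.
func (kv *KVIndexer) AddBatch(txs [][]byte) {
	for _, tx := range txs {
		kv.store[sha256.Sum256(tx)] = tx
	}
}

// BenchmarkKVIndexerIndex2000 measures the cost of indexing a 2000-tx
// batch, the shape of the numbers quoted in the commit message.
func BenchmarkKVIndexerIndex2000(b *testing.B) {
	txs := make([][]byte, 2000)
	for i := range txs {
		txs[i] = make([]byte, 8)
		binary.BigEndian.PutUint64(txs[i], uint64(i))
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		NewKVIndexer().AddBatch(txs)
	}
}
```

Run with `go test -bench=KVIndexerIndex2000`.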
Ethan Buchman
fc95c9872f Merge pull request #438 from tendermint/replay-fix
consensus: timeout on replayLastBlock
2017-04-10 10:33:48 -04:00
Ethan Buchman
09f7dabd5e update comment 2017-03-28 14:06:03 -04:00
Ethan Buchman
4fd1471f11 remove BaseService.OnStart 2017-03-28 12:09:11 -04:00
Ethan Buchman
85e83934a1 fixes from review 2017-03-28 12:07:32 -04:00
Ethan Buchman
162fbdd17b Merge pull request #419 from tendermint/cli_cobra
added use of Cobra CLI
2017-03-27 17:16:43 -04:00
Ethan Buchman
0413a87eb4 fix typo 2017-03-27 16:59:54 -04:00
Ethan Buchman
c1dc1a1a45 Merge pull request #437 from tendermint/feature/431-data-flag
rename TMROOT to TMHOME
2017-03-27 15:47:02 -04:00
Ethan Buchman
077cf13a1f consensus: timeout on replayLastBlock 2017-03-27 15:41:45 -04:00
Anton Kaliaev
12ead6cc7e make changes backwards compatible (Refs #431) 2017-03-27 20:41:00 +04:00
Anton Kaliaev
5f6de800a0 rename TMROOT to TMHOME (Refs #431) 2017-03-27 15:17:10 +04:00
Ethan Buchman
5420254b36 changelog: add prehistory 2017-03-09 01:34:11 -05:00
Ethan Buchman
55b3c22d99 publish.sh to push build to s3 2017-03-09 01:34:11 -05:00
Anton Kaliaev
a4154e76c5 update docker readme [ci skip] [circleci skip] 2017-03-08 14:09:54 +04:00
Anton Kaliaev
524f3b2d57 Merge pull request #429 from tendermint/update-docker-image-for-0.9.0
update Dockerfile for 0.9.0 release
2017-03-08 14:06:57 +04:00
Anton Kaliaev
d474baeeea update Dockerfile for 0.9.0 release 2017-03-08 14:05:32 +04:00
rigelrozanski
4e743649be glide update 2017-03-06 17:36:53 -05:00
rigelrozanski
fab518fc98 flag fix, glide update
squash
2017-03-06 17:32:42 -05:00
rigelrozanski
fa609366d4 melekes change request 2017-03-06 17:30:06 -05:00
rigelrozanski
569fd474c2 added use of Cobra CLI
squash
2017-03-06 17:30:06 -05:00
Ethan Buchman
d4f6254551 Merge pull request #426 from tendermint/release-0.9.0
Release 0.9.0
2017-03-06 05:34:45 -05:00
Ethan Buchman
e31ed6dc2f Merge branch 'master' into release-0.9.0 (woops) 2017-03-06 04:27:38 -05:00
Ethan Buchman
8449e9794a CHANGELOG 2017-03-06 04:25:55 -05:00
Ethan Buchman
07a9242dba update glide 2017-03-06 04:02:09 -05:00
Ethan Buchman
097da55a2c test/p2p: shellcheck 2017-03-06 03:49:48 -05:00
Ethan Buchman
caaafa192b test/persist: use unix socket for rpc 2017-03-06 02:42:00 -05:00
Ethan Buchman
fc6d22db32 test: better client naming 2017-03-06 01:11:00 -05:00
Ethan Buchman
d58a666445 test/persist: bump sleep to 5 for bound ports release 2017-03-06 00:32:24 -05:00
Ethan Buchman
22d95c7b51 test: docker exec doesnt work on circle 2017-03-05 23:38:11 -05:00
Ethan Buchman
b1cd677711 types: valSet LastProposer->Proposer and Proposer()->GetProposer() 2017-03-05 23:28:42 -05:00
Ethan Buchman
1208296dc0 DialSeeds takes an AddrBook 2017-03-05 23:14:15 -05:00
Ethan Buchman
cfc7fed31c test/pex: dial_seeds 2017-03-05 23:14:11 -05:00
Ethan Buchman
e4e70ece3f test: fix docker and apps 2017-03-05 20:39:52 -05:00
Ethan Buchman
0fa34f7f67 fix ProposerSelection by persisting proposer 2017-03-05 19:45:24 -05:00
Ethan Buchman
55602b9be6 failing ProposerSelection test 2017-03-05 15:05:36 -05:00
Ethan Buchman
749df0536f test/docker: install abci apps first 2017-03-05 14:59:02 -05:00
Ethan Buchman
2037d2631a fix race 2017-03-05 03:40:36 -05:00
Ethan Buchman
de0153a1c4 consensus: some more informative logging 2017-03-05 02:15:46 -05:00
Ethan Buchman
d93d754972 test/p2p/fast_sync: use --pex on restart 2017-03-05 02:03:49 -05:00
Ethan Buchman
a5ce4f6c36 update glide 2017-03-05 00:19:53 -05:00
Ethan Buchman
05d8cd50b5 update glide and node.go for update to p2p.AddrBook 2017-03-04 23:45:54 -05:00
Anton Kaliaev
af5cd5cc75 [p2p tests] test other peers connect to us
if we have neither seeds nor the addrbook
2017-03-04 23:23:02 -05:00
Anton Kaliaev
4090a31d19 save seeds to addrBook (Refs #335) 2017-03-04 23:23:02 -05:00
Anton Kalyaev
163fe1731b test p2p pex reactor (Refs #335) 2017-03-04 23:22:48 -05:00
Anton Kalyaev
eef9124d1b fix typo 2017-03-04 23:21:57 -05:00
Ethan Buchman
027cb8dc6b glide: use versions where applicable 2017-03-04 22:11:44 -05:00
Ethan Buchman
c7386b139b glide update 2017-03-04 22:05:14 -05:00
rigelrozanski
f5c4fdc82a seeds fix 2017-03-04 21:43:03 -05:00
Ethan Buchman
8352ec4e5d circle sigh 2017-03-03 19:58:17 -05:00
Ethan Buchman
8192bb0aaf stop rpc listeners in node.OnStop() 2017-03-03 19:47:39 -05:00
Ethan Buchman
b13924701e test/persist: wait for ports to be freed 2017-03-03 18:38:40 -05:00
Ethan Buchman
7098e5c7eb glide update 2017-03-03 01:33:13 -05:00
Ethan Buchman
4747f14a2c version bump to 0.9.0 2017-03-02 23:58:55 -05:00
Ethan Buchman
692691938c remove comment 2017-03-02 23:57:28 -05:00
Ethan Buchman
a200505ae5 Merge pull request #422 from tendermint/fix-race
Fix race
2017-03-02 23:56:18 -05:00
Ethan Buchman
8ba79252c8 types: use mtx on PartSet.String() 2017-03-02 23:50:59 -05:00
Ethan Buchman
6456654307 test: add extra kill after fail index triggered 2017-03-02 20:46:30 -05:00
Ethan Frey
21501815dd Fix EventSwitch usage in WaitForOneEvent 2017-02-27 14:55:29 +01:00
Ethan Frey
d56cb2ab4b fix typo 2017-02-24 23:40:05 +01:00
Ethan Frey
4fead237f0 Client embeds EventSwitch, client.HTTP properly un/subscribes events over websocket 2017-02-24 21:26:17 +01:00
Ethan Frey
6282fad518 Clean up event switch add helper function 2017-02-24 19:59:40 +01:00
Ethan Frey
175bb329e4 Expose EventSwitch on top of websocket client 2017-02-24 19:15:22 +01:00
Ethan Frey
9be3064904 Expose and test EventSwitch in client.Local 2017-02-24 17:51:19 +01:00
Ethan Frey
98450ee2db Client: DumpConsensusState, not DialSeeds. Cleanup 2017-02-24 17:05:15 +01:00
Ethan Frey
931af6a072 Combine local and http into client package, unify tests with table-driven tests 2017-02-24 17:05:15 +01:00
Ethan Frey
202146e4ce Fix up checktx/delivertx in broadcastTx(A)sync 2017-02-24 17:05:15 +01:00
Ethan Frey
9693795c4c Move common code to merkleeyes/testutil 2017-02-24 17:05:15 +01:00
Ethan Frey
cd9ee9d84b cleanup 2017-02-24 17:05:15 +01:00
Ethan Frey
42a9b847ec Make all client tests safe to run in parallel 2017-02-24 17:05:15 +01:00
Ethan Frey
d92a5b1074 Reworked WaitForHeight to avoid race conditions 2017-02-24 17:05:14 +01:00
Ethan Frey
f7f7cf576a Fix race condition in websocket event listening 2017-02-24 17:05:14 +01:00
Ethan Frey
26f4b5c98e Clean up package names 2017-02-24 17:05:14 +01:00
Ethan Frey
70f19e809b Add MockStatus, WaitForHeight helper 2017-02-24 17:05:14 +01:00
Ethan Frey
0905332f1d MockClient for real abci app 2017-02-24 17:05:14 +01:00
Ethan Frey
df172fa840 Provide mock interfaces for calling abci app over tendermint rpc 2017-02-24 17:05:14 +01:00
Ethan Frey
ce044dbb76 Extracted Clients into a consistent interface, fixed type issue in http.Client 2017-02-24 17:05:14 +01:00
Ethan Frey
7c26be3242 Begin implementation of local client 2017-02-24 17:05:13 +01:00
Ethan Frey
bf1ee89b27 Moved httpclient into subpackage 2017-02-24 17:05:13 +01:00
Ethan Frey
a0bdae4f9c Added missing pkg/errors dependency 2017-02-24 17:05:13 +01:00
Ethan Frey
64feaa05f4 Full test coverage for rpc client 2017-02-24 17:05:13 +01:00
Ethan Frey
5ea3f24304 test info 2017-02-24 17:05:13 +01:00
Ethan Frey
c9d36cd713 Add dependencies, pull out HTTPClient test code 2017-02-24 17:05:13 +01:00
Ethan Frey
2c75c9daf9 Clean up tests, remove panics 2017-02-24 17:05:12 +01:00
Ethan Frey
d971416d12 collision merge of light-client code 2017-02-24 17:05:12 +01:00
Anton Kaliaev
1f1dcead3d [Vagrantfile] go 1.8 and permissions fix 2017-02-24 13:31:27 +04:00
Anton Kaliaev
d38a6e329f verbose output when get_deps 2017-02-24 13:30:46 +04:00
Anton Kaliaev
f73f53c486 [Dockerfile.develop] fix error "/bin/sh: #: not found" when building 2017-02-23 12:17:20 +04:00
Ethan Buchman
b8279d424c Merge pull request #414 from tendermint/ci
Ci
2017-02-21 15:17:16 -05:00
Ethan Buchman
98a509ed97 Merge pull request #373 from melekes/bugfix/174-peer-send-failures-are-not-checked
Check `peer.Send` failures
2017-02-21 15:00:50 -05:00
Ethan Buchman
7c1e79cbc5 test/persists: wait for tendermint proc 2017-02-21 14:14:08 -05:00
Ethan Buchman
7fab31fbe3 test: more logging 2017-02-21 11:18:40 -05:00
Anton Kaliaev
2c724d5eee remove warning messages in favor of "Send failed" 2017-02-21 13:25:16 +04:00
Anton Kaliaev
6dbe9febce log warning if peer send failed (Refs #174)
make lint happy

remove dead code

remove not needed go-common dependency

check peer.Send failures (Refs #174)
2017-02-21 11:57:33 +04:00
Ethan Buchman
d754d210cd test: only use syslog on circle 2017-02-21 01:28:58 -05:00
Ethan Buchman
62adbe69ff Merge pull request #413 from tendermint/ci
test: forward CIRCLECI var through docker
2017-02-20 23:11:22 -05:00
Ethan Buchman
f3da6d23cb test: forward CIRCLECI var through docker 2017-02-20 22:53:14 -05:00
Ethan Buchman
a4d5ec491e Merge pull request #411 from tendermint/bugfix/init-should-not-overwrite-existing-files
do not overwrite existing files when doing `tendermint init`
2017-02-20 22:03:22 -05:00
Ethan Buchman
bed86da8ae Merge pull request #408 from tendermint/mock_app
handshake replay through consensus using mockApp
2017-02-20 22:01:26 -05:00
Ethan Buchman
0c4b6cd071 consensus: more handshake replay tests 2017-02-20 21:45:53 -05:00
Ethan Buchman
f9df4294f3 move some interfaces to types/services.go 2017-02-20 20:09:15 -05:00
Ethan Buchman
0765613778 move handshake to consensus package 2017-02-20 19:52:36 -05:00
Ethan Buchman
1fa6e7f3b1 test: shellcheck 2017-02-20 18:51:00 -05:00
Ethan Buchman
2b1b8da58d test/persist: dont use log files on circle 2017-02-20 18:41:29 -05:00
Ethan Buchman
756213c5f5 check appHash 2017-02-20 17:08:38 -05:00
Ethan Buchman
bc67859672 make ReplayBlocks logic exhaustive 2017-02-20 16:32:48 -05:00
Anton Kaliaev
a99885cd43 do not overwrite existing files when doing tendermint init (Fixes #410) 2017-02-20 11:14:27 +04:00
Ethan Buchman
44d472ddd3 comments from review 2017-02-18 22:15:59 -05:00
Ethan Buchman
7228b11e3f state: remove StateIntermediate 2017-02-17 19:13:35 -05:00
Ethan Buchman
0bec99fbd4 consensus: handshake replay test using wal 2017-02-17 19:12:05 -05:00
Ethan Buchman
3c5adebcd3 applyBlock to simplify replay of many blocks. still wip 2017-02-17 11:32:56 -05:00
Ethan Buchman
edc5e272db consensus: nice error msg if ApplyBlock fails 2017-02-17 10:57:09 -05:00
Ethan Buchman
6403b2f468 fixes for handshake replay through consensus 2017-02-17 10:51:05 -05:00
Ethan Buchman
cbe6dbe7a1 handshake replay through consensus using mockApp 2017-02-16 17:56:45 -05:00
Ethan Buchman
3ebd69db1b Merge pull request #405 from tendermint/rpc-commit
Rpc commit
2017-02-16 16:11:39 -05:00
Ethan Buchman
a3898fae0f rpc: fix SeenCommit condition 2017-02-16 15:35:34 -05:00
Ethan Buchman
75c856ed3f Merge pull request #404 from tendermint/feature/develop-docker-container
dockerfile for develop branch
2017-02-16 13:21:54 -05:00
Ethan Buchman
d081485e6a Merge pull request #402 from tendermint/feature/collect-logs-from-docker-containers
collect and add docker logs to CircleCI artifacts (Refs #387)
2017-02-16 13:13:13 -05:00
Ethan Buchman
bb5688b1be make: dont use -v on go test 2017-02-14 17:09:47 -05:00
Ethan Buchman
6f83f0bd4e glide update 2017-02-14 17:09:12 -05:00
Ethan Buchman
99b068b313 BlockMeta uses BlockID 2017-02-14 17:06:58 -05:00
Ethan Buchman
e229c8c3d7 rpc: /commit 2017-02-14 17:06:47 -05:00
Anton Kaliaev
6b499e2a8b add Makefile for docker files [ci skip] [circleci skip] 2017-02-15 01:10:29 +04:00
Anton Kaliaev
a756a9feba add readme mirroring https://hub.docker.com/r/tendermint/tendermint/ 2017-02-15 01:10:29 +04:00
Anton Kaliaev
d0ca0cb308 otherwise we end up with unmount errors due to busy disk
```
local_testnet_1
Failed to remove container (local_testnet_1): Error response from daemon: Unable to remove filesystem for 74e1104c705d4f21137bbe9e6710f33511948fc04d26475b482d1314fe70d537: remove /var/lib/docker/containers/74e1104c705d4f21137bbe9e6710f33511948fc04d26475b482d1314fe70d537/shm: device or resource busy
local_testnet_2
Failed to remove container (local_testnet_2): Error response from daemon: Unable to remove filesystem for 6f2ec44e01afd85875c1d9958afa957f1623ed9464df2b4fcac06feb8254145e: remove /var/lib/docker/containers/6f2ec44e01afd85875c1d9958afa957f1623ed9464df2b4fcac06feb8254145e/shm: device or resource busy
local_testnet_3
Failed to remove container (local_testnet_3): Error response from daemon: Unable to remove filesystem for 6160c73d45376920d90473a487a07f0e283e383011a0d23050b22cd82ab5d255: remove /var/lib/docker/containers/6160c73d45376920d90473a487a07f0e283e383011a0d23050b22cd82ab5d255/shm: device or resource busy
local_testnet_4
Failed to remove container (local_testnet_4): Error response from daemon: Driver btrfs failed to remove root filesystem c93a30d66ff3710617915a8f6075755ebc45aebf73569420225105cc81003e2f: Failed to destroy btrfs snapshot /var/lib/docker/btrfs/subvolumes for cac56069a3682b6d133e239f479fb82a67c4ccd09b877794d9f440a1125d59d6: operation not permitted

* [15:37:40] stopping rsyslog container
Failed to remove container (rsyslog): Error response from daemon: Driver btrfs failed to remove root filesystem 477287a170a48156e663c55092257064e1f14770d0b6c634a9972893636d9451: Failed to destroy btrfs snapshot /var/lib/docker/btrfs/subvolumes for 76b66d582a478743c099f479bf88eb334ad303ef607f91021dfa58d28c9161eb: operation not permitted
make: *** [test_integrations] Error 1
```
2017-02-14 20:40:23 +04:00
Anton Kaliaev
86f85525dd dockerfile for develop branch 2017-02-14 20:30:15 +04:00
Anton Kaliaev
ad354c4c48 stop containers to avoid futher errors
Error:

```
Failed to remove container (rsyslog): Error response from daemon: Unable to remove filesystem
```
2017-02-14 19:23:28 +04:00
Anton Kaliaev
b5bb1657d2 increase memory quota for Vagrant users
`make test_integrations` requires > 2Gi of memory
2017-02-14 19:09:03 +04:00
Anton Kaliaev
1275458c3f collect and add docker logs to CircleCI artifacts (Refs #387)
How: 1) we start syslog docker container 2) all other containers use
syslog logging driver to ship their logs to that container
2017-02-14 19:02:11 +04:00
Ethan Buchman
ac971c1a19 some docs fixes 2017-02-13 21:40:26 -05:00
Ethan Buchman
7d91d4300b update glide 2017-02-13 21:07:26 -05:00
Ethan Buchman
e972b942e6 Merge pull request #399 from tendermint/feature/cli-cleanup-gen-validator-output
[cli] cleanup gen_validator output
2017-02-09 13:32:30 -05:00
Anton Kaliaev
4896364952 [cli] cleanup gen_validator output (Fixes #396)
so we can pipe result to a file:

```
tendermint gen_validator > example.json
```

before this we had to cut first 3 lines:

```
tendermint gen_validator | sed 1,3d > example.json
```
2017-02-08 23:24:19 +04:00
Anton Kaliaev
ca21b6be93 update Vagrantfile
* glide now gets installed as a part of make get_vendor_deps
* rm go .tar.gz file
* autoremove garbage
* set LC_ALL to en_US.UTF-8
2017-02-08 22:56:17 +04:00
Ethan Frey
d4b3dde853 make install now uses vendored dependencies 2017-02-08 19:28:25 +01:00
Ethan Buchman
1aceeb7116 Merge pull request #392 from tendermint/feature/issue-template
ISSUE_TEMPLATE for Github
2017-02-08 02:17:11 -05:00
Ethan Buchman
930b3bcf5c Merge pull request #393 from tendermint/feature/CONTRIBUTING.md
contributing.md
2017-02-08 02:16:22 -05:00
Ethan Buchman
937c06a0c9 Merge pull request #395 from tendermint/feature/alpine-docker-base-image
[docker] move to alpine base image
2017-02-08 02:11:57 -05:00
Ethan Buchman
77cc15a77a Merge pull request #394 from tendermint/feature/improve-circleci-setup
Improve CircleCI setup (WIP)
2017-02-07 09:25:07 -05:00
Anton Kaliaev
d5d7286cb6 improve circleci config
- update docker machine version to 0.9.0
- save coverage.txt as artifact
- save docker logs as artifact
- test if coverage.txt exists
- 2>&1 redirect stderr to stdout in tests
2017-02-07 13:16:47 +04:00
Anton Kaliaev
bf64dd21fd [docker] move to alpine base image
813Mb => 29.5Mb
2017-02-06 00:11:03 +04:00
Ethan Frey
36b5d86eda [ci skip] [circle skip] Document research on backing dbs 2017-02-05 16:29:37 +01:00
Ethan Frey
1bf5ae9c8d Finish main argument on merkle data store architecture 2017-02-03 19:44:08 +01:00
Ethan Frey
5c2ead741c Lots more text on transactions and interface 2017-02-03 18:54:19 +01:00
Ethan Frey
b31e5d7764 Starting some thoughts on merkle storage 2017-02-03 15:39:35 +01:00
Anton Kaliaev
e5ef3e3019 add ISSUE_TEMPLATE for github [ci skip] [circle skip] 2017-02-03 13:07:59 +04:00
Anton Kaliaev
d696550494 add CONTRIBUTING.md [ci skip] [circle skip] 2017-02-03 13:07:11 +04:00
Ethan Frey
5fe0440916 Strawman docs/architecture dir 2017-02-02 20:10:56 +01:00
Ethan Buchman
d545ac4c51 Merge pull request #383 from tendermint/feature/rename-dockerimage-to-tendermint
Rename docker image to tendermint
2017-01-31 11:18:18 -08:00
Ethan Buchman
23eb8691a8 Merge pull request #386 from tendermint/bugfix/385-dist-build-error
permissions error during make dist
2017-01-31 11:07:28 -08:00
Anton Kaliaev
f849e2c414 copy instead of move (Fixes #385)
```
mv: cannot move ‘./build/dist/windows_386.zip’ to ‘./build/dist/tendermint_0.8.0_windows_386.zip’: Permission denied
```
2017-01-31 11:30:54 +04:00
Anton Kaliaev
1f527c5013 fix go version in Vagrantfile 2017-01-30 12:03:09 +04:00
Anton Kaliaev
65dfacac4b do we really have to build in order to test? 2017-01-30 12:02:24 +04:00
Anton Kaliaev
c0e7d05b5c rename tmbase image to tendermint
The main reason is people usually expect docker image to have the same
name as the repo. Plus, tendermint is cleaner than tmbase.

tmbase would make sense if we had multiple docker images within
tendermint, but we don't.
2017-01-30 11:59:35 +04:00
Jae Kwon
67ab574e98 Cleanup, add stub for VerifyCommitAny 2017-01-29 13:50:53 -08:00
Ethan Buchman
846e6d3bda fix readme links [ci skip] 2017-01-29 13:38:54 -08:00
Ethan Buchman
9707cb4471 README for local testnets [ci skip] 2017-01-28 19:25:28 -05:00
Jae Kwon
7c15b54ccc Fix test/app/dummy_test.sh 2017-01-28 09:11:31 -08:00
Jae Kwon
1af930441c Support new Query message for proofs 2017-01-28 08:27:13 -08:00
Ethan Buchman
9257d648bf test: update docker to 1.7.4 2017-01-28 00:12:39 -05:00
Ethan Buchman
f890ae696a Merge remote-tracking branch 'anton/feature/binaries' into unstable 2017-01-27 23:22:47 -05:00
Ethan Buchman
82828524fe Merge remote-tracking branch 'anton/improvement/dockerfile' into unstable 2017-01-27 22:59:01 -05:00
Ethan Buchman
d120c328c9 Merge remote-tracking branch 'anton/feature/output-all-tendermint-commands' into unstable 2017-01-27 22:54:14 -05:00
rigelrozanski
449a29b817 fixed README link for contributing guidelines 2017-01-27 22:44:01 -05:00
Anton Kaliaev
ce18332b52 update Dockerfile
- update golang to 1.7.4
- version as env variable
- change DATA_ROOT from /tendermint/data to /tendermint (it's not just
  data that gets stored in DATA_ROOT; we create data folder on start; as
  a result we get /tendermint/data/data, which is confusing)
- remove noninteractive env vars (do we really need these?)
- remove nodejs dep (some apps may require nodejs, but core is not one
  of them; it was convenient before, but now I believe we ought to
  remove it because other people who are using java do not want a
  bloated container with nodejs)
- build tendermint inside a container (once again, it was convenient
  before, but now I am testing kubernetes and I don't want to wait every
  time TM compiles)
2017-01-27 21:10:13 +04:00
Anton Kaliaev
3b7a1d7149 check that we have enough arguments
Otherwise:

```
panic: runtime error: index out of range

goroutine 1 [running]:
panic(0xbb8de0, 0xc82000e080)
        /usr/local/go/src/runtime/panic.go:464 +0x3e6
main.main()
        /go/src/github.com/tendermint/tendermint/cmd/tendermint/main.go:48 +0x811
```
2017-01-24 21:20:29 +04:00
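The fix implied by the quoted panic is the usual guard on `os.Args` before indexing into it. A minimal sketch (the usage text and structure are assumed here, not taken from the actual cmd/tendermint/main.go):

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	// Guard before indexing os.Args: without this check, running the
	// binary with no arguments panics with "index out of range".
	if len(os.Args) < 2 {
		fmt.Fprintln(os.Stderr, "usage: tendermint <command> [flags]")
		os.Exit(1)
	}
	cmd := os.Args[1] // safe: an argument is guaranteed to exist
	fmt.Println("running command:", cmd)
}
```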
Anton Kaliaev
17e822757b output all commands 2017-01-24 21:19:45 +04:00
Anton Kaliaev
a388ff198d try to get version from version/version.go 2017-01-23 15:23:39 +04:00
Anton Kaliaev
cc2457f7d5 dist command to make binaries and package them for distribution 2017-01-23 14:45:13 +04:00
Anton Kaliaev
c6375e414d refactor Makefile
- mute most of the commands
- replace github.com/tendermint/tendermint with just "."
- introduce PACKAGES variable
- delete unused NEWLINE
2017-01-22 18:04:58 +04:00
Anton Kaliaev
e825d77736 add .editorconfig to maintain consistent coding style 2017-01-22 18:00:08 +04:00
Ethan Buchman
ce026f4099 Merge pull request #369 from tendermint/node_refactor
Refactor Node; Node is a simple BaseService
2017-01-20 14:19:37 -05:00
Jae Kwon
9a2dd8bc92 Refactor Node; Node is a simple BaseService 2017-01-15 16:59:10 -08:00
Jae Kwon
a073b1db9c Refactor replay console -> replay_file in consensus/replay_file.go 2017-01-15 16:19:02 -08:00
Jae Kwon
cf0cb9558a Clean glide files 2017-01-14 21:03:12 -08:00
Jae Kwon
0aecfe2dae Merge pull request #368 from tendermint/release-0.8.0
Merge release-0.8.0 to develop
2017-01-13 16:03:27 -08:00
Jae Kwon
764091dfbb Merge pull request #366 from tendermint/release-0.8.0
Release 0.8.0
2017-01-13 15:59:34 -08:00
Ethan Buchman
ab1fa4db8c test: split up test/net/test.sh 2017-01-13 17:20:03 -05:00
Ethan Buchman
9123e63a33 more glide updates 2017-01-13 00:54:06 -05:00
Ethan Buchman
ba7ca4f372 fix tests 2017-01-13 00:54:01 -05:00
Ethan Buchman
1e25e9f58f glide update for go-db test fix 2017-01-12 22:49:52 -05:00
Ethan Buchman
9b4660b458 version bump to 0.8.0 2017-01-12 22:07:55 -05:00
Ethan Buchman
16a5c6b2c8 update all glide deps to master 2017-01-12 22:05:03 -05:00
Ethan Buchman
f347143b3d Merge branch 'master' into develop
Conflicts:
	cmd/tendermint/flags.go
	glide.lock
	glide.yaml
	node/node.go
	rpc/core/routes.go
	version/version.go
2017-01-12 20:48:10 -05:00
Ethan Buchman
c1952523cd update glide 2017-01-12 20:22:23 -05:00
Ethan Buchman
1afe0cb45f test: always rebuild grpc_client 2017-01-12 18:45:41 -05:00
Ethan Buchman
7075a8ed81 Merge pull request #365 from tendermint/rename
Rename
2017-01-12 16:41:52 -05:00
Ethan Buchman
d83ca54b36 file name fixes 2017-01-12 16:17:43 -05:00
Ethan Buchman
e0aead0be2 update glide 2017-01-12 16:12:41 -05:00
Ethan Buchman
94b6dd65ee AppendTx -> DeliverTx 2017-01-12 15:55:03 -05:00
Ethan Buchman
c147b41013 TMSP -> ABCI 2017-01-12 15:53:32 -05:00
Ethan Buchman
3a55339114 Merge pull request #364 from tendermint/collapse_info
tmsp: ResponseInfo and ResponseEndBlock
2017-01-12 15:43:59 -05:00
Ethan Buchman
2dd7030579 tmsp: ResponseInfo and ResponseEndBlock 2017-01-12 15:21:20 -05:00
Ethan Buchman
e7a12f8e38 cs.Wait() 2017-01-12 14:44:42 -05:00
Ethan Buchman
0525e8ed5c rearrange common_test.go; EnsureDir for privVal 2017-01-12 12:38:09 -05:00
Ethan Buchman
1d091bafe9 update glide 2017-01-12 11:00:43 -05:00
Ethan Buchman
814ef37f75 fix tests 2017-01-12 10:58:44 -05:00
Ethan Buchman
56341de5eb Merge remote-tracking branch 'mappum/rpc-tests' into unstable 2017-01-12 02:46:10 -05:00
Ethan Buchman
3c589dac19 startConsensusNet and stopConsensusNet 2017-01-12 02:29:53 -05:00
Ethan Buchman
618fce8d32 update glide 2017-01-11 23:47:28 -05:00
Ethan Buchman
ce0c638005 little fix 2017-01-11 18:37:36 -05:00
Ethan Buchman
15d3d12098 Merge pull request #342 from akalyaev/feature/update-vagrant-setup
Feature: Update Vagrant setup
2017-01-11 18:03:20 -05:00
Ethan Buchman
76b4a45f98 Merge pull request #360 from tendermint/optional-skip-timeout-commit
Optional skip timeout commit
2017-01-11 18:02:46 -05:00
Anton Kalyaev
535fc6cd63 test we can make blocks with skip_timeout_commit=false 2017-01-11 18:00:27 -05:00
Anton Kalyaev
3308ac7d83 set skip_timeout_commit to true for tests
For the tests it's better not to use the timeout_commit, and to wait for all the
votes, because otherwise we can end up with timing dependencies in the testing
code which can lead to nondeterministic failures. That was part of the reason
for this change originally.
2017-01-11 18:00:26 -05:00
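The trade-off described above comes down to one branch after commit: wait out timeout_commit, or advance as soon as every precommit has arrived. A toy Go sketch of that decision (all names assumed; not the actual consensus code, which drives this through a timeout ticker):

```go
package main

import (
	"fmt"
	"time"
)

// VoteSet is a toy stand-in that only tracks how many precommits arrived.
type VoteSet struct{ received, total int }

// HasAll reports whether every validator's precommit has been seen.
func (vs *VoteSet) HasAll() bool { return vs.received == vs.total }

// advanceAfterCommit either waits out timeoutCommit or, when the skip flag
// is set and all votes are already in, moves on immediately -- there is
// nothing left to wait for.
func advanceAfterCommit(vs *VoteSet, skipTimeoutCommit bool, timeoutCommit time.Duration) {
	if skipTimeoutCommit && vs.HasAll() {
		fmt.Println("all precommits received: entering new round immediately")
		return
	}
	time.Sleep(timeoutCommit)
	fmt.Println("timeout_commit elapsed: entering new round")
}

func main() {
	votes := &VoteSet{received: 4, total: 4}
	advanceAfterCommit(votes, true, time.Second)
}
```

In production the timeout gives stragglers' precommits a chance to arrive; in tests, advancing only on `HasAll()` removes the timing dependence entirely, which is the nondeterminism the commit message is avoiding.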
Anton Kalyaev
a1fd312bb1 make progress asap on full precommit votes optional (Refs #348) 2017-01-11 18:00:26 -05:00
Anton Kalyaev
b096651e10 fix glide error: unable to export dependencies to vendor directory
```
[ERROR] Unable to export dependencies to vendor directory: remove /home/vagrant/go/src/github.com/tendermint/tendermint/vendor/golang.org/x/sys/unix: directory not empty
```
2017-01-11 17:59:05 -05:00
Ethan Buchman
44f939c841 Merge pull request #359 from tendermint/val-set-changes
Val set changes
2017-01-11 17:56:52 -05:00
Ethan Buchman
d68cdce2d5 consensus: check HasAll when TwoThirdsMajority 2017-01-11 17:53:46 -05:00
Jae Kwon
43fdc4a1ce Fix #341 2017-01-11 08:57:10 -08:00
Anton Kalyaev
cb2f2b94ee log stages to stdout 2017-01-11 10:35:04 -05:00
Anton Kalyaev
4722410e5e test validator set changes more extensively 2017-01-11 10:35:04 -05:00
Ethan Buchman
55b4bfa1fe consensus: let time.Timer handle non-positive durations 2017-01-11 10:24:40 -05:00
Matt Bell
1532879c64 Fixed RPC client tests 2017-01-07 14:35:54 -08:00
Ethan Buchman
12d92fd5db Merge pull request #343 from tendermint/restart_test
Crash/Restart tests
2017-01-06 11:55:05 -08:00
Ethan Buchman
bae0bc02a6 consensus: be more explicit when we need to write height after handshake 2017-01-05 20:16:42 -08:00
Anton Kalyaev
99974ddc8b update vagrant setup
I am in favor of docker, but people say that running docker containers
inside another container is bad.

Things included in provision:

- docker (latest)
- jq
- curl
- shellcheck
- golang 1.7
- glide
2016-12-26 21:21:06 +04:00
Ethan Buchman
f30a9752e2 more fixes from review 2016-12-23 11:11:22 -05:00
Ethan Buchman
c90985d309 test: set log_level=info 2016-12-22 22:25:47 -05:00
Ethan Buchman
bd222d6e3c test: more unique container names 2016-12-22 22:10:40 -05:00
Ethan Buchman
0c01b0ded9 state.State and wal.writeHeight after handshake 2016-12-22 22:10:36 -05:00
Ethan Buchman
e5fb681615 consensus: remove crankTimeoutPropose from tests 2016-12-22 22:03:42 -05:00
Ethan Buchman
c9698e4848 fixes from review 2016-12-22 22:03:42 -05:00
Ethan Buchman
0e7694ca94 state: AppHashIsStale -> IntermediateState 2016-12-22 15:01:22 -05:00
Ethan Buchman
f4e6cf4439 consensus: sync wal.writeHeight 2016-12-22 15:01:02 -05:00
Ethan Buchman
e4921733df test/persist: use fail-test failure indices 2016-12-22 02:49:54 -05:00
Anton Kalyaev
69a449a073 test/p2p: use PROXY_APP=persistent_dummy 2016-12-22 01:43:23 -05:00
Anton Kalyaev
30328548f7 test/p2p: kill and restart all nodes 2016-12-22 01:42:51 -05:00
Anton Kalyaev
57f3592411 fix 2 errors when running p2p tests more than once
Error #1:

```
Error response from daemon: network with name local_testnet already exists
```

Fixed by stopping and removing local_testnet containers and removing
the network

Error #2:

```
docker: Error response from daemon: Conflict. The name "/test_container_basic" is already in use by container a7cd15d479a964675e7f259de4ed852e7dfef85b447514728f437cd0b980a709. You have to remove (or rename) that container to beable to reuse that name..
```

Fixed by adding `--rm` flag.
2016-12-21 19:18:18 -05:00
Anton Kalyaev
1c24031dd2 rename COUNT to ID 2016-12-21 19:18:18 -05:00
Anton Kalyaev
3d47ef9d74 fix typo 2016-12-21 19:18:18 -05:00
Anton Kalyaev
6488894210 add .vagrant to .gitignore 2016-12-21 19:18:18 -05:00
Ethan Buchman
2f0d31b4b6 test: remove codecov patch threshold 2016-12-20 01:47:59 -05:00
Ethan Buchman
b2376058a1 blockchain: thread safe store.Height() 2016-12-20 00:45:45 -05:00
Ethan Buchman
e981ff4e7d tests: shorten timeouts 2016-12-20 00:17:41 -05:00
Ethan Buchman
b126ca0606 consensus: no internal vars in reactor.String() 2016-12-20 00:17:37 -05:00
Ethan Buchman
40b08f2494 consensus: mv timeoutRoutine into TimeoutTicker 2016-12-19 22:29:32 -05:00
Ethan Buchman
8211fa6ce4 enterNewRound on HasAll 2016-12-19 20:12:37 -05:00
Ethan Buchman
706dd1d6c5 test: dont start cs until all peers connected 2016-12-19 19:50:40 -05:00
Ethan Buchman
6bc3b8dc6d test: circle artifacts 2016-12-19 15:42:39 -05:00
Ethan Buchman
faf23aa0d4 consensus: TimeoutTicker, skip TimeoutCommit on HasAll 2016-12-19 15:42:36 -05:00
Ethan Buchman
18b3821e06 glide: update go-wire 2016-12-18 00:18:59 -05:00
Ethan Buchman
1bd700ee52 test: automate building consensus/test_data 2016-12-18 00:15:10 -05:00
Ethan Buchman
38783e7fa1 types: SignatureEd25519 -> Signature 2016-12-18 00:14:07 -05:00
Ethan Buchman
dcbb35089f consensus: wal.Flush() and cleanup replay tests 2016-12-17 23:43:17 -05:00
Ethan Buchman
f33cc3fb3b Merge pull request #325 from tendermint/sign_bytes
Sign bytes
2016-12-17 21:48:19 -05:00
Ethan Buchman
1b3766d802 types: canonical_json.go 2016-12-17 21:21:27 -05:00
zachary balder
da8b043612 sign bytes w struct literals 2016-12-17 21:21:27 -05:00
Ethan Buchman
ed42f70248 types: benchmark WriteSignBytes 2016-12-17 21:21:27 -05:00
Ethan Buchman
0204d3c6a6 Merge pull request #330 from tendermint/fix_tests
Fix tests
2016-12-17 21:18:14 -05:00
Ethan Buchman
81f91aebc2 test: crank circle timeouts 2016-12-17 15:16:58 -05:00
Ethan Buchman
de6bba4609 test: randConsensusNet takes more args 2016-12-17 14:45:20 -05:00
Ethan Buchman
c3a3cc76d8 Merge pull request #334 from tendermint/release-0.7.4
shame: version bump 0.7.4
2016-12-14 10:08:12 -05:00
Ethan Buchman
74ff489e63 shame: version bump 0.7.4 2016-12-14 10:05:25 -05:00
Ethan Buchman
55b47bcb8e Merge pull request #323 from tendermint/release-0.7.4
Release 0.7.4: feature flag: pex reactor
2016-12-14 09:44:34 -05:00
Ethan Buchman
bcd8712ec3 test: more cleanup on p2p 2016-12-12 16:00:21 -05:00
Ethan Buchman
b212aa0db3 fixes from review 2016-12-12 14:12:37 -05:00
Ethan Buchman
8390e88e27 fix flowrate dep and test 2016-12-09 01:28:08 -05:00
Ethan Buchman
bc95700dcf update README 2016-12-09 00:31:56 -05:00
Ethan Buchman
12c6594c9b rpc: remove restriction on DialSeeds 2016-12-09 00:31:53 -05:00
Ethan Buchman
d800a51da4 test: crank it to eleventy 2016-12-07 20:13:52 -05:00
Ethan Buchman
73502fab0d shame: forgot a file 2016-12-06 23:17:53 -05:00
Ethan Buchman
2425711734 blockchain: use ApplyBlock 2016-12-06 23:01:55 -05:00
Ethan Buchman
6be5bda8c9 types: copy commit bit array 2016-12-06 23:01:25 -05:00
Ethan Buchman
2abcde03ad tests: cleanup and fix scripts 2016-12-06 23:00:39 -05:00
Ethan Buchman
69ef1da58c types: copy vote set bit array 2016-12-06 20:53:02 -05:00
Ethan Buchman
8df32cd540 test: increase proposal timeout 2016-12-06 19:54:10 -05:00
Jae Kwon
3aa06d0851 Try CircleCI timeout extension again 2016-12-06 04:28:05 -08:00
Jae Kwon
e611e97339 Try to extend CircleCI timeout 2016-12-06 04:25:31 -08:00
Jae Kwon
3000c8b349 Update glide w/ TMSP updates 2016-12-06 04:04:08 -08:00
Jae Kwon
6f88d04ac4 call db.SetSync when necessary 2016-12-06 02:52:07 -08:00
Jae Kwon
1c16ce8cf0 Update go-db and tmsp 2016-12-06 02:18:13 -08:00
Jae Kwon
070728429b Upgrade autofile and merkle 2016-12-06 01:59:21 -08:00
Jae Kwon
918f76f96a Remove flowcontrol 2016-12-06 01:26:05 -08:00
Jae Kwon
4202c4bf20 Fix Merge pull request #319 2016-12-06 01:16:13 -08:00
Ethan Buchman
dc436e72f9 Merge pull request #255 from tendermint/broadcast_tx_grpc
broadcast_tx via grpc
2016-12-02 00:31:40 -05:00
Ethan Buchman
b74a97a4f6 update grpc broadcast tx 2016-12-02 00:29:25 -05:00
Ethan Buchman
2ef695da97 include check/append responses in broadcast_tx_commit 2016-12-02 00:29:25 -05:00
Ethan Buchman
db437e7a45 broadcast_tx via grpc 2016-12-02 00:29:25 -05:00
Ethan Buchman
538a50c9e8 Merge pull request #319 from tendermint/valset
Validator Set Changes
2016-12-02 00:28:07 -05:00
Ethan Buchman
0fe53dc5cf remove privValIndex; Stale->AppHashIsStale 2016-12-02 00:27:36 -05:00
Ethan Buchman
8b80f8ee05 feature flag: pex reactor 2016-11-30 23:07:45 -05:00
Ethan Buchman
3e7c7f51fa update glide 2016-11-23 19:44:25 -05:00
Ethan Buchman
2f9063c1d6 consensus: test validator set change 2016-11-23 18:20:46 -05:00
Ethan Buchman
65496ace20 test: tmsp query result is json 2016-11-22 21:50:54 -05:00
Ethan Buchman
64c7a0ad0d update glide 2016-11-22 21:28:57 -05:00
Ethan Buchman
5046d5b181 more handshake replay cleanup 2016-11-22 20:38:14 -05:00
Ethan Buchman
6f8c91b651 use NewValidator; fix setPrivValidatorIndex 2016-11-22 20:38:14 -05:00
Ethan Buchman
e0db20c0cf update privValidatorIndex on valset change 2016-11-22 20:38:14 -05:00
Ethan Buchman
e1e2c1c740 cleanup ReplayBlocks 2016-11-22 20:38:14 -05:00
Ethan Buchman
655b6300f5 val set changes 2016-11-22 20:38:14 -05:00
Ethan Buchman
d7f6c0775a remove LastCommitHeight 2016-11-22 20:38:14 -05:00
Ethan Buchman
a3d863f83b consensus: track index of privVal 2016-11-22 20:38:14 -05:00
Jae Kwon
deb4c428fd Update go-autofile 2016-11-21 20:29:30 -08:00
Jae Kwon
b8e94b1d30 Update autofile 2016-11-21 20:19:49 -08:00
Jae Kwon
a151216e5e Update go-autofile dependency; fixed checkTotalSizeLimit 2016-11-21 19:58:40 -08:00
Jae Kwon
e09950d3fb Use new Group semantics 2016-11-21 19:16:19 -08:00
Ethan Buchman
3206e101f5 Merge pull request #265 from tendermint/handshake
Handshake
2016-11-16 17:00:01 -05:00
Ethan Buchman
c6a648fad7 consensus: lock before loading commit 2016-11-16 16:47:31 -05:00
Ethan Buchman
904eeddf36 update glide 2016-11-16 16:25:52 -05:00
Ethan Buchman
07597dfd45 post rebase fixes for BlockID, partSize 2016-11-16 16:13:17 -05:00
Ethan Buchman
4360c360a4 move handshake to state, use Handshaker, more tests 2016-11-16 13:29:22 -05:00
Ethan Buchman
befd8b0cb2 post rebase fixes 2016-11-16 13:27:06 -05:00
Ethan Buchman
3f90fcae48 fail tests and fix 2016-11-16 13:26:39 -05:00
Ethan Buchman
8ec1839f5d save block b4 apply; track stale apphash 2016-11-16 13:26:39 -05:00
Ethan Buchman
fb9735ef46 rebase fixes and BeginBlock(hash,header) 2016-11-16 13:25:13 -05:00
Ethan Buchman
138de19e1e test: app persistence 2016-11-16 13:25:13 -05:00
Ethan Buchman
d3ae920bd0 state: ApplyBlock 2016-11-16 13:25:13 -05:00
Ethan Buchman
f37f56d4f1 fixes 2016-11-16 13:25:13 -05:00
Ethan Buchman
a0e4253edc handshake 2016-11-16 13:25:13 -05:00
Ethan Buchman
c3d5634efa begin block 2016-11-16 13:23:57 -05:00
Ethan Buchman
c05b2c5c59 Merge pull request #314 from tendermint/bft_fix_rebase
Bft fix rebase
2016-11-16 01:52:54 -05:00
Ethan Buchman
f763a9ef56 scripts/txs/random.sh 2016-11-16 01:32:13 -05:00
Ethan Buchman
81e6df0d57 cswal: write #HEIGHT:1 for empty wal 2016-11-16 01:23:19 -05:00
Jae Kwon
d83fc02597 MakePartSet takes partSize from config. fix replay test 2016-11-16 01:23:16 -05:00
Jae Kwon
95c8bb4252 Fixing issues from review in #229 2016-11-15 18:48:35 -05:00
Jae Kwon
c1729addce Fix BFT issue where VoteSetMaj23Message wasn't being sent where prs.Round == blockStore.Round() 2016-11-15 18:48:35 -05:00
Jae Kwon
3e3b034252 Make ConsensusReactor use ConsensusState's blockstore; debug functions 2016-11-15 18:48:34 -05:00
Ethan Buchman
9d0c7f6ec7 fix bft test. still halts 2016-11-15 18:47:19 -05:00
Ethan Buchman
c5a803a146 DataChannel -> StateChannel for Maj23Msg 2016-11-15 18:45:36 -05:00
Ethan Buchman
5f55ed2a40 consensus: ensure dir for cswal on reactor tests 2016-11-15 18:45:36 -05:00
Ethan Buchman
7afcf92539 consensus: fix panic on POLRound=-1 2016-11-15 18:45:36 -05:00
Ethan Buchman
57da2e4af5 make byzantine logic testable 2016-11-15 18:45:36 -05:00
Ethan Buchman
f837252ff1 consensus: test reactor 2016-11-15 18:37:33 -05:00
Jae Kwon
fd128c7180 Fix comments from review 2016-11-15 18:37:00 -05:00
Jae Kwon
ea4b60a602 Fix compile bug 2016-11-15 18:37:00 -05:00
Jae Kwon
b73a6905a1 Initial pass at bft_fix_2 completion 2016-11-15 18:37:00 -05:00
Jae Kwon
655d829314 Fix proposal sign bytes. Start tracking blockID in POL 2016-11-15 18:35:17 -05:00
Jae Kwon
1173a85c85 Use BlockID everywhere 2016-11-15 18:34:58 -05:00
Jae Kwon
40791d886d Add test for new VoteSet 2016-11-15 18:33:16 -05:00
Jae Kwon
7221887330 VoteSet can handle conflicting votes. TODO: add more tests 2016-11-15 18:33:16 -05:00
Jae Kwon
3c5a2f55c2 Add validator index and address to Vote. 2016-11-15 18:33:16 -05:00
Ethan Buchman
0b098a2eee use glide in install_tmsp_apps.sh 2016-11-15 18:05:38 -05:00
Ethan Buchman
1765d3c866 update glide 2016-11-15 15:57:03 -05:00
Ethan Buchman
1fedf5b332 Merge branch 'consensus_wal_autofile' into develop_old 2016-11-15 15:43:22 -05:00
Ethan Buchman
f94836783c Merge branch 'master' into develop_old 2016-11-15 15:43:10 -05:00
Jae Kwon
2aecb2a4a3 Ensure *_wal_dir exists 2016-11-05 09:15:34 -07:00
Jae Kwon
bf1bceec87 Use go-flowcontrol 2016-11-04 06:46:34 -07:00
Ethan Buchman
01a3ac50af update glide 2016-11-03 20:16:44 -04:00
Ethan Buchman
3ff9355e7b change some logs to debug 2016-11-03 20:13:39 -04:00
Ethan Buchman
94ac890859 send BeginBlock 2016-11-03 19:51:22 -04:00
Jae Kwon
3d3d8b5b7b cswal -> cs_wal_dir 2016-10-30 03:55:27 -07:00
Jae Kwon
1788a68b1c Consensus WAL uses AutoFile/Group 2016-10-28 15:01:14 -07:00
Jae Kwon
480f44f16c QuitService->BaseService 2016-10-28 12:14:24 -07:00
Jae Kwon
9a089482dc Unnest 2016-10-28 11:58:09 -07:00
Jae Kwon
fc31b463b1 Don't use io.Seek*, not supported in older versions 2016-10-26 22:19:44 -07:00
Jae Kwon
539f8f91bd Update glide file w/ go-common develop branch & go-autofile 2016-10-26 21:59:56 -07:00
Jae Kwon
830e84adc4 Fix minor bug in Consensus WAL; Fix AutoFile dependency 2016-10-26 21:51:03 -07:00
Ethan Buchman
40f2a128b8 Merge pull request #307 from tendermint/release-0.7.3
Release 0.7.3
2016-10-20 21:36:40 -04:00
Ethan Buchman
1786890e32 Merge branch 'master' into release-0.7.3 2016-10-20 21:11:17 -04:00
Ethan Buchman
435d1e3da7 test: install glide for network test 2016-10-20 12:30:22 -04:00
Jae Kwon
642a24dc9c Mempool WAL 2016-10-17 16:54:51 -07:00
Jae Kwon
eab4e1cfa1 Remove redundant cs.WAL from NewNode() 2016-10-17 16:37:32 -07:00
Jae Kwon
a9d8039082 Fix peer memleak; stop goroutine when peer is offline 2016-10-17 11:29:43 -07:00
Ethan Buchman
2113b6f4bb rpc: use interfaces for pipe 2016-10-14 21:38:49 -04:00
Ethan Buchman
7d493774c7 log: move some Info to Debug 2016-10-14 20:27:50 -04:00
Ethan Buchman
de0fc87c1c test: use glide with mintnet/netmon 2016-10-12 12:28:34 -04:00
Ethan Buchman
f0871e4f5e update some scripts 2016-10-12 12:27:37 -04:00
Ethan Buchman
1b37c8affd version: bump 0.7.3 2016-10-12 00:24:24 -04:00
Ethan Buchman
3e29f2bdc2 Merge pull request #297 from tendermint/docs
remove INSTALL dir, add INSTALL.md, update DOCKER
2016-10-11 23:30:38 -04:00
Ethan Buchman
3f4af438c5 remove INSTALL dir, add INSTALL.md, update DOCKER 2016-10-11 23:27:59 -04:00
Ethan Buchman
2cb13bf852 Merge pull request #298 from tendermint/get_round_state
consensus: hvs.StringIndented() was missing a lock. addresses #284
2016-10-11 22:25:37 -04:00
Ethan Buchman
0098387fbf consensus: hvs.StringIndented needed a lock. addresses #284 2016-10-11 19:12:39 -04:00
Ethan Buchman
7e07919d9d Merge pull request #296 from tendermint/replay_fixes
Replay fixes
2016-10-11 16:36:21 -04:00
Ethan Buchman
71baad59df replay: ensure cs.height and wal.height match 2016-10-11 16:06:46 -04:00
Ethan Buchman
9365d33243 replay: more tests 2016-10-11 12:55:04 -04:00
Ethan Buchman
3c18d841fa replay: larger read buffer 2016-10-11 12:51:48 -04:00
Ethan Buchman
7dcb567e53 replay test data 2016-10-11 11:44:07 -04:00
Ethan Buchman
7a424e6b12 Merge pull request #291 from tendermint/type-safe-fire-event
Type safe fire event
2016-10-10 17:36:52 -04:00
Ethan Buchman
302bbc5dbd Merge pull request #295 from tendermint/codecov
add codecov.yml to lower threshold
2016-10-10 17:32:00 -04:00
Ethan Buchman
97a51c130f Merge pull request #290 from tendermint/mempool_txcache
Fix race condition; Refactor out txCache
2016-10-10 17:20:42 -04:00
Ethan Buchman
00d53714f9 add codecov.yml to lower threshold 2016-10-10 17:18:34 -04:00
Ethan Buchman
a07063f119 add test for mempool deadlock 2016-10-10 17:05:50 -04:00
Ethan Buchman
9393be7a49 update glide 2016-10-10 14:55:54 -04:00
Ethan Buchman
9d331715c0 Merge pull request #293 from ethanfrey/type-safe-fire-event
Add test for proper tx event
2016-10-10 14:36:01 -04:00
Ethan Frey
0665bc6e4b Add test for proper tx event 2016-10-10 09:55:47 +02:00
Ethan Buchman
35d4cca8bb type safe events 2016-10-10 03:10:29 -04:00
Jae Kwon
b38748ad1a Fix race condition; Refactor out txCache 2016-10-07 15:30:17 -07:00
Ethan Frey
22979d9365 Fire proper EventData object on append transaction 2016-10-01 22:12:48 +02:00
Ethan Buchman
be3592adf6 Merge pull request #280 from tendermint/develop
Develop
2016-09-26 00:44:53 +09:00
Ethan Buchman
a5f6069967 Merge pull request #279 from tendermint/codecov
Codecov and badges
2016-09-26 00:29:23 +09:00
Ethan Buchman
c481d2bcbb README shields 2016-09-26 00:17:58 +09:00
Ethan Buchman
a1649f774e test: codecov 2016-09-25 22:08:48 +09:00
Jae Kwon
22155df759 Merge pull request #276 from silasdavis/mempool-reap
Fix mistake in Mempool Reap documentation
2016-09-16 08:31:11 -07:00
Silas Davis
7f31ec3398 Fix doc comment on mempool reap 2016-09-16 17:09:18 +02:00
Ethan Buchman
121714c040 Merge branch 'master' into develop 2016-09-12 12:01:42 -04:00
Ethan Buchman
bd43cf2442 Merge pull request #272 from tendermint/hotfix-1.7.2
Hotfix 1.7.2
2016-09-11 16:29:23 -04:00
Ethan Buchman
c7e578ac0d check tmsp client err and set mustConnect=false 2016-09-11 16:08:46 -04:00
Ethan Buchman
0fd8c98301 test starting tendermint before app 2016-09-11 16:08:46 -04:00
Ethan Buchman
e36e79a474 Merge pull request #271 from tendermint/release-0.7.1
Release 0.7.1
2016-09-10 21:14:44 -04:00
Ethan Buchman
0b919f4fd2 proxy: nil -> nilapp 2016-09-10 20:46:12 -04:00
Ethan Buchman
25839d0bb5 test: add killall to dockerfile. cleanup 2016-09-10 20:11:58 -04:00
Ethan Buchman
7f538266ea test: add xxd dep to dockerfile 2016-09-10 19:00:02 -04:00
Ethan Buchman
364932238a bump version 0.7.1 2016-09-10 18:05:13 -04:00
Ethan Buchman
34024291af glide update 2016-09-10 18:04:16 -04:00
Ethan Buchman
a58e8d47c6 Merge pull request #270 from tendermint/query_rpc
Query rpc
2016-09-10 17:44:41 -04:00
Ethan Buchman
28ec26462a test: test dummy using rpc query 2016-09-10 17:42:12 -04:00
Ethan Buchman
caeda30b72 proxy: wrap NewTMSPClient in ClientCreator 2016-09-10 17:19:47 -04:00
Ethan Buchman
41918d619c expose query and info through rpc 2016-09-10 17:14:42 -04:00
Ethan Buchman
bfa690b6f7 config: reduce timeouts during test 2016-09-10 15:16:23 -04:00
Ethan Buchman
035ca7ef61 proxy: NewAppConns takes a NewTMSPClient func 2016-09-09 23:55:24 -04:00
Ethan Buchman
206d00ed8c fixes from review 2016-09-09 23:50:25 -04:00
Ethan Buchman
9fb84d66be Merge pull request #268 from tendermint/replay
consensus: no sign err in replay; fix a race
2016-09-09 23:04:39 -04:00
Ethan Buchman
525378a145 Merge pull request #267 from tendermint/fastsync_fix
Fastsync fix
2016-09-09 20:10:06 -04:00
Ethan Buchman
398428d711 Merge pull request #266 from tendermint/peer_filter
Peer filter
2016-09-09 20:00:53 -04:00
Ethan Buchman
987dac9ee0 consensus: no sign err in replay; fix a race 2016-09-08 19:00:59 -04:00
Ethan Buchman
9bb32f41f1 config: filter_peers defaults to false 2016-09-08 18:56:02 -04:00
Ethan Buchman
bf7ba17932 test/p2p: name client conts so we dont need to rm them because circle 2016-08-27 17:20:34 -04:00
Ethan Buchman
7bec333017 fix fast sync 2016-08-27 14:50:10 -04:00
Ethan Buchman
1bfd67dfc6 test: refactor bash; test fastsync (failing) 2016-08-27 14:50:07 -04:00
Ethan Buchman
943ad0e93f filter peers by addr/pubkey. closes #244 2016-08-25 14:20:11 -04:00
Ethan Buchman
72c44efd84 fix query conn 2016-08-25 14:16:28 -04:00
Ethan Buchman
d9205a85d5 query conn 2016-08-24 01:45:45 -04:00
Ethan Buchman
3a7ee13ece proxy: typed app conns 2016-08-24 01:45:45 -04:00
Ethan Buchman
4802145e8f Merge pull request #249 from tendermint/lastsig
types: privVal.LastSignature. closes #247
2016-08-23 11:42:28 -04:00
Ethan Buchman
7df79d9339 Merge pull request #263 from silasdavis/fix-unsubscribe
Fix unsubscribe
2016-08-23 11:41:01 -04:00
Ethan Buchman
678599c7d4 consensus: add note about replay test 2016-08-23 11:33:18 -04:00
Silas Davis
92736f22e8 Fix unsubscribe 2016-08-23 10:40:42 +01:00
Ethan Buchman
3998bdbfc1 fixes from review 2016-08-17 23:08:43 -04:00
Ethan Buchman
1110c5d37d privVal.LastSignBytes and more replay tests 2016-08-14 13:33:03 -04:00
Ethan Buchman
a1c20ce866 types: privVal.LastSignature. closes #247 2016-08-14 13:33:03 -04:00
Ethan Buchman
4776a7bcbe fix circle.yml 2016-08-14 13:32:19 -04:00
Ethan Buchman
c4764ff916 update glide.lock 2016-08-10 02:16:44 -04:00
Ethan Buchman
bd429f3d4f config: all urls use tcp:// or unix:// prefix 2016-08-10 02:16:41 -04:00
Ethan Buchman
c90bde3187 some comments 2016-08-09 20:31:53 -04:00
Ethan Buchman
cdfbf05e8c Merge branch 'master' into develop 2016-08-07 00:17:42 -04:00
207 changed files with 12525 additions and 3646 deletions

.codecov.yml (new file, 21 lines)

@@ -0,0 +1,21 @@
#
# This codecov.yml is the default configuration for
# all repositories on Codecov. You may adjust the settings
# below in your own codecov.yml in your repository.
#
coverage:
  precision: 2
  round: down
  range: 70...100

  status:
    # Learn more at https://codecov.io/docs#yaml_default_commit_status
    project:
      default:
        threshold: 1% # allow this much decrease on project
    changes: false

comment:
  layout: "header, diff"
  behavior: default # update if exists else create new

.editorconfig (new file, 15 lines)

@@ -0,0 +1,15 @@
# top-most EditorConfig file
root = true

# Unix-style newlines with a newline ending every file
[*]
charset = utf-8
end_of_line = lf
insert_final_newline = true
trim_trailing_whitespace = true

[Makefile]
indent_style = tab

[*.sh]
indent_style = tab

.github/ISSUE_TEMPLATE (vendored, new file, 41 lines)

@@ -0,0 +1,41 @@
<!-- Thanks for filing an issue! Before hitting the button, please answer these questions.-->
**Is this a BUG REPORT or FEATURE REQUEST?** (choose one):
<!--
If this is a BUG REPORT, please:
- Fill in as much of the template below as you can.
If this is a FEATURE REQUEST, please:
- Describe *in detail* the feature/behavior/change you'd like to see.
In both cases, be ready for follow-up questions, and please respond in a timely
manner. We might ask you to provide additional logs and data (tendermint & app)
in the case of a bug.
-->
**Tendermint version** (use `tendermint version` or `git rev-parse --verify HEAD` if installed from source):
**ABCI app** (name for built-in, URL for self-written if it's publicly available):
**Merkleeyes version** (use `git rev-parse --verify HEAD`, skip if you don't use it):
**Environment**:
- **OS** (e.g. from /etc/os-release):
- **Install tools**:
- **Others**:
**What happened**:
**What you expected to happen**:
**How to reproduce it** (as minimally and precisely as possible):
**Anything else we need to know**:

.gitignore (vendored, +4 lines)

@@ -10,3 +10,7 @@ rpc/test/.tendermint
remote_dump
.revision
vendor
.vagrant
test/p2p/data/
test/logs
.glide

CHANGELOG.md (new file, 264 lines)

@@ -0,0 +1,264 @@
# Changelog
## 0.9.2 (April 26, 2017)
BUG FIXES:
- Fix bug in `ResetPrivValidator` where we were using the global config and log (causing external consumers, eg. basecoin, to fail).
## 0.9.1 (April 21, 2017)
FEATURES:
- Transaction indexing - txs are indexed by their hash using a simple key-value store; easily extended to more advanced indexers
- New `/tx?hash=X` endpoint to query for transactions and their DeliverTx result by hash. Optionally returns a proof of the tx's inclusion in the block
- `tendermint testnet` command initializes files for a testnet
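As an aside on the `/tx` endpoint above: a minimal Go sketch of querying it, assuming a node listening on the then-default RPC port 46657 and a placeholder hash (both assumptions; the quoted-hash query form comes from the commit log above):

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// The hash is quoted in the query string, matching the
	// /tx?hash="XXXXXXXXXXXX" form from the commit log (%22 is an
	// encoded double quote). prove=true also requests a Merkle proof
	// of the tx's inclusion in its block.
	url := "http://localhost:46657/tx?hash=%22XXXXXXXXXXXX%22&prove=true"
	resp, err := http.Get(url)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body)) // JSON-RPC result: tx bytes, height, index, DeliverTx result
}
```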
IMPROVEMENTS:
- CLI now uses Cobra framework
- TMROOT is now TMHOME (TMROOT will stop working in 0.10.0)
- `/broadcast_tx_XXX` also returns the Hash (can be used to query for the tx)
- `/broadcast_tx_commit` also returns the height the block was committed in
- ABCIResponses struct persisted to disk before calling Commit; makes handshake replay much cleaner
- WAL uses #ENDHEIGHT instead of #HEIGHT (#HEIGHT will stop working in 0.10.0)
- Peers included via `--seeds`, under `seeds` in the config, or in `/dial_seeds` are now persistent, and will be reconnected to if the connection breaks
BUG FIXES:
- Fix bug in fast-sync where we stop syncing after a peer is removed, even if they're re-added later
- Fix handshake replay to handle validator set changes and results of DeliverTx when we crash after app.Commit but before state.Save()
## 0.9.0 (March 6, 2017)
BREAKING CHANGES:
- Update ABCI to v0.4.0, where Query is now `Query(RequestQuery) ResponseQuery`, enabling precise proofs at particular heights:
```
message RequestQuery{
bytes data = 1;
string path = 2;
uint64 height = 3;
bool prove = 4;
}
message ResponseQuery{
CodeType code = 1;
int64 index = 2;
bytes key = 3;
bytes value = 4;
bytes proof = 5;
uint64 height = 6;
string log = 7;
}
```
- `BlockMeta` data type unifies its Hash and PartSetHash under a `BlockID`:
```
type BlockMeta struct {
BlockID BlockID `json:"block_id"` // the block hash and partsethash
Header *Header `json:"header"` // The block's Header
}
```
- `ValidatorSet.Proposer` is exposed as a field and persisted with the `State`. Use `GetProposer()` to initialize or update after validator-set changes.
- `tendermint gen_validator` command output is now pure JSON
FEATURES:
- New RPC endpoint `/commit?height=X` returns header and commit for block at height `X`
- Client API for each endpoint, including mocks for testing
IMPROVEMENTS:
- `Node` is now a `BaseService`
- Simplified starting Tendermint in-process from another application
- Better organized Makefile
- Scripts for auto-building binaries across platforms
- Docker image improved, slimmed down (using Alpine), and changed from tendermint/tmbase to tendermint/tendermint
- New repo files: `CONTRIBUTING.md`, Github `ISSUE_TEMPLATE`, `CHANGELOG.md`
- Improvements on CircleCI for managing build/test artifacts
- Handshake replay is done through the consensus package, possibly using a mockApp
- Graceful shutdown of RPC listeners
- Tests for the PEX reactor and DialSeeds
BUG FIXES:
- Check peer.Send for failure before updating PeerState in consensus
- Fix panic in `/dial_seeds` with invalid addresses
- Fix proposer selection logic in ValidatorSet by taking the address into account in the `accumComparable`
- Fix inconsistencies with `ValidatorSet.Proposer` across restarts by persisting it in the `State`
## 0.8.0 (January 13, 2017)
BREAKING CHANGES:
- New data type `BlockID` to represent blocks:
```
type BlockID struct {
Hash []byte `json:"hash"`
PartsHeader PartSetHeader `json:"parts"`
}
```
- `Vote` data type now includes validator address and index:
```
type Vote struct {
ValidatorAddress []byte `json:"validator_address"`
ValidatorIndex int `json:"validator_index"`
Height int `json:"height"`
Round int `json:"round"`
Type byte `json:"type"`
BlockID BlockID `json:"block_id"` // zero if vote is nil.
Signature crypto.Signature `json:"signature"`
}
```
- Update TMSP to v0.3.0, where it is now called ABCI and AppendTx is DeliverTx
- Hex strings in the RPC are now "0x" prefixed
FEATURES:
- New message type on the ConsensusReactor, `Maj23Msg`, for peers to alert others they've seen a Maj23,
in order to track and handle conflicting votes intelligently to prevent Byzantine faults from causing halts:
```
type VoteSetMaj23Message struct {
Height int
Round int
Type byte
BlockID types.BlockID
}
```
- Configurable block part set size
- Validator set changes
- Optionally skip TimeoutCommit if we have all the votes
- Handshake between Tendermint and App on startup to sync latest state and ensure consistent recovery from crashes
- GRPC server for BroadcastTx endpoint
IMPROVEMENTS:
- Less verbose logging
- Better test coverage (37% -> 49%)
- Canonical SignBytes for signable types
- Write-Ahead Log for Mempool and Consensus via go-autofile
- Better in-process testing for the consensus reactor and byzantine faults
- Better crash/restart testing for individual nodes at preset failure points, and of networks at arbitrary points
- Better abstraction over timeout mechanics
BUG FIXES:
- Fix memory leak in mempool peer
- Fix panic on POLRound=-1
- Actually set the CommitTime
- Actually send BeginBlock message
- Fix a liveness issue caused by Byzantine proposals/votes. Uses the new `Maj23Msg`.
## 0.7.4 (December 14, 2016)
FEATURES:
- Enable the Peer Exchange reactor with the `--pex` flag for more resilient gossip network (feature still in development, beware dragons)
IMPROVEMENTS:
- Remove restrictions on RPC endpoint `/dial_seeds` to enable manual network configuration
## 0.7.3 (October 20, 2016)
IMPROVEMENTS:
- Type safe FireEvent
- More WAL/replay tests
- Cleanup some docs
BUG FIXES:
- Fix deadlock in mempool for synchronous apps
- Replay handles non-empty blocks
- Fix race condition in HeightVoteSet
## 0.7.2 (September 11, 2016)
BUG FIXES:
- Set mustConnect=false so tendermint will retry connecting to the app
## 0.7.1 (September 10, 2016)
FEATURES:
- New TMSP connection for Query/Info
- New RPC endpoints:
- `tmsp_query`
- `tmsp_info`
- Allow application to filter peers through Query (off by default)
IMPROVEMENTS:
- TMSP connection type enforced at compile time
- All listen/client urls use a "tcp://" or "unix://" prefix
BUG FIXES:
- Save LastSignature/LastSignBytes to `priv_validator.json` for recovery
- Fix event unsubscribe
- Fix fastsync/blockchain reactor
## 0.7.0 (August 7, 2016)
BREAKING CHANGES:
- Strict SemVer starting now!
- Update to ABCI v0.2.0
- Validation types now called Commit
- NewBlock event only returns the block header
FEATURES:
- TMSP and RPC support TCP and UNIX sockets
- Additional config options, including block size and consensus parameters
- New WAL mode `cswal_light`; logs only the validator's own votes
- New RPC endpoints:
- for starting/stopping profilers, and for updating config
- `/broadcast_tx_commit`, returns when tx is included in a block, else an error
- `/unsafe_flush_mempool`, empties the mempool
IMPROVEMENTS:
- Various optimizations
- Remove bad or invalidated transactions from the mempool cache (allows later duplicates)
- More elaborate testing using CircleCI including benchmarking throughput on 4 digitalocean droplets
BUG FIXES:
- Various fixes to WAL and replay logic
- Various race conditions
## PreHistory
Strict versioning only began with the release of v0.7.0, in late summer 2016.
The project itself began in early summer 2014 and was workable decentralized cryptocurrency software by the end of that year.
Through the course of 2015, in collaboration with Eris Industries (now Monax Industries),
many additional features were integrated, including an implementation from scratch of the Ethereum Virtual Machine.
That implementation now forms the heart of [ErisDB](https://github.com/eris-ltd/eris-db).
In the latter half of 2015, the consensus algorithm was upgraded with a more asynchronous design and a more deterministic and robust implementation.
By late 2015, frustration with the difficulty of forking a large monolithic stack to create alternative cryptocurrency designs led to the
invention of the Application Blockchain Interface (ABCI), then called the Tendermint Socket Protocol (TMSP).
The Ethereum Virtual Machine and various other transaction features were removed, and Tendermint was whittled down to a core consensus engine
driving an application running in another process.
The ABCI interface and implementation were iterated on and improved over the course of 2016,
until versioned history kicked in with v0.7.0.

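The `/tx` endpoint added in 0.9.1 is a plain HTTP GET against the node's RPC server. A minimal sketch of querying it from Go, assuming the default RPC port 46657 (exposed in the Dockerfiles below) and the "0x"-prefixed hex encoding the RPC has used since 0.8.0; the hash value is a placeholder:
```
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// Hypothetical hex-encoded tx hash; since 0.8.0 the RPC expects "0x"-prefixed hex.
	hash := "0xAABBCCDD"
	// 46657 is the default RPC port.
	resp, err := http.Get("http://localhost:46657/tx?hash=" + hash)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	// Raw JSON-RPC response containing the TxResult (the DeliverTx result
	// and, optionally, an inclusion proof). Only works if the tx indexer
	// is active.
	fmt.Println(string(body))
}
```
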
16
CONTRIBUTING.md Normal file
View File

@@ -0,0 +1,16 @@
# Contributing guidelines
**Thanks for considering making contributions to Tendermint!**
Please follow standard github best practices: fork the repo, **branch from the
tip of develop**, make some commits, test your code changes with `make test`,
and submit a pull request to develop.
See the [open issues](https://github.com/tendermint/tendermint/issues) for
things we need help with!
Please make sure to use `gofmt` before every commit - the easiest way to do
this is have your editor run it for you upon saving a file.
You can read the full guide [on our
site](https://tendermint.com/docs/guides/contributing).

View File

@@ -1,54 +1,46 @@
# Pull base image.
FROM golang:1.6
FROM alpine:3.5
ENV USER tmuser
ENV DATA_ROOT /data/tendermint
# This is the release of tendermint to pull in.
ENV TM_VERSION 0.9.0
ENV TM_SHA256SUM 697033ea0f34f8b34a8a2b74c4dd730b47dd4efcfce65e53e953bdae8eb14364
# Tendermint will be looking for genesis file in /tendermint (unless you change
# `genesis_file` in config.toml). You can put your config.toml and private
# validator file into /tendermint.
#
# The /tendermint/data dir is used by tendermint to store state.
ENV DATA_ROOT /tendermint
ENV TMHOME $DATA_ROOT
# Set user right away for determinism
RUN groupadd -r $USER \
&& useradd -r -s /bin/false -g $USER $USER
# Create home directory for USER
# Needed for nodejs/npm
RUN mkdir -p /home/$USER \
&& chown -R $USER:$USER /home/$USER
RUN addgroup tmuser && \
adduser -S -G tmuser tmuser
# Create directory for persistence and give our user ownership
RUN mkdir -p $DATA_ROOT \
&& chown -R $USER:$USER $DATA_ROOT
RUN mkdir -p $DATA_ROOT && \
chown -R tmuser:tmuser $DATA_ROOT
# Set the env variables to non-interactive
ENV DEBIAN_FRONTEND noninteractive
ENV DEBIAN_PRIORITY critical
ENV DEBCONF_NOWARNINGS yes
ENV TERM linux
RUN echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selections
# jq and curl used for extracting `pub_key` from private validator while
# deploying tendermint with Kubernetes. It is nice to have bash so the users
# could execute bash commands.
RUN apk add --no-cache bash curl jq
# Grab deps (git)
RUN apt-get update && \
apt-get install -y --no-install-recommends \
git && \
rm -rf /var/lib/apt/lists/*
RUN apk add --no-cache openssl && \
wget https://s3-us-west-2.amazonaws.com/tendermint/${TM_VERSION}/tendermint_${TM_VERSION}_linux_amd64.zip && \
echo "${TM_SHA256SUM} tendermint_${TM_VERSION}_linux_amd64.zip" | sha256sum -c && \
unzip -d /bin tendermint_${TM_VERSION}_linux_amd64.zip && \
apk del openssl && \
rm -f tendermint_${TM_VERSION}_linux_amd64.zip
# Grab deps (node)
RUN curl -sL https://deb.nodesource.com/setup_5.x | bash -
RUN apt-get update && \
apt-get install -y --no-install-recommends \
nodejs && \
rm -rf /var/lib/apt/lists/*
# Copy run.sh
COPY ./run.sh $DATA_ROOT/run.sh
RUN chmod +x $DATA_ROOT/run.sh
# Persist data, set user
WORKDIR $DATA_ROOT
# Expose the data directory as a volume since there's mutable state in there
VOLUME $DATA_ROOT
USER $USER
ENV TMROOT $DATA_ROOT
# p2p port
EXPOSE 46656
# rpc port
EXPOSE 46657
# Run tendermint
CMD ["./run.sh"]
ENTRYPOINT ["tendermint"]
# By default you'll get the dummy app
CMD ["node", "--moniker=`hostname`", "--proxy_app=dummy"]

35
DOCKER/Dockerfile.develop Normal file
View File

@@ -0,0 +1,35 @@
FROM alpine:3.5
ENV DATA_ROOT /tendermint
ENV TMHOME $DATA_ROOT
RUN addgroup tmuser && \
adduser -S -G tmuser tmuser
RUN mkdir -p $DATA_ROOT && \
chown -R tmuser:tmuser $DATA_ROOT
RUN apk add --no-cache bash curl jq
ENV GOPATH /go
ENV PATH "$PATH:/go/bin"
RUN mkdir -p /go/src/github.com/tendermint/tendermint && \
apk add --no-cache go build-base git && \
cd /go/src/github.com/tendermint/tendermint && \
git clone https://github.com/tendermint/tendermint . && \
git checkout develop && \
make get_vendor_deps && \
make install && \
glide cc && \
cd - && \
rm -rf /go/src/github.com/tendermint/tendermint && \
apk del go build-base git
VOLUME $DATA_ROOT
EXPOSE 46656
EXPOSE 46657
ENTRYPOINT ["tendermint"]
CMD ["node", "--moniker=`hostname`", "--proxy_app=dummy"]

15
DOCKER/Makefile Normal file
View File

@@ -0,0 +1,15 @@
build:
# TAG=0.8.0 TAG_NO_PATCH=0.8
docker build -t "tendermint/tendermint" -t "tendermint/tendermint:$TAG" -t "tendermint/tendermint:$TAG_NO_PATCH" .
push:
# TAG=0.8.0 TAG_NO_PATCH=0.8
docker push "tendermint/tendermint" "tendermint/tendermint:$TAG" "tendermint/tendermint:$TAG_NO_PATCH"
build_develop:
docker build -t "tendermint/tendermint:develop" -f Dockerfile.develop .
push_develop:
docker push "tendermint/tendermint:develop"
.PHONY: build build_develop push push_develop

View File

@@ -1,13 +1,56 @@
# Build and run a docker image and container
# Supported tags and respective `Dockerfile` links
- `0.9.0`, `0.9`, `latest` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/d474baeeea6c22b289e7402449572f7c89ee21da/DOCKER/Dockerfile)
- `0.8.0`, `0.8` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/bf64dd21fdb193e54d8addaaaa2ecf7ac371de8c/DOCKER/Dockerfile)
- `develop` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/master/DOCKER/Dockerfile.develop)
`develop` tag points to the [develop](https://github.com/tendermint/tendermint/tree/develop) branch.
# Tendermint
Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine, written in any programming language, and securely replicates it on many machines.
For more background, see the [introduction](https://tendermint.com/intro).
To get started developing applications, see the [application developers guide](https://tendermint.com/docs/guides/app-development).
# How to use this image
## Start one instance of the Tendermint core with the `dummy` app
A very simple example of a built-in app and Tendermint core in one container.
```
# Build base Docker image
# Make sure ./run.sh exists.
docker build -t tendermint/tmbase -f Dockerfile .
# Log into dockerhub
docker login
# Push latest build to dockerhub
docker push tendermint/tmbase
docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint init
docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint
```
## mintnet-kubernetes
If you want to see many containers talking to each other, consider using [mintnet-kubernetes](https://github.com/tendermint/mintnet-kubernetes), which is a tool for running Tendermint-based applications on a Kubernetes cluster.
# Supported Docker versions
This image is officially supported on Docker version 1.13.1.
Support for older versions (down to 1.6) is provided on a best-effort basis.
Please see [the Docker installation documentation](https://docs.docker.com/installation/) for details on how to upgrade your Docker daemon.
# License
View [license information](https://raw.githubusercontent.com/tendermint/tendermint/master/LICENSE) for the software contained in this image.
# User Feedback
## Issues
If you have any problems with or questions about this image, please contact us through a [GitHub](https://github.com/tendermint/tendermint/issues) issue. If the issue is related to a CVE, please check for [a `cve-tracker` issue on the `official-images` repository](https://github.com/docker-library/official-images/issues?q=label%3Acve-tracker) first.
You can also reach the image maintainers via [Slack](http://forum.tendermint.com:3000/).
## Contributing
You are invited to contribute new features, fixes, or updates, large or small; we are always thrilled to receive pull requests, and do our best to process them as fast as we can.
Before you start to code, we recommend discussing your plans through a [GitHub](https://github.com/tendermint/tendermint/issues) issue, especially for more ambitious contributions. This gives other contributors a chance to point you in the right direction, give you feedback on your design, and help you find out if someone else is working on the same thing.

View File

@@ -1,10 +0,0 @@
#! /bin/bash
mkdir -p $GOPATH/src/$TMREPO
cd $GOPATH/src/$TMREPO
git clone https://$TMREPO.git .
git fetch
git reset --hard $TMHEAD
go get -d $TMREPO/cmd/tendermint
make
tendermint node --seeds="$TMSEEDS" --moniker="$TMNAME"

57
INSTALL.md Normal file
View File

@@ -0,0 +1,57 @@
# Install Go
[Install Go, set the `GOPATH`, and put `GOPATH/bin` on your `PATH`](https://github.com/tendermint/tendermint/wiki/Setting-GOPATH).
# Install Tendermint
You should be able to install the latest with a simple `go get -u github.com/tendermint/tendermint/cmd/tendermint`.
The `-u` makes sure all dependencies are updated as well.
Run `tendermint version` and `tendermint --help`.
If the install failed, see [vendored dependencies below](#vendored-dependencies).
To start a one-node blockchain with a simple in-process application:
```
tendermint init
tendermint node --proxy_app=dummy
```
See the [application developers guide](https://github.com/tendermint/tendermint/wiki/Application-Developers) for more details on building and running applications.
## Vendored dependencies
If the `go get` failed, updated dependencies may have broken the build.
Install the correct version of each dependency using `glide`.
First, install `glide`:
```
go get github.com/Masterminds/glide
```
Now, fetch the dependencies and install them with `glide` and `go`:
```
cd $GOPATH/src/github.com/tendermint/tendermint
glide install
go install ./cmd/tendermint
```
Sometimes `glide install` is painfully slow. Hang in there champ.
The latest Tendermint Core version is now installed. Check by running `tendermint version`.
## Troubleshooting
If `go get` keeps failing, fetch the code directly using `git`:
```
mkdir -p $GOPATH/src/github.com/tendermint
git clone https://github.com/tendermint/tendermint $GOPATH/src/github.com/tendermint/tendermint
cd $GOPATH/src/github.com/tendermint/tendermint
glide install
go install ./cmd/tendermint
```

View File

@@ -1,21 +0,0 @@
1. Fork github.com/tendermint/tendermint.
2. Run "make", it should install the daemon, which we named "tendermint".
3. Run "tendermint gen_account". Save the address, pub_key bytes, and priv_key bytes.
This is your developer key for controlling the cloud nodes.
4. Also run "tendermint gen_validator" 5 times, once for each cloud node. Save the output.
5. Create a directory ~/.debora/ and copy cmd/debora/default.cfg into ~/.debora/default.cfg
Copy the priv_key bytes from step 4 into ~/.debora/default.cfg where it says so.
Change the list of hosts in ~/.debora/default.cfg with your own set of 5 cloud nodes.
6. Replace cmd/barak/seed's pubkey with the pub_key bytes from step 3.
7. Update config/tendermint/config.go's genesis with validator pubkeys from step 4.
Give each of your nodes the same amount of voting power.
Set up the accounts however you want.
8. On each cloud node, follow the instructions here: https://github.com/tendermint/tendermint/tree/master/INSTALL
Create tmuser, install go, and also install 'barak'.
Then, run `barak -config="cmd/barak/seed"`.
You don't need to start the node at this time.
9. Now you can run "debora list" on your development machine and post commands to each cloud node.
10. Run scripts/unsafe_upgrade_barak.sh to test that barak is running.
The old barak you started on step 8 should now have quit.
A new instance of barak should be running. Check with `ps -ef | grep "barak"`
11. Run scripts/unsafe_restart_net.sh to start your new testnet.

View File

@@ -1,30 +0,0 @@
NOTE: Only Ubuntu 14.04 64bit is supported at this time.
### Server setup / create `tmuser`
Secure the server, install dependencies, and create a new user `tmuser`
curl -L https://raw.githubusercontent.com/tendermint/tendermint/master/INSTALL/install_env.sh > install_env.sh
source install_env.sh
cd /home/tmuser
### Install Go as `tmuser`
Don't use `apt-get install golang`, it's still on an old version.
curl -L https://raw.githubusercontent.com/tendermint/tendermint/master/INSTALL/install_golang.sh > install_golang.sh
source install_golang.sh
### Run Barak
WARNING: THIS STEP WILL GIVE CONTROL OF THE CURRENT USER TO THE DEV TEAM.
go get -u github.com/tendermint/tendermint/cmd/barak
nohup barak -config="$GOPATH/src/github.com/tendermint/tendermint/cmd/barak/seed" &
### Install/Update MintDB
go get -u github.com/tendermint/tendermint/cmd/tendermint
mkdir -p ~/.tendermint
cp $GOPATH/src/github.com/tendermint/tendermint/config/tendermint/genesis.json ~/.tendermint/
tendermint node --seeds="goldenalchemist.chaintest.net:46656"

View File

@@ -1,63 +0,0 @@
#!/bin/bash
# Run this as root user
# This part is for hardening the server and setting up a user account
if [ `whoami` != "root" ];
then
echo "You must run this script as root"
exit 1
fi
USER="tmuser"
OPEN_PORTS=(46656 46657 46658 46659 46660 46661 46662 46663 46664 46665 46666 46667 46668 46669 46670 46671)
SSH_PORT=22
WHITELIST=()
# update and upgrade
apt-get update -y
apt-get upgrade -y
# fail2ban for monitoring logins
apt-get install -y fail2ban
# set up the network time daemon
apt-get install -y ntp
# install dependencies
apt-get install -y make screen gcc git mercurial libc6-dev pkg-config libgmp-dev
# set up firewall
echo "ENABLE FIREWALL ..."
set -x
# white list ssh access
for ip in "${WHITELIST[@]}"; do
ufw allow from $ip to any port $SSH_PORT
done
if [ ${#WHITELIST[@]} -eq 0 ]; then
ufw allow $SSH_PORT
fi
# open ports
for port in "${OPEN_PORTS[@]}"; do
ufw allow $port
done
# apply
ufw --force enable
set +x
# set up firewall END
# watch the logs and have them emailed to me
# apt-get install -y logwatch
# echo "/usr/sbin/logwatch --output mail --mailto $ADMIN_EMAIL --detail high" >> /etc/cron.daily/00logwatch
# set up user account
echo "CREATE USER $USER ..."
useradd $USER -d /home/$USER
# This user should not have root access.
# usermod -aG sudo $USER
mkdir /home/$USER
cp /etc/skel/.bashrc .
cp /etc/skel/.profile .
chown -R $USER:$USER /home/$USER
echo "Done setting env. Switching to $USER..."
su $USER

View File

@@ -1,29 +0,0 @@
#!/bin/bash
# Run this as tmuser user
# This part is for installing go
if [ `whoami` == "root" ];
then
echo "You should not run this script as root"
exit 1
fi
USER=`whoami`
PWD=`pwd`
# get dependencies
# sudo apt-get install -y make screen gcc git mercurial libc6-dev pkg-config libgmp-dev
# install golang
cd /home/$USER
mkdir gocode
wget https://storage.googleapis.com/golang/go1.4.2.src.tar.gz
tar -xzvf go*.tar.gz
cd go/src
./make.bash
mkdir -p /home/$USER/go/src
echo 'export GOROOT=/home/$USER/go' >> /home/$USER/.bashrc
echo 'export GOPATH=/home/$USER/gocode' >> /home/$USER/.bashrc
echo 'export PATH=$PATH:$GOROOT/bin:$GOPATH/bin' >> /home/$USER/.bashrc
source /home/$USER/.bashrc
cd $PWD

View File

@@ -1,58 +1,74 @@
.PHONY: get_deps build all list_deps install
GOTOOLS = \
github.com/mitchellh/gox \
github.com/Masterminds/glide
PACKAGES=$(shell go list ./... | grep -v '/vendor/')
BUILD_TAGS?=tendermint
TMHOME = $${TMHOME:-$$HOME/.tendermint}
all: get_deps install test
all: install test
TMROOT = $${TMROOT:-$$HOME/.tendermint}
define NEWLINE
install: get_vendor_deps
@go install ./cmd/tendermint
build:
go build -o build/tendermint ./cmd/tendermint
endef
NOVENDOR = go list github.com/tendermint/tendermint/... | grep -v /vendor/
build_race:
go build -race -o build/tendermint ./cmd/tendermint
install: get_deps
go install github.com/tendermint/tendermint/cmd/tendermint
# dist builds binaries for all platforms and packages them for distribution
dist:
@BUILD_TAGS='$(BUILD_TAGS)' sh -c "'$(CURDIR)/scripts/dist.sh'"
build:
go build -o build/tendermint github.com/tendermint/tendermint/cmd/tendermint
test:
@echo "--> Running go test"
@go test $(PACKAGES)
build_race:
go build -race -o build/tendermint github.com/tendermint/tendermint/cmd/tendermint
test_race:
@echo "--> Running go test --race"
@go test -v -race $(PACKAGES)
test: build
go test `${NOVENDOR}`
test_race: build
go test -race `${NOVENDOR}`
test_integrations:
@bash ./test/test.sh
test_integrations:
bash ./test/test.sh
test100: build
for i in {1..100}; do make test; done
test100:
@for i in {1..100}; do make test; done
draw_deps:
# requires brew install graphviz
go get github.com/hirokidaichi/goviz
goviz -i github.com/tendermint/tendermint/cmd/tendermint | dot -Tpng -o huge.png
# requires brew install graphviz or apt-get install graphviz
go get github.com/RobotsAndPencils/goviz
@goviz -i github.com/tendermint/tendermint/cmd/tendermint -d 3 | dot -Tpng -o dependency-graph.png
list_deps:
go list -f '{{join .Deps "\n"}}' github.com/tendermint/tendermint/... | \
@go list -f '{{join .Deps "\n"}}' ./... | \
grep -v /vendor/ | sort | uniq | \
xargs go list -f '{{if not .Standard}}{{.ImportPath}}{{end}}'
xargs go list -f '{{if not .Standard}}{{.ImportPath}}{{end}}'
get_deps:
go get -d `${NOVENDOR}`
go list -f '{{join .TestImports "\n"}}' github.com/tendermint/tendermint/... | \
@echo "--> Running go get"
@go get -v -d $(PACKAGES)
@go list -f '{{join .TestImports "\n"}}' ./... | \
grep -v /vendor/ | sort | uniq | \
xargs go get
xargs go get -v -d
get_vendor_deps:
go get github.com/Masterminds/glide
glide install
get_vendor_deps: ensure_tools
@rm -rf vendor/
@echo "--> Running glide install"
@glide install
update_deps:
go get -d -u github.com/tendermint/tendermint/...
update_deps: tools
@echo "--> Updating dependencies"
@go get -d -u ./...
revision:
-echo `git rev-parse --verify HEAD` > $(TMROOT)/revision
-echo `git rev-parse --verify HEAD` >> $(TMROOT)/revision_history
-echo `git rev-parse --verify HEAD` > $(TMHOME)/revision
-echo `git rev-parse --verify HEAD` >> $(TMHOME)/revision_history
tools:
go get -u -v $(GOTOOLS)
ensure_tools:
go get $(GOTOOLS)
.PHONY: install build build_race dist test test_race test_integrations test100 draw_deps list_deps get_deps get_vendor_deps update_deps revision tools

View File

@@ -1,43 +1,71 @@
# Tendermint
Simple, Secure, Scalable Blockchain Platform
[![CircleCI](https://circleci.com/gh/tendermint/tendermint.svg?style=svg)](https://circleci.com/gh/tendermint/tendermint)
[![codecov](https://codecov.io/gh/tendermint/tendermint/branch/develop/graph/badge.svg)](https://codecov.io/gh/tendermint/tendermint)
[Byzantine-Fault Tolerant](https://en.wikipedia.org/wiki/Byzantine_fault_tolerance)
[State Machine Replication](https://en.wikipedia.org/wiki/State_machine_replication).
Or [Blockchain](https://en.wikipedia.org/wiki/Blockchain_(database)) for short.
_NOTE: This is still pre-alpha, non-production-quality software._
[![version](https://img.shields.io/github/tag/tendermint/tendermint.svg)](https://github.com/tendermint/tendermint/releases/latest)
[![API Reference](
https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/68747470733a2f2f676f646f632e6f72672f6769746875622e636f6d2f676f6c616e672f6764646f3f7374617475732e737667
)](https://godoc.org/github.com/tendermint/tendermint)
[![chat](https://img.shields.io/badge/slack-join%20chat-pink.svg)](http://forum.tendermint.com:3000/)
[![license](https://img.shields.io/github/license/tendermint/tendermint.svg)](https://github.com/tendermint/tendermint/blob/master/LICENSE)
[![](https://tokei.rs/b1/github/tendermint/tendermint?category=lines)](https://github.com/tendermint/tendermint)
Branch | Tests | Coverage | Report Card
----------|-------|----------|-------------
develop | [![CircleCI](https://circleci.com/gh/tendermint/tendermint/tree/develop.svg?style=shield)](https://circleci.com/gh/tendermint/tendermint/tree/develop) | [![codecov](https://codecov.io/gh/tendermint/tendermint/branch/develop/graph/badge.svg)](https://codecov.io/gh/tendermint/tendermint) | [![Go Report Card](https://goreportcard.com/badge/github.com/tendermint/tendermint/tree/develop)](https://goreportcard.com/report/github.com/tendermint/tendermint/tree/develop)
master | [![CircleCI](https://circleci.com/gh/tendermint/tendermint/tree/master.svg?style=shield)](https://circleci.com/gh/tendermint/tendermint/tree/master) | [![codecov](https://codecov.io/gh/tendermint/tendermint/branch/master/graph/badge.svg)](https://codecov.io/gh/tendermint/tendermint) | [![Go Report Card](https://goreportcard.com/badge/github.com/tendermint/tendermint/tree/master)](https://goreportcard.com/report/github.com/tendermint/tendermint/tree/master)
_NOTE: This is alpha software. Please contact us if you intend to run it in production._
Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine, written in any programming language,
and replicates it on many machines.
See the [application developers guide](https://github.com/tendermint/tendermint/wiki/Application-Developers) to get started.
and securely replicates it on many machines.
For more background, see the [introduction](https://tendermint.com/intro).
To get started developing applications, see the [application developers guide](https://tendermint.com/docs/guides/app-development).
## Install
To download pre-built binaries, see our [downloads page](https://tendermint.com/intro/getting-started/download).
To install from source, you should be able to:
`go get -u github.com/tendermint/tendermint/cmd/tendermint`
For more details (or if it fails), see the [install guide](https://tendermint.com/docs/guides/install).
## Contributing
Yay open source! Please see our [contributing guidelines](https://github.com/tendermint/tendermint/wiki/Contributing).
Yay open source! Please see our [contributing guidelines](https://tendermint.com/docs/guides/contributing).
## Resources
### Tendermint Core
- [Introduction](https://github.com/tendermint/tendermint/wiki/Introduction)
- [Validators](https://github.com/tendermint/tendermint/wiki/Validators)
- [Byzantine Consensus Algorithm](https://github.com/tendermint/tendermint/wiki/Byzantine-Consensus-Algorithm)
- [Block Structure](https://github.com/tendermint/tendermint/wiki/Block-Structure)
- [RPC](https://github.com/tendermint/tendermint/wiki/RPC)
- [Genesis](https://github.com/tendermint/tendermint/wiki/Genesis)
- [Configuration](https://github.com/tendermint/tendermint/wiki/Configuration)
- [Light Client Protocol](https://github.com/tendermint/tendermint/wiki/Light-Client-Protocol)
- [Roadmap for V2](https://github.com/tendermint/tendermint/wiki/Roadmap-for-V2)
- [Introduction](https://tendermint.com/intro)
- [Docs](https://tendermint.com/docs)
- [Software using Tendermint](https://tendermint.com/ecosystem)
### Sub-projects
* [TMSP](http://github.com/tendermint/tmsp)
* [ABCI](http://github.com/tendermint/abci)
* [Mintnet](http://github.com/tendermint/mintnet)
* [Go-Wire](http://github.com/tendermint/go-wire)
* [Go-P2P](http://github.com/tendermint/go-p2p)
* [Go-Merkle](http://github.com/tendermint/go-merkle)
## Install
### Applications
`go get -u github.com/tendermint/tendermint/cmd/tendermint`
* [Ethermint](http://github.com/tendermint/ethermint)
* [Basecoin](http://github.com/tendermint/basecoin)
### More
* [Tendermint Blog](https://tendermint.com/blog)
* [Cosmos Blog](https://cosmos.network/blog)
* [Original Whitepaper (out-of-date)](http://www.the-blockchain.com/docs/Tendermint%20Consensus%20without%20Mining.pdf)
* [Master's Thesis on Tendermint](https://atrium.lib.uoguelph.ca/xmlui/handle/10214/9769)
For more details, see the [install guide](https://github.com/tendermint/tendermint/wiki/Installation).

49
Vagrantfile vendored
View File

@@ -1,25 +1,38 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
VAGRANTFILE_API_VERSION = "2"
Vagrant.configure("2") do |config|
config.vm.box = "ubuntu/trusty64"
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
config.vm.box = "phusion-open-ubuntu-14.04-amd64"
config.vm.box_url = "https://oss-binaries.phusionpassenger.com/vagrant/boxes/latest/ubuntu-14.04-amd64-vbox.box"
# Or, for Ubuntu 12.04:
config.vm.provider :vmware_fusion do |f, override|
override.vm.box_url = "https://oss-binaries.phusionpassenger.com/vagrant/boxes/latest/ubuntu-14.04-amd64-vmwarefusion.box"
config.vm.provider "virtualbox" do |v|
v.memory = 3072
v.cpus = 2
end
if Dir.glob("#{File.dirname(__FILE__)}/.vagrant/machines/default/*/id").empty?
# Install Docker
pkg_cmd = "wget -q -O - https://get.docker.io/gpg | apt-key add -;" \
"echo deb http://get.docker.io/ubuntu docker main > /etc/apt/sources.list.d/docker.list;" \
"apt-get update -qq; apt-get install -q -y --force-yes lxc-docker; "
# Add vagrant user to the docker group
pkg_cmd << "usermod -a -G docker vagrant; "
config.vm.provision :shell, :inline => pkg_cmd
end
config.vm.provision "shell", inline: <<-SHELL
apt-get update
apt-get install -y --no-install-recommends wget curl jq shellcheck bsdmainutils psmisc
wget -qO- https://get.docker.com/ | sh
usermod -a -G docker vagrant
apt-get autoremove -y
curl -O https://storage.googleapis.com/golang/go1.8.linux-amd64.tar.gz
tar -xvf go1.8.linux-amd64.tar.gz
rm -rf /usr/local/go
mv go /usr/local
rm -f go1.8.linux-amd64.tar.gz
mkdir -p /home/vagrant/go/bin
echo 'export PATH=$PATH:/usr/local/go/bin:/home/vagrant/go/bin' >> /home/vagrant/.bash_profile
echo 'export GOPATH=/home/vagrant/go' >> /home/vagrant/.bash_profile
echo 'export LC_ALL=en_US.UTF-8' >> /home/vagrant/.bash_profile
mkdir -p /home/vagrant/go/src/github.com/tendermint
ln -s /vagrant /home/vagrant/go/src/github.com/tendermint/tendermint
chown -R vagrant:vagrant /home/vagrant/go
su - vagrant -c 'cd /home/vagrant/go/src/github.com/tendermint/tendermint && make get_vendor_deps'
SHELL
end

View File

@@ -37,7 +37,9 @@ func main() {
for i := 0; ; i++ {
binary.BigEndian.PutUint64(buf, uint64(i))
//txBytes := hex.EncodeToString(buf[:n])
request := rpctypes.NewRPCRequest("fakeid", "broadcast_tx", Arr(buf[:8]))
request := rpctypes.NewRPCRequest("fakeid",
"broadcast_tx",
map[string]interface{}{"tx": buf[:8]})
reqBytes := wire.JSONBytes(request)
//fmt.Println("!!", string(reqBytes))
fmt.Print(".")

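This hunk switches the benchmark client from positional to named JSON-RPC parameters. A rough sketch of the resulting request envelope, assuming a conventional JSON-RPC layout; the exact fields of `rpctypes.RPCRequest` are not shown in this diff, so the struct here is illustrative:
```
package main

import (
	"encoding/json"
	"fmt"
)

// rpcRequest mirrors the named-parameter envelope; the real
// rpctypes.RPCRequest may differ in field names and tags.
type rpcRequest struct {
	JSONRPC string                 `json:"jsonrpc"`
	ID      string                 `json:"id"`
	Method  string                 `json:"method"`
	Params  map[string]interface{} `json:"params"`
}

func main() {
	tx := []byte{0, 0, 0, 0, 0, 0, 0, 1} // 8-byte counter tx, as in the benchmark
	req := rpcRequest{
		JSONRPC: "2.0",
		ID:      "fakeid",
		Method:  "broadcast_tx",
		Params:  map[string]interface{}{"tx": tx},
	}
	b, err := json.Marshal(req)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // params keyed by name ("tx") rather than position
}
```
Named parameters make requests self-describing and let the server evolve a method's parameter set without breaking positional callers.
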
View File

@@ -5,8 +5,8 @@ import (
"sync"
"time"
flow "github.com/tendermint/flowcontrol"
. "github.com/tendermint/go-common"
flow "github.com/tendermint/go-flowrate/flowrate"
"github.com/tendermint/tendermint/types"
)
@@ -26,13 +26,13 @@ var peerTimeoutSeconds = time.Duration(15) // not const so we can override with
in sequence from peers that reported higher heights than ours.
Every so often we ask peers what height they're on so we can keep going.
Requests are continuously made for blocks of heigher heights until
Requests are continuously made for blocks of higher heights until
the limits. If most of the requests have no available peers, and we
are not at peer limits, we can probably switch to consensus reactor
*/
type BlockPool struct {
QuitService
BaseService
startTime time.Time
mtx sync.Mutex
@@ -58,19 +58,18 @@ func NewBlockPool(start int, requestsCh chan<- BlockRequest, timeoutsCh chan<- s
requestsCh: requestsCh,
timeoutsCh: timeoutsCh,
}
bp.QuitService = *NewQuitService(log, "BlockPool", bp)
bp.BaseService = *NewBaseService(log, "BlockPool", bp)
return bp
}
func (pool *BlockPool) OnStart() error {
pool.QuitService.OnStart()
go pool.makeRequestersRoutine()
pool.startTime = time.Now()
return nil
}
func (pool *BlockPool) OnStop() {
pool.QuitService.OnStop()
pool.BaseService.OnStop()
}
// Run spawns requesters as needed.
@@ -241,7 +240,9 @@ func (pool *BlockPool) RemovePeer(peerID string) {
func (pool *BlockPool) removePeer(peerID string) {
for _, requester := range pool.requesters {
if requester.getPeerID() == peerID {
pool.numPending++
if requester.getBlock() != nil {
pool.numPending++
}
go requester.redo() // pick another peer and ...
}
}
@@ -383,7 +384,7 @@ func (peer *bpPeer) onTimeout() {
//-------------------------------------
type bpRequester struct {
QuitService
BaseService
pool *BlockPool
height int
gotBlockCh chan struct{}
@@ -404,12 +405,11 @@ func newBPRequester(pool *BlockPool, height int) *bpRequester {
peerID: "",
block: nil,
}
bpr.QuitService = *NewQuitService(nil, "bpRequester", bpr)
bpr.BaseService = *NewBaseService(nil, "bpRequester", bpr)
return bpr
}
func (bpr *bpRequester) OnStart() error {
bpr.QuitService.OnStart()
go bpr.requestRoutine()
return nil
}

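The hunks above migrate `BlockPool` and `bpRequester` from the deprecated `QuitService` embed to `BaseService`. A condensed, self-contained sketch of the embedding pattern, assuming go-common's lifecycle semantics (Start runs `OnStart` exactly once; Stop closes the `Quit` channel that worker routines select on); the real type lives in `github.com/tendermint/go-common`:
```
package main

import "fmt"

// service is the hook interface an embedder implements.
type service interface {
	OnStart() error
	OnStop()
}

// BaseService is a minimal stand-in: Start runs OnStart once,
// Stop runs OnStop and closes Quit.
type BaseService struct {
	impl    service
	started bool
	Quit    chan struct{}
}

func NewBaseService(impl service) *BaseService {
	return &BaseService{impl: impl, Quit: make(chan struct{})}
}

func (bs *BaseService) Start() error {
	if bs.started {
		return nil
	}
	bs.started = true
	return bs.impl.OnStart()
}

func (bs *BaseService) Stop() {
	bs.impl.OnStop()
	close(bs.Quit) // unblocks any routine selecting on Quit
}

// pool mirrors BlockPool: OnStart spawns the work loop; OnStop is a
// no-op because closing Quit is enough to stop it.
type pool struct {
	*BaseService
	done chan struct{}
}

func (p *pool) OnStart() error {
	go func() {
		<-p.Quit // wait for Stop
		close(p.done)
	}()
	return nil
}

func (p *pool) OnStop() {}

func main() {
	p := &pool{done: make(chan struct{})}
	p.BaseService = NewBaseService(p)
	p.Start()
	p.Stop()
	<-p.done
	fmt.Println("pool stopped cleanly")
}
```
The `removePeer` hunk above layers onto this lifecycle: when a peer is dropped, a requester is re-counted as pending only if it had already received its block, which is the fast-sync stall fix called out in the changelog.
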
View File

@@ -35,6 +35,7 @@ func TestBasic(t *testing.T) {
requestsCh := make(chan BlockRequest, 100)
pool := NewBlockPool(start, requestsCh, timeoutsCh)
pool.Start()
defer pool.Stop()
// Introduce each peer.
go func() {
@@ -76,8 +77,6 @@ func TestBasic(t *testing.T) {
}()
}
}
pool.Stop()
}
func TestTimeout(t *testing.T) {
@@ -87,6 +86,7 @@ func TestTimeout(t *testing.T) {
requestsCh := make(chan BlockRequest, 100)
pool := NewBlockPool(start, requestsCh, timeoutsCh)
pool.Start()
defer pool.Stop()
for _, peer := range peers {
log.Info("Peer", "id", peer.id)
@@ -131,6 +131,4 @@ func TestTimeout(t *testing.T) {
log.Info("TEST: Pulled new BlockRequest", "request", request)
}
}
pool.Stop()
}

View File

@@ -3,12 +3,11 @@ package blockchain
import (
"bytes"
"errors"
"fmt"
"reflect"
"time"
. "github.com/tendermint/go-common"
"github.com/tendermint/go-events"
cmn "github.com/tendermint/go-common"
cfg "github.com/tendermint/go-config"
"github.com/tendermint/go-p2p"
"github.com/tendermint/go-wire"
"github.com/tendermint/tendermint/proxy"
@@ -17,7 +16,9 @@ import (
)
const (
BlockchainChannel = byte(0x40)
// BlockchainChannel is a channel for blocks and status updates (`BlockStore` height)
BlockchainChannel = byte(0x40)
defaultChannelCapacity = 100
defaultSleepIntervalMS = 500
trySyncIntervalMS = 100
@@ -42,9 +43,9 @@ type consensusReactor interface {
type BlockchainReactor struct {
p2p.BaseReactor
sw *p2p.Switch
config cfg.Config
state *sm.State
proxyAppConn proxy.AppConn // same as consensus.proxyAppConn
proxyAppConn proxy.AppConnConsensus // same as consensus.proxyAppConn
store *BlockStore
pool *BlockPool
fastSync bool
@@ -52,15 +53,16 @@ type BlockchainReactor struct {
timeoutsCh chan string
lastBlock *types.Block
evsw *events.EventSwitch
evsw types.EventSwitch
}
func NewBlockchainReactor(state *sm.State, proxyAppConn proxy.AppConn, store *BlockStore, fastSync bool) *BlockchainReactor {
// NewBlockchainReactor returns new reactor instance.
func NewBlockchainReactor(config cfg.Config, state *sm.State, proxyAppConn proxy.AppConnConsensus, store *BlockStore, fastSync bool) *BlockchainReactor {
if state.LastBlockHeight == store.Height()-1 {
store.height -= 1 // XXX HACK, make this better
store.height-- // XXX HACK, make this better
}
if state.LastBlockHeight != store.Height() {
PanicSanity(Fmt("state (%v) and store (%v) height mismatch", state.LastBlockHeight, store.Height()))
cmn.PanicSanity(cmn.Fmt("state (%v) and store (%v) height mismatch", state.LastBlockHeight, store.Height()))
}
requestsCh := make(chan BlockRequest, defaultChannelCapacity)
timeoutsCh := make(chan string, defaultChannelCapacity)
@@ -70,6 +72,7 @@ func NewBlockchainReactor(state *sm.State, proxyAppConn proxy.AppConn, store *Bl
timeoutsCh,
)
bcR := &BlockchainReactor{
config: config,
state: state,
proxyAppConn: proxyAppConn,
store: store,
@@ -82,6 +85,7 @@ func NewBlockchainReactor(state *sm.State, proxyAppConn proxy.AppConn, store *Bl
return bcR
}
// OnStart implements BaseService
func (bcR *BlockchainReactor) OnStart() error {
bcR.BaseReactor.OnStart()
if bcR.fastSync {
@@ -94,12 +98,13 @@ func (bcR *BlockchainReactor) OnStart() error {
return nil
}
// OnStop implements BaseService
func (bcR *BlockchainReactor) OnStop() {
bcR.BaseReactor.OnStop()
bcR.pool.Stop()
}
// Implements Reactor
// GetChannels implements Reactor
func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor {
return []*p2p.ChannelDescriptor{
&p2p.ChannelDescriptor{
@@ -110,19 +115,19 @@ func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor {
}
}
// Implements Reactor
// AddPeer implements Reactor by sending our state to peer.
func (bcR *BlockchainReactor) AddPeer(peer *p2p.Peer) {
// Send peer our state.
peer.Send(BlockchainChannel, struct{ BlockchainMessage }{&bcStatusResponseMessage{bcR.store.Height()}})
if !peer.Send(BlockchainChannel, struct{ BlockchainMessage }{&bcStatusResponseMessage{bcR.store.Height()}}) {
// doing nothing, will try later in `poolRoutine`
}
}
// Implements Reactor
// RemovePeer implements Reactor by removing peer from the pool.
func (bcR *BlockchainReactor) RemovePeer(peer *p2p.Peer, reason interface{}) {
// Remove peer from the pool.
bcR.pool.RemovePeer(peer.Key)
}
// Implements Reactor
// Receive implements Reactor by handling 4 types of messages (look below).
func (bcR *BlockchainReactor) Receive(chID byte, src *p2p.Peer, msgBytes []byte) {
_, msg, err := DecodeMessage(msgBytes)
if err != nil {
@@ -130,7 +135,7 @@ func (bcR *BlockchainReactor) Receive(chID byte, src *p2p.Peer, msgBytes []byte)
return
}
log.Notice("Receive", "src", src, "chID", chID, "msg", msg)
log.Debug("Receive", "src", src, "chID", chID, "msg", msg)
switch msg := msg.(type) {
case *bcBlockRequestMessage:
@@ -158,7 +163,7 @@ func (bcR *BlockchainReactor) Receive(chID byte, src *p2p.Peer, msgBytes []byte)
// Got a peer status. Unverified.
bcR.pool.SetPeerHeight(src.Key, msg.Height)
default:
log.Warn(Fmt("Unknown message type %v", reflect.TypeOf(msg)))
log.Warn(cmn.Fmt("Unknown message type %v", reflect.TypeOf(msg)))
}
}
@@ -220,31 +225,32 @@ FOR_LOOP:
// We need both to sync the first block.
break SYNC_LOOP
}
firstParts := first.MakePartSet()
firstParts := first.MakePartSet(bcR.config.GetInt("block_part_size")) // TODO: put part size in parts header?
firstPartsHeader := firstParts.Header()
// Finally, verify the first block using the second's commit
// NOTE: we can probably make this more efficient, but note that calling
// first.Hash() doesn't verify the tx contents, so MakePartSet() is
// currently necessary.
err := bcR.state.Validators.VerifyCommit(
bcR.state.ChainID, first.Hash(), firstPartsHeader, first.Height, second.LastCommit)
bcR.state.ChainID, types.BlockID{first.Hash(), firstPartsHeader}, first.Height, second.LastCommit)
if err != nil {
log.Info("error in validation", "error", err)
bcR.pool.RedoRequest(first.Height)
break SYNC_LOOP
} else {
bcR.pool.PopRequest()
err := bcR.state.ExecBlock(bcR.evsw, bcR.proxyAppConn, first, firstPartsHeader)
bcR.store.SaveBlock(first, firstParts, second.LastCommit)
// TODO: should we be firing events? need to fire NewBlock events manually ...
// NOTE: we could improve performance if we
// didn't make the app commit to disk every block
// ... but we would need a way to get the hash without it persisting
err := bcR.state.ApplyBlock(bcR.evsw, bcR.proxyAppConn, first, firstPartsHeader, types.MockMempool{})
if err != nil {
// TODO This is bad, are we zombie?
PanicQ(Fmt("Failed to process committed block: %v", err))
cmn.PanicQ(cmn.Fmt("Failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
}
/*
err = bcR.proxyAppConn.CommitSync()
if err != nil {
// TODO Handle gracefully.
PanicQ(Fmt("Failed to commit block at application: %v", err))
}
*/
bcR.store.SaveBlock(first, firstParts, second.LastCommit)
bcR.state.Save()
}
}
continue FOR_LOOP
@@ -254,18 +260,14 @@ FOR_LOOP:
}
}
func (bcR *BlockchainReactor) BroadcastStatusResponse() error {
bcR.Switch.Broadcast(BlockchainChannel, struct{ BlockchainMessage }{&bcStatusResponseMessage{bcR.store.Height()}})
return nil
}
// BroadcastStatusRequest broadcasts `BlockStore` height.
func (bcR *BlockchainReactor) BroadcastStatusRequest() error {
bcR.Switch.Broadcast(BlockchainChannel, struct{ BlockchainMessage }{&bcStatusRequestMessage{bcR.store.Height()}})
return nil
}
// implements events.Eventable
func (bcR *BlockchainReactor) SetEventSwitch(evsw *events.EventSwitch) {
// SetEventSwitch implements events.Eventable
func (bcR *BlockchainReactor) SetEventSwitch(evsw types.EventSwitch) {
bcR.evsw = evsw
}
@@ -279,6 +281,7 @@ const (
msgTypeStatusRequest = byte(0x21)
)
// BlockchainMessage is a generic message for this reactor.
type BlockchainMessage interface{}
var _ = wire.RegisterInterface(
@@ -289,6 +292,7 @@ var _ = wire.RegisterInterface(
wire.ConcreteType{&bcStatusRequestMessage{}, msgTypeStatusRequest},
)
// DecodeMessage decodes BlockchainMessage.
// TODO: ensure that bz is completely read.
func DecodeMessage(bz []byte) (msgType byte, msg BlockchainMessage, err error) {
msgType = bz[0]
@@ -296,7 +300,7 @@ func DecodeMessage(bz []byte) (msgType byte, msg BlockchainMessage, err error) {
r := bytes.NewReader(bz)
msg = wire.ReadBinary(struct{ BlockchainMessage }{}, r, maxBlockchainResponseSize, &n, &err).(struct{ BlockchainMessage }).BlockchainMessage
if err != nil && n != len(bz) {
err = errors.New("DecodeMessage() had bytes left over.")
err = errors.New("DecodeMessage() had bytes left over")
}
return
}
@@ -308,7 +312,7 @@ type bcBlockRequestMessage struct {
}
func (m *bcBlockRequestMessage) String() string {
return fmt.Sprintf("[bcBlockRequestMessage %v]", m.Height)
return cmn.Fmt("[bcBlockRequestMessage %v]", m.Height)
}
//-------------------------------------
@@ -319,7 +323,7 @@ type bcBlockResponseMessage struct {
}
func (m *bcBlockResponseMessage) String() string {
return fmt.Sprintf("[bcBlockResponseMessage %v]", m.Block.Height)
return cmn.Fmt("[bcBlockResponseMessage %v]", m.Block.Height)
}
//-------------------------------------
@@ -329,7 +333,7 @@ type bcStatusRequestMessage struct {
}
func (m *bcStatusRequestMessage) String() string {
return fmt.Sprintf("[bcStatusRequestMessage %v]", m.Height)
return cmn.Fmt("[bcStatusRequestMessage %v]", m.Height)
}
//-------------------------------------
@@ -339,5 +343,5 @@ type bcStatusResponseMessage struct {
}
func (m *bcStatusResponseMessage) String() string {
return fmt.Sprintf("[bcStatusResponseMessage %v]", m.Height)
return cmn.Fmt("[bcStatusResponseMessage %v]", m.Height)
}

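One of the changes in this reactor is that `VerifyCommit` now takes a single `types.BlockID` instead of a separate hash and part-set header. A small sketch using local stand-ins whose shape is copied from the 0.8.0 changelog above; the real definitions live in `github.com/tendermint/tendermint/types`:
```
package main

import "fmt"

// Stand-ins mirroring the changelog's struct definitions.
type PartSetHeader struct {
	Total int
	Hash  []byte
}

type BlockID struct {
	Hash        []byte
	PartsHeader PartSetHeader
}

func main() {
	// The reactor now wraps the block hash and part-set header into a
	// single BlockID before handing it to VerifyCommit, instead of
	// passing them as two separate arguments.
	blockHash := []byte{0xde, 0xad, 0xbe, 0xef} // placeholder hash
	partsHeader := PartSetHeader{Total: 1, Hash: []byte{0x01}}
	id := BlockID{Hash: blockHash, PartsHeader: partsHeader}
	fmt.Printf("verify commit against %X (parts: %d)\n", id.Hash, id.PartsHeader.Total)
}
```
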
View File

@@ -5,6 +5,7 @@ import (
"encoding/json"
"fmt"
"io"
"sync"
. "github.com/tendermint/go-common"
dbm "github.com/tendermint/go-db"
@@ -27,8 +28,10 @@ the Commit data outside the Block.
Panics indicate probable corruption in the data
*/
type BlockStore struct {
db dbm.DB
mtx sync.RWMutex
height int
db dbm.DB
}
func NewBlockStore(db dbm.DB) *BlockStore {
@@ -41,6 +44,8 @@ func NewBlockStore(db dbm.DB) *BlockStore {
// Height() returns the last known contiguous block height.
func (bs *BlockStore) Height() int {
bs.mtx.RLock()
defer bs.mtx.RUnlock()
return bs.height
}
@@ -59,12 +64,12 @@ func (bs *BlockStore) LoadBlock(height int) *types.Block {
if r == nil {
return nil
}
meta := wire.ReadBinary(&types.BlockMeta{}, r, 0, &n, &err).(*types.BlockMeta)
blockMeta := wire.ReadBinary(&types.BlockMeta{}, r, 0, &n, &err).(*types.BlockMeta)
if err != nil {
PanicCrisis(Fmt("Error reading block meta: %v", err))
}
bytez := []byte{}
for i := 0; i < meta.PartsHeader.Total; i++ {
for i := 0; i < blockMeta.BlockID.PartsHeader.Total; i++ {
part := bs.LoadBlockPart(height, i)
bytez = append(bytez, part.Bytes...)
}
@@ -96,11 +101,11 @@ func (bs *BlockStore) LoadBlockMeta(height int) *types.BlockMeta {
if r == nil {
return nil
}
meta := wire.ReadBinary(&types.BlockMeta{}, r, 0, &n, &err).(*types.BlockMeta)
blockMeta := wire.ReadBinary(&types.BlockMeta{}, r, 0, &n, &err).(*types.BlockMeta)
if err != nil {
PanicCrisis(Fmt("Error reading block meta: %v", err))
}
return meta
return blockMeta
}
// The +2/3 and other Precommit-votes for block at `height`.
@@ -141,16 +146,16 @@ func (bs *BlockStore) LoadSeenCommit(height int) *types.Commit {
// most recent height. Otherwise they'd stall at H-1.
func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) {
height := block.Height
if height != bs.height+1 {
PanicSanity(Fmt("BlockStore can only save contiguous blocks. Wanted %v, got %v", bs.height+1, height))
if height != bs.Height()+1 {
PanicSanity(Fmt("BlockStore can only save contiguous blocks. Wanted %v, got %v", bs.Height()+1, height))
}
if !blockParts.IsComplete() {
PanicSanity(Fmt("BlockStore can only save complete block part sets"))
}
// Save block meta
meta := types.NewBlockMeta(block, blockParts)
metaBytes := wire.BinaryBytes(meta)
blockMeta := types.NewBlockMeta(block, blockParts)
metaBytes := wire.BinaryBytes(blockMeta)
bs.db.Set(calcBlockMetaKey(height), metaBytes)
// Save block parts
@@ -163,6 +168,7 @@ func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, s
bs.db.Set(calcBlockCommitKey(height-1), blockCommitBytes)
// Save seen commit (seen +2/3 precommits for block)
// NOTE: we can delete this at a later height
seenCommitBytes := wire.BinaryBytes(seenCommit)
bs.db.Set(calcSeenCommitKey(height), seenCommitBytes)
@@ -170,12 +176,17 @@ func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, s
BlockStoreStateJSON{Height: height}.Save(bs.db)
// Done!
bs.mtx.Lock()
bs.height = height
bs.mtx.Unlock()
// Flush
bs.db.SetSync(nil, nil)
}
func (bs *BlockStore) saveBlockPart(height int, index int, part *types.Part) {
if height != bs.height+1 {
PanicSanity(Fmt("BlockStore can only save contiguous blocks. Wanted %v, got %v", bs.height+1, height))
if height != bs.Height()+1 {
PanicSanity(Fmt("BlockStore can only save contiguous blocks. Wanted %v, got %v", bs.Height()+1, height))
}
partBytes := wire.BinaryBytes(part)
bs.db.Set(calcBlockPartKey(height, index), partBytes)
@@ -212,7 +223,7 @@ func (bsj BlockStoreStateJSON) Save(db dbm.DB) {
if err != nil {
PanicSanity(Fmt("Could not marshal state bytes: %v", err))
}
db.Set(blockStoreKey, bytes)
db.SetSync(blockStoreKey, bytes)
}
func LoadBlockStoreStateJSON(db dbm.DB) BlockStoreStateJSON {

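The `BlockStore` change wraps `height` in a `sync.RWMutex` so `Height()` can be read concurrently while `SaveBlock` advances it. A minimal sketch of that accessor pattern:
```
package main

import (
	"fmt"
	"sync"
)

// store mirrors the new BlockStore layout: height is only touched under
// the mutex, readers take the read lock, writers take the write lock.
type store struct {
	mtx    sync.RWMutex
	height int
}

func (s *store) Height() int {
	s.mtx.RLock()
	defer s.mtx.RUnlock()
	return s.height
}

func (s *store) setHeight(h int) {
	s.mtx.Lock()
	s.height = h
	s.mtx.Unlock()
}

func main() {
	s := &store{}
	s.setHeight(1)
	fmt.Println(s.Height()) // 1, safe to read from any goroutine
}
```
The switch to `db.SetSync` in the same hunk flushes the underlying DB on save, so the persisted height and the in-memory height cannot drift across a crash.
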
View File

@@ -1,35 +1,33 @@
---
machine:
environment:
MACH_PREFIX: tendermint-test-mach
GOPATH: /home/ubuntu/.go_workspace
REPO: $GOPATH/src/github.com/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME
DOCKER_VERSION: 1.10.0
DOCKER_MACHINE_VERSION: 0.6.0
DOCKER_MACHINE_VERSION: 0.9.0
GOPATH: "$HOME/.go_project"
PROJECT_PARENT_PATH: "$GOPATH/src/github.com/$CIRCLE_PROJECT_USERNAME"
PROJECT_PATH: "$PROJECT_PARENT_PATH/$CIRCLE_PROJECT_REPONAME"
hosts:
circlehost: 127.0.0.1
localhost: 127.0.0.1
pre:
- curl -sSL https://s3.amazonaws.com/circle-downloads/install-circleci-docker.sh | sudo bash -s -- $DOCKER_VERSION
services:
- docker
checkout:
post:
- rm -rf $REPO
- mkdir -p $HOME/.go_workspace/src/github.com/$CIRCLE_PROJECT_USERNAME
- mv $HOME/$CIRCLE_PROJECT_REPONAME $REPO
# - git submodule sync
# - git submodule update --init # use submodules
dependencies:
override:
- sudo curl -sSL -o /usr/bin/docker-machine https://github.com/docker/machine/releases/download/v$DOCKER_MACHINE_VERSION/docker-machine-linux-x86_64; sudo chmod 0755 /usr/bin/docker-machine
- curl -sSL https://s3.amazonaws.com/circle-downloads/install-circleci-docker.sh | sudo bash -s -- $DOCKER_VERSION
- sudo start docker
- sudo curl -sSL -o /usr/bin/docker-machine "https://github.com/docker/machine/releases/download/v$DOCKER_MACHINE_VERSION/docker-machine-`uname -s`-`uname -m`"; sudo chmod 0755 /usr/bin/docker-machine
- mkdir -p "$PROJECT_PARENT_PATH"
- ln -sf "$HOME/$CIRCLE_PROJECT_REPONAME/" "$PROJECT_PATH"
post:
- go version
- docker version
- docker-machine version
test:
override:
- "cd $REPO && make test_integrations"
- cd "$PROJECT_PATH" && set -o pipefail && make test_integrations 2>&1 | tee test_integrations.log:
timeout: 1800
post:
- bash <(curl -s https://codecov.io/bash)
- cd "$PROJECT_PATH" && mv test_integrations.log "${CIRCLE_ARTIFACTS}"
- cd "$PROJECT_PATH" && bash <(curl -s https://codecov.io/bash) -f coverage.txt
- cd "$PROJECT_PATH" && mv coverage.txt "${CIRCLE_ARTIFACTS}"
- cd "$PROJECT_PATH" && cp test/logs/messages "${CIRCLE_ARTIFACTS}/docker_logs.txt"

View File

@@ -0,0 +1,27 @@
package commands
import (
"fmt"
"github.com/spf13/cobra"
"github.com/tendermint/go-wire"
"github.com/tendermint/tendermint/types"
)
var genValidatorCmd = &cobra.Command{
Use: "gen_validator",
Short: "Generate new validator keypair",
Run: genValidator,
}
func init() {
RootCmd.AddCommand(genValidatorCmd)
}
func genValidator(cmd *cobra.Command, args []string) {
privValidator := types.GenPrivValidator()
privValidatorJSONBytes := wire.JSONBytesPretty(privValidator)
fmt.Printf(`%v
`, string(privValidatorJSONBytes))
}

View File

@@ -0,0 +1,47 @@
package commands
import (
"os"
"github.com/spf13/cobra"
cmn "github.com/tendermint/go-common"
"github.com/tendermint/tendermint/types"
)
var initFilesCmd = &cobra.Command{
Use: "init",
Short: "Initialize Tendermint",
Run: initFiles,
}
func init() {
RootCmd.AddCommand(initFilesCmd)
}
func initFiles(cmd *cobra.Command, args []string) {
privValFile := config.GetString("priv_validator_file")
if _, err := os.Stat(privValFile); os.IsNotExist(err) {
privValidator := types.GenPrivValidator()
privValidator.SetFile(privValFile)
privValidator.Save()
genFile := config.GetString("genesis_file")
if _, err := os.Stat(genFile); os.IsNotExist(err) {
genDoc := types.GenesisDoc{
ChainID: cmn.Fmt("test-chain-%v", cmn.RandStr(6)),
}
genDoc.Validators = []types.GenesisValidator{types.GenesisValidator{
PubKey: privValidator.PubKey,
Amount: 10,
}}
genDoc.SaveAs(genFile)
}
log.Notice("Initialized tendermint", "genesis", config.GetString("genesis_file"), "priv_validator", config.GetString("priv_validator_file"))
} else {
log.Notice("Already initialized", "priv_validator", config.GetString("priv_validator_file"))
}
}

View File

@@ -1,13 +1,25 @@
package main
package commands
import (
"encoding/json"
"fmt"
"github.com/spf13/cobra"
"github.com/tendermint/go-p2p/upnp"
)
func probe_upnp() {
var probeUpnpCmd = &cobra.Command{
Use: "probe_upnp",
Short: "Test UPnP functionality",
Run: probeUpnp,
}
func init() {
RootCmd.AddCommand(probeUpnpCmd)
}
func probeUpnp(cmd *cobra.Command, args []string) {
capabilities, err := upnp.Probe()
if err != nil {

View File

@@ -0,0 +1,40 @@
package commands
import (
"fmt"
"github.com/tendermint/tendermint/consensus"
"github.com/spf13/cobra"
)
var replayCmd = &cobra.Command{
Use: "replay [walfile]",
Short: "Replay messages from WAL",
Run: func(cmd *cobra.Command, args []string) {
if len(args) == 1 {
consensus.RunReplayFile(config, args[0], false)
} else {
fmt.Println("replay requires an argument (walfile)")
}
},
}
var replayConsoleCmd = &cobra.Command{
Use: "replay_console [walfile]",
Short: "Replay messages from WAL in a console",
Run: func(cmd *cobra.Command, args []string) {
if len(args) == 1 {
consensus.RunReplayFile(config, args[0], true)
} else {
fmt.Println("replay_console requires an argument (walfile)")
}
},
}
func init() {
RootCmd.AddCommand(replayCmd)
RootCmd.AddCommand(replayConsoleCmd)
}

View File

@@ -0,0 +1,62 @@
package commands
import (
"os"
"github.com/spf13/cobra"
cfg "github.com/tendermint/go-config"
"github.com/tendermint/log15"
"github.com/tendermint/tendermint/types"
)
var resetAllCmd = &cobra.Command{
Use: "unsafe_reset_all",
Short: "(unsafe) Remove all the data and WAL, reset this node's validator",
Run: resetAll,
}
var resetPrivValidatorCmd = &cobra.Command{
Use: "unsafe_reset_priv_validator",
Short: "(unsafe) Reset this node's validator",
Run: resetPrivValidator,
}
func init() {
RootCmd.AddCommand(resetAllCmd)
RootCmd.AddCommand(resetPrivValidatorCmd)
}
// XXX: this is totally unsafe.
// it's only suitable for testnets.
func resetAll(cmd *cobra.Command, args []string) {
ResetAll(config, log)
}
// XXX: this is totally unsafe.
// it's only suitable for testnets.
func resetPrivValidator(cmd *cobra.Command, args []string) {
ResetPrivValidator(config, log)
}
// Exported so other CLI tools can use it
func ResetAll(c cfg.Config, l log15.Logger) {
ResetPrivValidator(c, l)
os.RemoveAll(c.GetString("db_dir"))
}
func ResetPrivValidator(c cfg.Config, l log15.Logger) {
// Get PrivValidator
var privValidator *types.PrivValidator
privValidatorFile := c.GetString("priv_validator_file")
if _, err := os.Stat(privValidatorFile); err == nil {
privValidator = types.LoadPrivValidator(privValidatorFile)
privValidator.Reset()
l.Notice("Reset PrivValidator", "file", privValidatorFile)
} else {
privValidator = types.GenPrivValidator()
privValidator.SetFile(privValidatorFile)
privValidator.Save()
l.Notice("Generated PrivValidator", "file", privValidatorFile)
}
}

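Exporting `ResetAll` and `ResetPrivValidator` with explicit config and logger arguments is what lets external consumers (the changelog mentions basecoin) avoid the global-config bug fixed in 0.9.2. A hedged sketch of such a caller, assuming the commands package lives at `cmd/tendermint/commands`:
```
package main

import (
	"github.com/tendermint/go-logger"
	"github.com/tendermint/tendermint/cmd/tendermint/commands"
	tmcfg "github.com/tendermint/tendermint/config/tendermint"
)

func main() {
	// Supply our own config and logger instead of tendermint's globals,
	// which is exactly what the 0.9.2 fix enables for external CLIs.
	config := tmcfg.GetConfig("")        // "" means the default root dir
	log := logger.New("module", "mycli") // "mycli" is a hypothetical name
	commands.ResetAll(config, log)
}
```
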
View File

@@ -0,0 +1,31 @@
package commands
import (
"github.com/spf13/cobra"
"github.com/tendermint/go-logger"
tmcfg "github.com/tendermint/tendermint/config/tendermint"
)
var (
config = tmcfg.GetConfig("")
log = logger.New("module", "main")
)
//global flag
var logLevel string
var RootCmd = &cobra.Command{
Use: "tendermint",
Short: "Tendermint Core (BFT Consensus) in Go",
PersistentPreRun: func(cmd *cobra.Command, args []string) {
// set the log level in the config and logger
config.Set("log_level", logLevel)
logger.SetLogLevel(logLevel)
},
}
func init() {
//parse flag and set config
RootCmd.PersistentFlags().StringVar(&logLevel, "log_level", config.GetString("log_level"), "Log level")
}

View File

@@ -0,0 +1,124 @@
package commands
import (
"io/ioutil"
"time"
"github.com/spf13/cobra"
. "github.com/tendermint/go-common"
"github.com/tendermint/tendermint/node"
"github.com/tendermint/tendermint/types"
)
var runNodeCmd = &cobra.Command{
Use: "node",
Short: "Run the tendermint node",
PreRun: setConfigFlags,
Run: runNode,
}
//flags
var (
moniker string
nodeLaddr string
seeds string
fastSync bool
skipUPNP bool
rpcLaddr string
grpcLaddr string
proxyApp string
abciTransport string
pex bool
)
func init() {
// configuration options
runNodeCmd.Flags().StringVar(&moniker, "moniker", config.GetString("moniker"),
"Node Name")
runNodeCmd.Flags().StringVar(&nodeLaddr, "node_laddr", config.GetString("node_laddr"),
"Node listen address. (0.0.0.0:0 means any interface, any port)")
runNodeCmd.Flags().StringVar(&seeds, "seeds", config.GetString("seeds"),
"Comma delimited host:port seed nodes")
runNodeCmd.Flags().BoolVar(&fastSync, "fast_sync", config.GetBool("fast_sync"),
"Fast blockchain syncing")
runNodeCmd.Flags().BoolVar(&skipUPNP, "skip_upnp", config.GetBool("skip_upnp"),
"Skip UPNP configuration")
runNodeCmd.Flags().StringVar(&rpcLaddr, "rpc_laddr", config.GetString("rpc_laddr"),
"RPC listen address. Port required")
runNodeCmd.Flags().StringVar(&grpcLaddr, "grpc_laddr", config.GetString("grpc_laddr"),
"GRPC listen address (BroadcastTx only). Port required")
runNodeCmd.Flags().StringVar(&proxyApp, "proxy_app", config.GetString("proxy_app"),
"Proxy app address, or 'nilapp' or 'dummy' for local testing.")
runNodeCmd.Flags().StringVar(&abciTransport, "abci", config.GetString("abci"),
"Specify abci transport (socket | grpc)")
// feature flags
runNodeCmd.Flags().BoolVar(&pex, "pex", config.GetBool("pex_reactor"),
"Enable Peer-Exchange (dev feature)")
RootCmd.AddCommand(runNodeCmd)
}
func setConfigFlags(cmd *cobra.Command, args []string) {
// Merge parsed flag values onto config
config.Set("moniker", moniker)
config.Set("node_laddr", nodeLaddr)
config.Set("seeds", seeds)
config.Set("fast_sync", fastSync)
config.Set("skip_upnp", skipUPNP)
config.Set("rpc_laddr", rpcLaddr)
config.Set("grpc_laddr", grpcLaddr)
config.Set("proxy_app", proxyApp)
config.Set("abci", abciTransport)
config.Set("pex_reactor", pex)
}
// Users wishing to:
// * Use an external signer for their validators
// * Supply an in-proc abci app
// should import github.com/tendermint/tendermint/node and implement
// their own run_node to call node.NewNode (instead of node.NewNodeDefault)
// with their custom priv validator and/or custom proxy.ClientCreator
func runNode(cmd *cobra.Command, args []string) {
// Wait until the genesis doc becomes available
// This is for Mintnet compatibility.
// TODO: If Mintnet gets deprecated or genesis_file is
// always available, remove.
genDocFile := config.GetString("genesis_file")
if !FileExists(genDocFile) {
log.Notice(Fmt("Waiting for genesis file %v...", genDocFile))
for {
time.Sleep(time.Second)
if !FileExists(genDocFile) {
continue
}
jsonBlob, err := ioutil.ReadFile(genDocFile)
if err != nil {
Exit(Fmt("Couldn't read GenesisDoc file: %v", err))
}
genDoc, err := types.GenesisDocFromJSON(jsonBlob)
if err != nil {
Exit(Fmt("Error reading GenesisDoc: %v", err))
}
if genDoc.ChainID == "" {
Exit(Fmt("Genesis doc %v must include non-empty chain_id", genDocFile))
}
config.Set("chain_id", genDoc.ChainID)
}
}
// Create & start node
n := node.NewNodeDefault(config)
if _, err := n.Start(); err != nil {
Exit(Fmt("Failed to start node: %v", err))
} else {
log.Notice("Started node", "nodeInfo", n.Switch().NodeInfo())
}
// Trap signal, run forever.
n.RunForever()
}
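The comment above names the extension point, but this diff does not show node.NewNode's signature, so the following is only a sketch of the intended shape: a custom runner supplying its own priv validator and proxy.ClientCreator, with both parameters assumed from that comment.

// Hypothetical custom runner per the comment above; the exact
// node.NewNode signature is assumed, not shown in this diff.
func runCustomNode(config cfg.Config, privVal *types.PrivValidator, cc proxy.ClientCreator) {
	n := node.NewNode(config, privVal, cc)
	if _, err := n.Start(); err != nil {
		Exit(Fmt("Failed to start node: %v", err))
	}
	n.RunForever()
}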


@@ -1,13 +1,25 @@
package main
package commands
import (
"fmt"
"github.com/spf13/cobra"
"github.com/tendermint/go-wire"
"github.com/tendermint/tendermint/types"
)
func show_validator() {
var showValidatorCmd = &cobra.Command{
Use: "show_validator",
Short: "Show this node's validator info",
Run: showValidator,
}
func init() {
RootCmd.AddCommand(showValidatorCmd)
}
func showValidator(cmd *cobra.Command, args []string) {
privValidatorFile := config.GetString("priv_validator_file")
privValidator := types.LoadOrGenPrivValidator(privValidatorFile)
fmt.Println(string(wire.JSONBytesPretty(privValidator.PubKey)))
}


@@ -0,0 +1,93 @@
package commands
import (
"fmt"
"path"
"time"
"github.com/spf13/cobra"
cmn "github.com/tendermint/go-common"
"github.com/tendermint/tendermint/types"
)
var testnetFilesCmd = &cobra.Command{
Use: "testnet",
Short: "Initialize files for a Tendermint testnet",
Run: testnetFiles,
}
// flags
var (
nValidators int
dataDir string
)
func init() {
testnetFilesCmd.Flags().IntVar(&nValidators, "n", 4,
"Number of validators to initialize the testnet with")
testnetFilesCmd.Flags().StringVar(&dataDir, "dir", "mytestnet",
"Directory to store initialization data for the testnet")
RootCmd.AddCommand(testnetFilesCmd)
}
func testnetFiles(cmd *cobra.Command, args []string) {
genVals := make([]types.GenesisValidator, nValidators)
// Initialize core dir and priv_validator.json's
for i := 0; i < nValidators; i++ {
mach := cmn.Fmt("mach%d", i)
err := initMachCoreDirectory(dataDir, mach)
if err != nil {
cmn.Exit(err.Error())
}
// Read priv_validator.json to populate vals
privValFile := path.Join(dataDir, mach, "priv_validator.json")
privVal := types.LoadPrivValidator(privValFile)
genVals[i] = types.GenesisValidator{
PubKey: privVal.PubKey,
Amount: 1,
Name: mach,
}
}
// Generate genesis doc from generated validators
genDoc := &types.GenesisDoc{
GenesisTime: time.Now(),
ChainID: "chain-" + cmn.RandStr(6),
Validators: genVals,
}
// Write genesis file.
for i := 0; i < nValidators; i++ {
mach := cmn.Fmt("mach%d", i)
genDoc.SaveAs(path.Join(dataDir, mach, "genesis.json"))
}
fmt.Println(cmn.Fmt("Successfully initialized %v node directories", nValidators))
}
// Initialize per-machine core directory
func initMachCoreDirectory(base, mach string) error {
dir := path.Join(base, mach)
err := cmn.EnsureDir(dir, 0777)
if err != nil {
return err
}
// Create priv_validator.json file if not present
ensurePrivValidator(path.Join(dir, "priv_validator.json"))
return nil
}
func ensurePrivValidator(file string) {
if cmn.FileExists(file) {
return
}
privValidator := types.GenPrivValidator()
privValidator.SetFile(file)
privValidator.Save()
}
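With the defaults above, tendermint testnet --n 4 --dir mytestnet yields mytestnet/mach0 through mytestnet/mach3, each containing its own priv_validator.json plus an identical genesis.json listing all four validators.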


@@ -0,0 +1,21 @@
package commands
import (
"fmt"
"github.com/spf13/cobra"
"github.com/tendermint/tendermint/version"
)
var versionCmd = &cobra.Command{
Use: "version",
Short: "Show version info",
Run: func(cmd *cobra.Command, args []string) {
fmt.Println(version.Version)
},
}
func init() {
RootCmd.AddCommand(versionCmd)
}


@@ -1,53 +0,0 @@
package main
import (
flag "github.com/spf13/pflag"
"os"
cfg "github.com/tendermint/go-config"
)
func parseFlags(config cfg.Config, args []string) {
var (
printHelp bool
moniker string
nodeLaddr string
seeds string
fastSync bool
skipUPNP bool
rpcLaddr string
logLevel string
proxyApp string
tmspTransport string
)
// Declare flags
var flags = flag.NewFlagSet("main", flag.ExitOnError)
flags.BoolVar(&printHelp, "help", false, "Print this help message.")
flags.StringVar(&moniker, "moniker", config.GetString("moniker"), "Node Name")
flags.StringVar(&nodeLaddr, "node_laddr", config.GetString("node_laddr"), "Node listen address. (0.0.0.0:0 means any interface, any port)")
flags.StringVar(&seeds, "seeds", config.GetString("seeds"), "Comma delimited host:port seed nodes")
flags.BoolVar(&fastSync, "fast_sync", config.GetBool("fast_sync"), "Fast blockchain syncing")
flags.BoolVar(&skipUPNP, "skip_upnp", config.GetBool("skip_upnp"), "Skip UPNP configuration")
flags.StringVar(&rpcLaddr, "rpc_laddr", config.GetString("rpc_laddr"), "RPC listen address. Port required")
flags.StringVar(&logLevel, "log_level", config.GetString("log_level"), "Log level")
flags.StringVar(&proxyApp, "proxy_app", config.GetString("proxy_app"),
"Proxy app address, or 'nilapp' or 'dummy' for local testing.")
flags.StringVar(&tmspTransport, "tmsp", config.GetString("tmsp"), "Specify tmsp transport (socket | grpc)")
flags.Parse(args)
if printHelp {
flags.PrintDefaults()
os.Exit(0)
}
// Merge parsed flag values onto app.
config.Set("moniker", moniker)
config.Set("node_laddr", nodeLaddr)
config.Set("seeds", seeds)
config.Set("fast_sync", fastSync)
config.Set("skip_upnp", skipUPNP)
config.Set("rpc_laddr", rpcLaddr)
config.Set("log_level", logLevel)
config.Set("proxy_app", proxyApp)
config.Set("tmsp", tmspTransport)
}


@@ -1,23 +0,0 @@
package main
import (
"fmt"
"github.com/tendermint/go-wire"
"github.com/tendermint/tendermint/types"
)
func gen_validator() {
privValidator := types.GenPrivValidator()
privValidatorJSONBytes := wire.JSONBytesPretty(privValidator)
fmt.Printf(`Generated a new validator!
Paste the following JSON into your %v file
%v
`,
config.GetString("priv_validator_file"),
string(privValidatorJSONBytes),
)
}


@@ -1,23 +0,0 @@
package main
import (
. "github.com/tendermint/go-common"
"github.com/tendermint/tendermint/types"
)
func init_files() {
privValidator := types.GenPrivValidator()
privValidator.SetFile(config.GetString("priv_validator_file"))
privValidator.Save()
genDoc := types.GenesisDoc{
ChainID: Fmt("test-chain-%v", RandStr(6)),
}
genDoc.Validators = []types.GenesisValidator{types.GenesisValidator{
PubKey: privValidator.PubKey,
Amount: 10,
}}
genDoc.SaveAs(config.GetString("genesis_file"))
}


@@ -1,7 +0,0 @@
package main
import (
"github.com/tendermint/go-logger"
)
var log = logger.New("module", "main")


@@ -4,63 +4,12 @@ import (
"fmt"
"os"
. "github.com/tendermint/go-common"
cfg "github.com/tendermint/go-config"
"github.com/tendermint/go-logger"
tmcfg "github.com/tendermint/tendermint/config/tendermint"
"github.com/tendermint/tendermint/node"
"github.com/tendermint/tendermint/version"
"github.com/tendermint/tendermint/cmd/tendermint/commands"
)
var config cfg.Config
func main() {
args := os.Args[1:]
if len(args) == 0 {
fmt.Println(`Tendermint
Commands:
node Run the tendermint node
show_validator Show this node's validator info
gen_validator Generate new validator keypair
probe_upnp Test UPnP functionality
version Show version info
`)
return
}
// Get configuration
config = tmcfg.GetConfig("")
parseFlags(config, args[1:]) // Command line overrides
// set the log level
logger.SetLogLevel(config.GetString("log_level"))
switch args[0] {
case "node":
node.RunNode(config)
case "replay":
if len(args) > 1 && args[1] == "console" {
node.RunReplayConsole(config)
} else {
node.RunReplay(config)
}
case "init":
init_files()
case "show_validator":
show_validator()
case "gen_validator":
gen_validator()
case "probe_upnp":
probe_upnp()
case "unsafe_reset_all":
reset_all()
case "unsafe_reset_priv_validator":
reset_priv_validator()
case "version":
fmt.Println(version.Version)
default:
Exit(Fmt("Unknown command %v\n", args[0]))
if err := commands.RootCmd.Execute(); err != nil {
fmt.Println(err)
os.Exit(1)
}
}


@@ -1,36 +0,0 @@
package main
import (
"os"
"github.com/tendermint/tendermint/types"
)
// NOTE: this is totally unsafe.
// it's only suitable for testnets.
func reset_all() {
reset_priv_validator()
os.RemoveAll(config.GetString("db_dir"))
os.Remove(config.GetString("cswal"))
}
// NOTE: this is totally unsafe.
// it's only suitable for testnets.
func reset_priv_validator() {
// Get PrivValidator
var privValidator *types.PrivValidator
privValidatorFile := config.GetString("priv_validator_file")
if _, err := os.Stat(privValidatorFile); err == nil {
privValidator = types.LoadPrivValidator(privValidatorFile)
privValidator.LastHeight = 0
privValidator.LastRound = 0
privValidator.LastStep = 0
privValidator.Save()
log.Notice("Reset PrivValidator", "file", privValidatorFile)
} else {
privValidator = types.GenPrivValidator()
privValidator.SetFile(privValidatorFile)
privValidator.Save()
log.Notice("Generated PrivValidator", "file", privValidatorFile)
}
}


@@ -11,6 +11,10 @@ import (
func getTMRoot(rootDir string) string {
if rootDir == "" {
rootDir = os.Getenv("TMHOME")
}
if rootDir == "" {
// deprecated, use TMHOME (TODO: remove in TM 0.11.0)
rootDir = os.Getenv("TMROOT")
}
if rootDir == "" {
@@ -22,6 +26,7 @@ func getTMRoot(rootDir string) string {
func initTMRoot(rootDir string) {
rootDir = getTMRoot(rootDir)
EnsureDir(rootDir, 0700)
EnsureDir(rootDir+"/data", 0700)
configFilePath := path.Join(rootDir, "config.toml")
@@ -53,26 +58,34 @@ func GetConfig(rootDir string) cfg.Config {
mapConfig.SetRequired("chain_id") // blows up if you try to use it before setting.
mapConfig.SetDefault("genesis_file", rootDir+"/genesis.json")
mapConfig.SetDefault("proxy_app", "tcp://127.0.0.1:46658")
mapConfig.SetDefault("tmsp", "socket")
mapConfig.SetDefault("abci", "socket")
mapConfig.SetDefault("moniker", "anonymous")
mapConfig.SetDefault("node_laddr", "0.0.0.0:46656")
mapConfig.SetDefault("node_laddr", "tcp://0.0.0.0:46656")
mapConfig.SetDefault("seeds", "")
// mapConfig.SetDefault("seeds", "goldenalchemist.chaintest.net:46656")
mapConfig.SetDefault("fast_sync", true)
mapConfig.SetDefault("skip_upnp", false)
mapConfig.SetDefault("addrbook_file", rootDir+"/addrbook.json")
mapConfig.SetDefault("addrbook_strict", true) // disable to allow connections locally
mapConfig.SetDefault("pex_reactor", false) // enable for peer exchange
mapConfig.SetDefault("priv_validator_file", rootDir+"/priv_validator.json")
mapConfig.SetDefault("db_backend", "leveldb")
mapConfig.SetDefault("db_dir", rootDir+"/data")
mapConfig.SetDefault("log_level", "info")
mapConfig.SetDefault("rpc_laddr", "0.0.0.0:46657")
mapConfig.SetDefault("rpc_laddr", "tcp://0.0.0.0:46657")
mapConfig.SetDefault("grpc_laddr", "")
mapConfig.SetDefault("prof_laddr", "")
mapConfig.SetDefault("revision_file", rootDir+"/revision")
mapConfig.SetDefault("cswal", rootDir+"/data/cswal")
mapConfig.SetDefault("cswal_light", false)
mapConfig.SetDefault("cs_wal_file", rootDir+"/data/cs.wal/wal")
mapConfig.SetDefault("cs_wal_light", false)
mapConfig.SetDefault("filter_peers", false)
mapConfig.SetDefault("block_size", 10000)
mapConfig.SetDefault("block_size", 10000) // max number of txs
mapConfig.SetDefault("block_part_size", 65536) // part size 64K
mapConfig.SetDefault("disable_data_hash", false)
// all timeouts are in ms
mapConfig.SetDefault("timeout_handshake", 10000)
mapConfig.SetDefault("timeout_propose", 3000)
mapConfig.SetDefault("timeout_propose_delta", 500)
mapConfig.SetDefault("timeout_prevote", 1000)
@@ -80,9 +93,15 @@ func GetConfig(rootDir string) cfg.Config {
mapConfig.SetDefault("timeout_precommit", 1000)
mapConfig.SetDefault("timeout_precommit_delta", 500)
mapConfig.SetDefault("timeout_commit", 1000)
// make progress asap (no `timeout_commit`) on full precommit votes
mapConfig.SetDefault("skip_timeout_commit", false)
mapConfig.SetDefault("mempool_recheck", true)
mapConfig.SetDefault("mempool_recheck_empty", true)
mapConfig.SetDefault("mempool_broadcast", true)
mapConfig.SetDefault("mempool_wal_dir", rootDir+"/data/mempool.wal")
mapConfig.SetDefault("tx_index", "kv")
return mapConfig
}
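The propose/prevote/precommit timeouts come in base/delta pairs of milliseconds. The scheduling code is not part of this diff, so the following is an assumption about how such pairs are typically combined: the timeout grows linearly with the round number.

// Sketch only: per-round timeout as base + round*delta, using the
// defaults set above. The actual scheduling code is not in this diff.
func timeoutPropose(round int) time.Duration {
	base := 3000 * time.Millisecond  // timeout_propose
	delta := 500 * time.Millisecond  // timeout_propose_delta
	return base + time.Duration(round)*delta
}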
@@ -92,12 +111,12 @@ var defaultConfigTmpl = `# This is a TOML config file.
proxy_app = "tcp://127.0.0.1:46658"
moniker = "__MONIKER__"
node_laddr = "0.0.0.0:46656"
node_laddr = "tcp://0.0.0.0:46656"
seeds = ""
fast_sync = true
db_backend = "leveldb"
log_level = "notice"
rpc_laddr = "0.0.0.0:46657"
rpc_laddr = "tcp://0.0.0.0:46657"
`
func defaultConfig(moniker string) (defaultConfig string) {


@@ -9,6 +9,7 @@ import (
. "github.com/tendermint/go-common"
cfg "github.com/tendermint/go-config"
"github.com/tendermint/go-logger"
)
func init() {
@@ -33,6 +34,7 @@ func initTMRoot(rootDir string) {
}
// Create new dir
EnsureDir(rootDir, 0700)
EnsureDir(rootDir+"/data", 0700)
configFilePath := path.Join(rootDir, "config.toml")
genesisFilePath := path.Join(rootDir, "genesis.json")
@@ -68,34 +70,46 @@ func ResetConfig(localPath string) cfg.Config {
mapConfig.SetDefault("chain_id", "tendermint_test")
mapConfig.SetDefault("genesis_file", rootDir+"/genesis.json")
mapConfig.SetDefault("proxy_app", "dummy")
mapConfig.SetDefault("tmsp", "socket")
mapConfig.SetDefault("abci", "socket")
mapConfig.SetDefault("moniker", "anonymous")
mapConfig.SetDefault("node_laddr", "0.0.0.0:36656")
mapConfig.SetDefault("node_laddr", "tcp://0.0.0.0:36656")
mapConfig.SetDefault("fast_sync", false)
mapConfig.SetDefault("skip_upnp", true)
mapConfig.SetDefault("addrbook_file", rootDir+"/addrbook.json")
mapConfig.SetDefault("addrbook_strict", true) // disable to allow connections locally
mapConfig.SetDefault("pex_reactor", false) // enable for peer exchange
mapConfig.SetDefault("priv_validator_file", rootDir+"/priv_validator.json")
mapConfig.SetDefault("db_backend", "memdb")
mapConfig.SetDefault("db_dir", rootDir+"/data")
mapConfig.SetDefault("log_level", "debug")
mapConfig.SetDefault("rpc_laddr", "0.0.0.0:36657")
mapConfig.SetDefault("log_level", "info")
mapConfig.SetDefault("rpc_laddr", "tcp://0.0.0.0:36657")
mapConfig.SetDefault("grpc_laddr", "tcp://0.0.0.0:36658")
mapConfig.SetDefault("prof_laddr", "")
mapConfig.SetDefault("revision_file", rootDir+"/revision")
mapConfig.SetDefault("cswal", rootDir+"/data/cswal")
mapConfig.SetDefault("cswal_light", false)
mapConfig.SetDefault("cs_wal_file", rootDir+"/data/cs.wal/wal")
mapConfig.SetDefault("cs_wal_light", false)
mapConfig.SetDefault("filter_peers", false)
mapConfig.SetDefault("block_size", 10000)
mapConfig.SetDefault("block_part_size", 65536) // part size 64K
mapConfig.SetDefault("disable_data_hash", false)
mapConfig.SetDefault("timeout_propose", 3000)
mapConfig.SetDefault("timeout_propose_delta", 1000)
mapConfig.SetDefault("timeout_prevote", 2000)
mapConfig.SetDefault("timeout_prevote_delta", 1000)
mapConfig.SetDefault("timeout_precommit", 2000)
mapConfig.SetDefault("timeout_precommit_delta", 1000)
mapConfig.SetDefault("timeout_commit", 1000)
mapConfig.SetDefault("timeout_handshake", 10000)
mapConfig.SetDefault("timeout_propose", 2000)
mapConfig.SetDefault("timeout_propose_delta", 1)
mapConfig.SetDefault("timeout_prevote", 10)
mapConfig.SetDefault("timeout_prevote_delta", 1)
mapConfig.SetDefault("timeout_precommit", 10)
mapConfig.SetDefault("timeout_precommit_delta", 1)
mapConfig.SetDefault("timeout_commit", 10)
mapConfig.SetDefault("skip_timeout_commit", true)
mapConfig.SetDefault("mempool_recheck", true)
mapConfig.SetDefault("mempool_recheck_empty", true)
mapConfig.SetDefault("mempool_broadcast", true)
mapConfig.SetDefault("mempool_wal_dir", "")
mapConfig.SetDefault("tx_index", "kv")
logger.SetLogLevel(mapConfig.GetString("log_level"))
return mapConfig
}
@@ -105,12 +119,12 @@ var defaultConfigTmpl = `# This is a TOML config file.
proxy_app = "dummy"
moniker = "__MONIKER__"
node_laddr = "0.0.0.0:36656"
node_laddr = "tcp://0.0.0.0:36656"
seeds = ""
fast_sync = false
db_backend = "memdb"
log_level = "debug"
rpc_laddr = "0.0.0.0:36657"
log_level = "info"
rpc_laddr = "tcp://0.0.0.0:36657"
`
func defaultConfig(moniker string) (defaultConfig string) {


@@ -1,4 +1,18 @@
The core consensus algorithm.
# The core consensus algorithm.
* state.go - The state machine as detailed in the whitepaper
* reactor.go - A reactor that connects the state machine to the gossip network
# Go-routine summary
The reactor runs two go-routines for each added peer: gossipDataRoutine and gossipVotesRoutine.
The consensus state runs two persistent go-routines: timeoutRoutine and receiveRoutine.
Additional go-routines are started to trigger timeouts and to avoid blocking when the internalMsgQueue backs up.
# Replay/WAL
A write-ahead log is used to record all messages processed by the receiveRoutine,
which amounts to all inputs to the consensus state machine:
messages from peers, messages from ourselves, and timeouts.
These messages can be played back deterministically at startup or through the replay console.
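For reference, the replay console mentioned here was exposed through the old CLI as "tendermint replay" and "tendermint replay console", which call node.RunReplay and node.RunReplayConsole respectively (see the main.go diff above).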

consensus/byzantine_test.go

@@ -0,0 +1,297 @@
package consensus
import (
"sync"
"testing"
"time"
"github.com/tendermint/tendermint/config/tendermint_test"
. "github.com/tendermint/go-common"
cfg "github.com/tendermint/go-config"
"github.com/tendermint/go-events"
"github.com/tendermint/go-p2p"
"github.com/tendermint/tendermint/types"
)
func init() {
config = tendermint_test.ResetConfig("consensus_byzantine_test")
}
//----------------------------------------------
// byzantine failures
// 4 validators. 1 is byzantine. The other three are partitioned into A (1 val) and B (2 vals).
// byzantine validator sends conflicting proposals into A and B,
// and prevotes/precommits on both of them.
// B sees a commit, A doesn't.
// Byzantine validator refuses to prevote.
// Heal partition and ensure A sees the commit
func TestByzantine(t *testing.T) {
N := 4
css := randConsensusNet(N, "consensus_byzantine_test", newMockTickerFunc(false), newCounter)
// give the byzantine validator a normal ticker
css[0].SetTimeoutTicker(NewTimeoutTicker())
switches := make([]*p2p.Switch, N)
for i := 0; i < N; i++ {
switches[i] = p2p.NewSwitch(cfg.NewMapConfig(nil))
}
reactors := make([]p2p.Reactor, N)
defer func() {
for _, r := range reactors {
if rr, ok := r.(*ByzantineReactor); ok {
rr.reactor.Switch.Stop()
} else {
r.(*ConsensusReactor).Switch.Stop()
}
}
}()
eventChans := make([]chan interface{}, N)
for i := 0; i < N; i++ {
if i == 0 {
css[i].privValidator = NewByzantinePrivValidator(css[i].privValidator.(*types.PrivValidator))
// make byzantine
css[i].decideProposal = func(j int) func(int, int) {
return func(height, round int) {
byzantineDecideProposalFunc(height, round, css[j], switches[j])
}
}(i)
css[i].doPrevote = func(height, round int) {}
}
eventSwitch := events.NewEventSwitch()
_, err := eventSwitch.Start()
if err != nil {
t.Fatalf("Failed to start switch: %v", err)
}
eventChans[i] = subscribeToEvent(eventSwitch, "tester", types.EventStringNewBlock(), 1)
conR := NewConsensusReactor(css[i], true) // so we don't start the consensus states
conR.SetEventSwitch(eventSwitch)
var conRI p2p.Reactor
conRI = conR
if i == 0 {
conRI = NewByzantineReactor(conR)
}
reactors[i] = conRI
}
p2p.MakeConnectedSwitches(N, func(i int, s *p2p.Switch) *p2p.Switch {
// ignore new switch s, we already made ours
switches[i].AddReactor("CONSENSUS", reactors[i])
return switches[i]
}, func(sws []*p2p.Switch, i, j int) {
// the network starts partitioned with a globally active adversary
if i != 0 {
return
}
p2p.Connect2Switches(sws, i, j)
})
// start the state machines
byzR := reactors[0].(*ByzantineReactor)
s := byzR.reactor.conS.GetState()
byzR.reactor.SwitchToConsensus(s)
for i := 1; i < N; i++ {
cr := reactors[i].(*ConsensusReactor)
cr.SwitchToConsensus(cr.conS.GetState())
}
// byz proposer sends one block to peers[0]
// and the other block to peers[1] and peers[2].
// note peers and switches order don't match.
peers := switches[0].Peers().List()
ind0 := getSwitchIndex(switches, peers[0])
ind1 := getSwitchIndex(switches, peers[1])
ind2 := getSwitchIndex(switches, peers[2])
// connect the 2 peers in the larger partition
p2p.Connect2Switches(switches, ind1, ind2)
// wait for someone in the big partition to make a block
select {
case <-eventChans[ind2]:
}
log.Notice("A block has been committed. Healing partition")
// connect the partitions
p2p.Connect2Switches(switches, ind0, ind1)
p2p.Connect2Switches(switches, ind0, ind2)
// wait till everyone makes the first new block
// (one of them already has)
wg := new(sync.WaitGroup)
wg.Add(2)
for i := 1; i < N-1; i++ {
go func(j int) {
<-eventChans[j]
wg.Done()
}(i)
}
done := make(chan struct{})
go func() {
wg.Wait()
close(done)
}()
tick := time.NewTicker(time.Second * 10)
select {
case <-done:
case <-tick.C:
for i, reactor := range reactors {
t.Log(Fmt("Consensus Reactor %v", i))
t.Log(Fmt("%v", reactor))
}
t.Fatalf("Timed out waiting for all validators to commit first block")
}
}
//-------------------------------
// byzantine consensus functions
func byzantineDecideProposalFunc(height, round int, cs *ConsensusState, sw *p2p.Switch) {
// byzantine user should create two proposals and try to split the vote.
// Avoid sending on internalMsgQueue and running consensus state.
// Create a new proposal block from state/txs from the mempool.
block1, blockParts1 := cs.createProposalBlock()
polRound, polBlockID := cs.Votes.POLInfo()
proposal1 := types.NewProposal(height, round, blockParts1.Header(), polRound, polBlockID)
cs.privValidator.SignProposal(cs.state.ChainID, proposal1) // byzantine doesn't err
// Create a new proposal block from state/txs from the mempool.
block2, blockParts2 := cs.createProposalBlock()
polRound, polBlockID = cs.Votes.POLInfo()
proposal2 := types.NewProposal(height, round, blockParts2.Header(), polRound, polBlockID)
cs.privValidator.SignProposal(cs.state.ChainID, proposal2) // byzantine doesn't err
block1Hash := block1.Hash()
block2Hash := block2.Hash()
// broadcast conflicting proposals/block parts to peers
peers := sw.Peers().List()
log.Notice("Byzantine: broadcasting conflicting proposals", "peers", len(peers))
for i, peer := range peers {
if i < len(peers)/2 {
go sendProposalAndParts(height, round, cs, peer, proposal1, block1Hash, blockParts1)
} else {
go sendProposalAndParts(height, round, cs, peer, proposal2, block2Hash, blockParts2)
}
}
}
func sendProposalAndParts(height, round int, cs *ConsensusState, peer *p2p.Peer, proposal *types.Proposal, blockHash []byte, parts *types.PartSet) {
// proposal
msg := &ProposalMessage{Proposal: proposal}
peer.Send(DataChannel, struct{ ConsensusMessage }{msg})
// parts
for i := 0; i < parts.Total(); i++ {
part := parts.GetPart(i)
msg := &BlockPartMessage{
Height: height, // This tells peer that this part applies to us.
Round: round, // This tells peer that this part applies to us.
Part: part,
}
peer.Send(DataChannel, struct{ ConsensusMessage }{msg})
}
// votes
cs.mtx.Lock()
prevote, _ := cs.signVote(types.VoteTypePrevote, blockHash, parts.Header())
precommit, _ := cs.signVote(types.VoteTypePrecommit, blockHash, parts.Header())
cs.mtx.Unlock()
peer.Send(VoteChannel, struct{ ConsensusMessage }{&VoteMessage{prevote}})
peer.Send(VoteChannel, struct{ ConsensusMessage }{&VoteMessage{precommit}})
}
//----------------------------------------
// byzantine consensus reactor
type ByzantineReactor struct {
Service
reactor *ConsensusReactor
}
func NewByzantineReactor(conR *ConsensusReactor) *ByzantineReactor {
return &ByzantineReactor{
Service: conR,
reactor: conR,
}
}
func (br *ByzantineReactor) SetSwitch(s *p2p.Switch) { br.reactor.SetSwitch(s) }
func (br *ByzantineReactor) GetChannels() []*p2p.ChannelDescriptor { return br.reactor.GetChannels() }
func (br *ByzantineReactor) AddPeer(peer *p2p.Peer) {
if !br.reactor.IsRunning() {
return
}
// Create peerState for peer
peerState := NewPeerState(peer)
peer.Data.Set(types.PeerStateKey, peerState)
// Send our state to peer.
// If we're fast_syncing, broadcast a RoundStepMessage later upon SwitchToConsensus().
if !br.reactor.fastSync {
br.reactor.sendNewRoundStepMessages(peer)
}
}
func (br *ByzantineReactor) RemovePeer(peer *p2p.Peer, reason interface{}) {
br.reactor.RemovePeer(peer, reason)
}
func (br *ByzantineReactor) Receive(chID byte, peer *p2p.Peer, msgBytes []byte) {
br.reactor.Receive(chID, peer, msgBytes)
}
//----------------------------------------
// byzantine privValidator
type ByzantinePrivValidator struct {
Address []byte `json:"address"`
types.Signer `json:"-"`
mtx sync.Mutex
}
// Return a priv validator that will sign anything
func NewByzantinePrivValidator(pv *types.PrivValidator) *ByzantinePrivValidator {
return &ByzantinePrivValidator{
Address: pv.Address,
Signer: pv.Signer,
}
}
func (privVal *ByzantinePrivValidator) GetAddress() []byte {
return privVal.Address
}
func (privVal *ByzantinePrivValidator) SignVote(chainID string, vote *types.Vote) error {
privVal.mtx.Lock()
defer privVal.mtx.Unlock()
// Sign
vote.Signature = privVal.Sign(types.SignBytes(chainID, vote))
return nil
}
func (privVal *ByzantinePrivValidator) SignProposal(chainID string, proposal *types.Proposal) error {
privVal.mtx.Lock()
defer privVal.mtx.Unlock()
// Sign
proposal.Signature = privVal.Sign(types.SignBytes(chainID, proposal))
return nil
}
func (privVal *ByzantinePrivValidator) String() string {
return Fmt("PrivValidator{%X}", privVal.Address)
}


@@ -1,15 +1,29 @@
package consensus
import (
"github.com/tendermint/go-events"
"github.com/tendermint/tendermint/types"
)
// NOTE: this is blocking
func subscribeToEvent(evsw *events.EventSwitch, receiver, eventID string, chanCap int) chan interface{} {
// listen for new round
// XXX: WARNING: these functions can halt the consensus as firing events is synchronous.
// Make sure to read off the channels, and in the case of subscribeToEventRespond, to write back on it
// NOTE: if chanCap=0, this blocks on the event being consumed
func subscribeToEvent(evsw types.EventSwitch, receiver, eventID string, chanCap int) chan interface{} {
// listen for event
ch := make(chan interface{}, chanCap)
evsw.AddListenerForEvent(receiver, eventID, func(data events.EventData) {
types.AddListenerForEvent(evsw, receiver, eventID, func(data types.TMEventData) {
ch <- data
})
return ch
}
// NOTE: this blocks on receiving a response after the event is consumed
func subscribeToEventRespond(evsw types.EventSwitch, receiver, eventID string) chan interface{} {
// listen for event
ch := make(chan interface{})
types.AddListenerForEvent(evsw, receiver, eventID, func(data types.TMEventData) {
ch <- data
<-ch
})
return ch
}
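The warning above is the important part: events fire synchronously, so a subscriber that stops draining its channel can halt consensus. A minimal consumer sketch for both helpers, assuming an evsw of type types.EventSwitch in scope (the event IDs are just examples):

// Drain subscribeToEvent channels; for subscribeToEventRespond,
// also write back to release the synchronous event firer.
newBlockCh := subscribeToEvent(evsw, "tester", types.EventStringNewBlock(), 1)
voteCh := subscribeToEventRespond(evsw, "tester", types.EventStringVote())
go func() {
	for {
		select {
		case <-newBlockCh: // buffered by chanCap; just drain
		case <-voteCh:
			voteCh <- struct{}{} // respond so the firer can proceed
		}
	}
}()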


@@ -3,52 +3,72 @@ package consensus
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"path"
"sort"
"sync"
"testing"
"time"
abcicli "github.com/tendermint/abci/client"
abci "github.com/tendermint/abci/types"
. "github.com/tendermint/go-common"
cfg "github.com/tendermint/go-config"
dbm "github.com/tendermint/go-db"
"github.com/tendermint/go-events"
"github.com/tendermint/go-p2p"
bc "github.com/tendermint/tendermint/blockchain"
"github.com/tendermint/tendermint/config/tendermint_test"
mempl "github.com/tendermint/tendermint/mempool"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
tmspcli "github.com/tendermint/tmsp/client"
tmsp "github.com/tendermint/tmsp/types"
"github.com/tendermint/tmsp/example/counter"
"github.com/tendermint/abci/example/counter"
"github.com/tendermint/abci/example/dummy"
)
var config cfg.Config // NOTE: must be reset for each _test.go file
var ensureTimeout = time.Duration(2)
func ensureDir(dir string, mode os.FileMode) {
if err := EnsureDir(dir, mode); err != nil {
panic(err)
}
}
//-------------------------------------------------------------------------------
// validator stub (a dummy consensus peer we control)
type validatorStub struct {
Index int // Validator index. NOTE: we don't assume validator set changes.
Height int
Round int
*types.PrivValidator
}
func NewValidatorStub(privValidator *types.PrivValidator) *validatorStub {
var testMinPower = 10
func NewValidatorStub(privValidator *types.PrivValidator, valIndex int) *validatorStub {
return &validatorStub{
Index: valIndex,
PrivValidator: privValidator,
}
}
func (vs *validatorStub) signVote(voteType byte, hash []byte, header types.PartSetHeader) (*types.Vote, error) {
vote := &types.Vote{
ValidatorIndex: vs.Index,
ValidatorAddress: vs.PrivValidator.Address,
Height: vs.Height,
Round: vs.Round,
Type: voteType,
BlockHash: hash,
BlockPartsHeader: header,
BlockID: types.BlockID{hash, header},
}
err := vs.PrivValidator.SignVote(config.GetString("chain_id"), vote)
return vote, err
}
// convenience function for testing
// Sign vote for type/hash/header
func signVote(vs *validatorStub, voteType byte, hash []byte, header types.PartSetHeader) *types.Vote {
v, err := vs.signVote(voteType, hash, header)
if err != nil {
@@ -57,102 +77,7 @@ func signVote(vs *validatorStub, voteType byte, hash []byte, header types.PartSe
return v
}
// create proposal block from cs1 but sign it with vs
func decideProposal(cs1 *ConsensusState, cs2 *validatorStub, height, round int) (proposal *types.Proposal, block *types.Block) {
block, blockParts := cs1.createProposalBlock()
if block == nil { // on error
panic("error creating proposal block")
}
// Make proposal
proposal = types.NewProposal(height, round, blockParts.Header(), cs1.Votes.POLRound())
if err := cs2.SignProposal(config.GetString("chain_id"), proposal); err != nil {
panic(err)
}
return
}
//-------------------------------------------------------------------------------
// utils
/*
func nilRound(t *testing.T, cs1 *ConsensusState, vss ...*validatorStub) {
cs1.mtx.Lock()
height, round := cs1.Height, cs1.Round
cs1.mtx.Unlock()
waitFor(t, cs1, height, round, RoundStepPrevote)
signAddVoteToFromMany(types.VoteTypePrevote, cs1, nil, cs1.ProposalBlockParts.Header(), vss...)
waitFor(t, cs1, height, round, RoundStepPrecommit)
signAddVoteToFromMany(types.VoteTypePrecommit, cs1, nil, cs1.ProposalBlockParts.Header(), vss...)
waitFor(t, cs1, height, round+1, RoundStepNewRound)
}
*/
// NOTE: this switches the proposer as far as `perspectiveOf` is concerned,
// but for simplicity we return a block it generated.
func changeProposer(t *testing.T, perspectiveOf *ConsensusState, newProposer *validatorStub) *types.Block {
_, v1 := perspectiveOf.Validators.GetByAddress(perspectiveOf.privValidator.Address)
v1.Accum, v1.VotingPower = 0, 0
if updated := perspectiveOf.Validators.Update(v1); !updated {
panic("failed to update validator")
}
_, v2 := perspectiveOf.Validators.GetByAddress(newProposer.Address)
v2.Accum, v2.VotingPower = 100, 100
if updated := perspectiveOf.Validators.Update(v2); !updated {
panic("failed to update validator")
}
// make the proposal
propBlock, _ := perspectiveOf.createProposalBlock()
if propBlock == nil {
panic("Failed to create proposal block with cs2")
}
return propBlock
}
func fixVotingPower(t *testing.T, cs1 *ConsensusState, addr2 []byte) {
_, v1 := cs1.Validators.GetByAddress(cs1.privValidator.Address)
_, v2 := cs1.Validators.GetByAddress(addr2)
v1.Accum, v1.VotingPower = v2.Accum, v2.VotingPower
if updated := cs1.Validators.Update(v1); !updated {
panic("failed to update validator")
}
}
func addVoteToFromMany(to *ConsensusState, votes []*types.Vote, froms ...*validatorStub) {
if len(votes) != len(froms) {
panic("len(votes) and len(froms) must match")
}
for i, from := range froms {
addVoteToFrom(to, from, votes[i])
}
}
func addVoteToFrom(to *ConsensusState, from *validatorStub, vote *types.Vote) {
to.mtx.Lock() // NOTE: wont need this when the vote comes with the index!
valIndex, _ := to.Validators.GetByAddress(from.PrivValidator.Address)
to.mtx.Unlock()
to.peerMsgQueue <- msgInfo{Msg: &VoteMessage{valIndex, vote}}
// added, err := to.TryAddVote(valIndex, vote, "")
/*
if _, ok := err.(*types.ErrVoteConflictingSignature); ok {
// let it fly
} else if !added {
fmt.Println("to, from, vote:", to.Height, from.Height, vote.Height)
panic(fmt.Sprintln("Failed to add vote. Err:", err))
} else if err != nil {
panic(fmt.Sprintln("Failed to add vote:", err))
}*/
}
func signVoteMany(voteType byte, hash []byte, header types.PartSetHeader, vss ...*validatorStub) []*types.Vote {
func signVotes(voteType byte, hash []byte, header types.PartSetHeader, vss ...*validatorStub) []*types.Vote {
votes := make([]*types.Vote, len(vss))
for i, vs := range vss {
votes[i] = signVote(vs, voteType, hash, header)
@@ -160,79 +85,6 @@ func signVoteMany(voteType byte, hash []byte, header types.PartSetHeader, vss ..
return votes
}
// add vote to one cs from another
// if voteCh is not nil, read all votes
func signAddVoteToFromMany(voteType byte, to *ConsensusState, hash []byte, header types.PartSetHeader, voteCh chan interface{}, froms ...*validatorStub) {
var wg chan struct{} // when done reading all votes
if voteCh != nil {
wg = readVotes(voteCh, len(froms))
}
for _, from := range froms {
vote := signVote(from, voteType, hash, header)
addVoteToFrom(to, from, vote)
}
if voteCh != nil {
<-wg
}
}
func signAddVoteToFrom(voteType byte, to *ConsensusState, from *validatorStub, hash []byte, header types.PartSetHeader, voteCh chan interface{}) *types.Vote {
var wg chan struct{} // when done reading all votes
if voteCh != nil {
wg = readVotes(voteCh, 1)
}
vote := signVote(from, voteType, hash, header)
addVoteToFrom(to, from, vote)
if voteCh != nil {
<-wg
}
return vote
}
func ensureNoNewStep(stepCh chan interface{}) {
timeout := time.NewTicker(ensureTimeout * time.Second)
select {
case <-timeout.C:
break
case <-stepCh:
panic("We should be stuck waiting for more votes, not moving to the next step")
}
}
/*
func ensureNoNewStep(t *testing.T, cs *ConsensusState) {
timeout := time.NewTicker(ensureTimeout * time.Second)
select {
case <-timeout.C:
break
case <-cs.NewStepCh():
panic("We should be stuck waiting for more votes, not moving to the next step")
}
}
func ensureNewStep(t *testing.T, cs *ConsensusState) *RoundState {
timeout := time.NewTicker(ensureTimeout * time.Second)
select {
case <-timeout.C:
panic("We should have gone to the next step, not be stuck waiting")
case rs := <-cs.NewStepCh():
return rs
}
}
func waitFor(t *testing.T, cs *ConsensusState, height int, round int, step RoundStepType) {
for {
rs := ensureNewStep(t, cs)
if CompareHRS(rs.Height, rs.Round, rs.Step, height, round, step) < 0 {
continue
} else {
break
}
}
}
*/
func incrementHeight(vss ...*validatorStub) {
for _, vs := range vss {
vs.Height += 1
@@ -245,6 +97,41 @@ func incrementRound(vss ...*validatorStub) {
}
}
//-------------------------------------------------------------------------------
// Functions for transitioning the consensus state
func startTestRound(cs *ConsensusState, height, round int) {
cs.enterNewRound(height, round)
cs.startRoutines(0)
}
// Create proposal block from cs1 but sign it with vs
func decideProposal(cs1 *ConsensusState, vs *validatorStub, height, round int) (proposal *types.Proposal, block *types.Block) {
block, blockParts := cs1.createProposalBlock()
if block == nil { // on error
panic("error creating proposal block")
}
// Make proposal
polRound, polBlockID := cs1.Votes.POLInfo()
proposal = types.NewProposal(height, round, blockParts.Header(), polRound, polBlockID)
if err := vs.SignProposal(config.GetString("chain_id"), proposal); err != nil {
panic(err)
}
return
}
func addVotes(to *ConsensusState, votes ...*types.Vote) {
for _, vote := range votes {
to.peerMsgQueue <- msgInfo{Msg: &VoteMessage{vote}}
}
}
func signAddVotes(to *ConsensusState, voteType byte, hash []byte, header types.PartSetHeader, vss ...*validatorStub) {
votes := signVotes(voteType, hash, header, vss...)
addVotes(to, votes...)
}
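Together these helpers let a test drive one height by hand. A sketch under the assumption that the proposal's hash and parts header have already been captured (propBlockHash and propPartsHeader are hypothetical placeholders):

// Sketch: drive a height manually with the helpers above.
cs, vss := randConsensusState(4)
startTestRound(cs, cs.Height, 0)
// ...after observing the proposal, vote it through with the other vals:
signAddVotes(cs, types.VoteTypePrevote, propBlockHash, propPartsHeader, vss[1:]...)
signAddVotes(cs, types.VoteTypePrecommit, propBlockHash, propPartsHeader, vss[1:]...)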
func validatePrevote(t *testing.T, cs *ConsensusState, round int, privVal *validatorStub, blockHash []byte) {
prevotes := cs.Votes.Prevotes(round)
var vote *types.Vote
@@ -252,12 +139,12 @@ func validatePrevote(t *testing.T, cs *ConsensusState, round int, privVal *valid
panic("Failed to find prevote from validator")
}
if blockHash == nil {
if vote.BlockHash != nil {
panic(fmt.Sprintf("Expected prevote to be for nil, got %X", vote.BlockHash))
if vote.BlockID.Hash != nil {
panic(fmt.Sprintf("Expected prevote to be for nil, got %X", vote.BlockID.Hash))
}
} else {
if !bytes.Equal(vote.BlockHash, blockHash) {
panic(fmt.Sprintf("Expected prevote to be for %X, got %X", blockHash, vote.BlockHash))
if !bytes.Equal(vote.BlockID.Hash, blockHash) {
panic(fmt.Sprintf("Expected prevote to be for %X, got %X", blockHash, vote.BlockID.Hash))
}
}
}
@@ -268,8 +155,8 @@ func validateLastPrecommit(t *testing.T, cs *ConsensusState, privVal *validatorS
if vote = votes.GetByAddress(privVal.Address); vote == nil {
panic("Failed to find precommit from validator")
}
if !bytes.Equal(vote.BlockHash, blockHash) {
panic(fmt.Sprintf("Expected precommit to be for %X, got %X", blockHash, vote.BlockHash))
if !bytes.Equal(vote.BlockID.Hash, blockHash) {
panic(fmt.Sprintf("Expected precommit to be for %X, got %X", blockHash, vote.BlockID.Hash))
}
}
@@ -281,11 +168,11 @@ func validatePrecommit(t *testing.T, cs *ConsensusState, thisRound, lockRound in
}
if votedBlockHash == nil {
if vote.BlockHash != nil {
if vote.BlockID.Hash != nil {
panic("Expected precommit to be for nil")
}
} else {
if !bytes.Equal(vote.BlockHash, votedBlockHash) {
if !bytes.Equal(vote.BlockID.Hash, votedBlockHash) {
panic("Expected precommit to be for proposal block")
}
}
@@ -311,55 +198,7 @@ func validatePrevoteAndPrecommit(t *testing.T, cs *ConsensusState, thisRound, lo
cs.mtx.Unlock()
}
func fixedConsensusState() *ConsensusState {
stateDB := dbm.NewMemDB()
state := sm.MakeGenesisStateFromFile(stateDB, config.GetString("genesis_file"))
privValidatorFile := config.GetString("priv_validator_file")
privValidator := types.LoadOrGenPrivValidator(privValidatorFile)
return newConsensusState(state, privValidator, counter.NewCounterApplication(true))
}
func newConsensusState(state *sm.State, pv *types.PrivValidator, app tmsp.Application) *ConsensusState {
// Get BlockStore
blockDB := dbm.NewMemDB()
blockStore := bc.NewBlockStore(blockDB)
// one for mempool, one for consensus
mtx := new(sync.Mutex)
proxyAppConnMem := tmspcli.NewLocalClient(mtx, app)
proxyAppConnCon := tmspcli.NewLocalClient(mtx, app)
// Make Mempool
mempool := mempl.NewMempool(config, proxyAppConnMem)
// Make ConsensusReactor
cs := NewConsensusState(config, state, proxyAppConnCon, blockStore, mempool)
cs.SetPrivValidator(pv)
evsw := events.NewEventSwitch()
cs.SetEventSwitch(evsw)
evsw.Start()
return cs
}
func randConsensusState(nValidators int) (*ConsensusState, []*validatorStub) {
// Get State
state, privVals := randGenesisState(nValidators, false, 10)
vss := make([]*validatorStub, nValidators)
cs := newConsensusState(state, privVals[0], counter.NewCounterApplication(true))
for i := 0; i < nValidators; i++ {
vss[i] = NewValidatorStub(privVals[i])
}
// since cs1 starts at 1
incrementHeight(vss[1:]...)
return cs, vss
}
// genesis
func subscribeToVoter(cs *ConsensusState, addr []byte) chan interface{} {
voteCh0 := subscribeToEvent(cs.evsw, "tester", types.EventStringVote(), 1)
voteCh := make(chan interface{})
@@ -368,7 +207,7 @@ func subscribeToVoter(cs *ConsensusState, addr []byte) chan interface{} {
v := <-voteCh0
vote := v.(types.EventDataVote)
// we only fire for our own votes
if bytes.Equal(addr, vote.Address) {
if bytes.Equal(addr, vote.Vote.ValidatorAddress) {
voteCh <- v
}
}
@@ -387,14 +226,145 @@ func readVotes(ch chan interface{}, reads int) chan struct{} {
return wg
}
func randGenesisState(numValidators int, randPower bool, minPower int64) (*sm.State, []*types.PrivValidator) {
db := dbm.NewMemDB()
genDoc, privValidators := randGenesisDoc(numValidators, randPower, minPower)
s0 := sm.MakeGenesisState(db, genDoc)
s0.Save()
return s0, privValidators
//-------------------------------------------------------------------------------
// consensus states
func newConsensusState(state *sm.State, pv *types.PrivValidator, app abci.Application) *ConsensusState {
return newConsensusStateWithConfig(config, state, pv, app)
}
func newConsensusStateWithConfig(thisConfig cfg.Config, state *sm.State, pv *types.PrivValidator, app abci.Application) *ConsensusState {
// Get BlockStore
blockDB := dbm.NewMemDB()
blockStore := bc.NewBlockStore(blockDB)
// one for mempool, one for consensus
mtx := new(sync.Mutex)
proxyAppConnMem := abcicli.NewLocalClient(mtx, app)
proxyAppConnCon := abcicli.NewLocalClient(mtx, app)
// Make Mempool
mempool := mempl.NewMempool(thisConfig, proxyAppConnMem)
// Make ConsensusReactor
cs := NewConsensusState(thisConfig, state, proxyAppConnCon, blockStore, mempool)
cs.SetPrivValidator(pv)
evsw := types.NewEventSwitch()
cs.SetEventSwitch(evsw)
evsw.Start()
return cs
}
func loadPrivValidator(conf cfg.Config) *types.PrivValidator {
privValidatorFile := conf.GetString("priv_validator_file")
ensureDir(path.Dir(privValidatorFile), 0700)
privValidator := types.LoadOrGenPrivValidator(privValidatorFile)
privValidator.Reset()
return privValidator
}
func fixedConsensusState() *ConsensusState {
stateDB := dbm.NewMemDB()
state := sm.MakeGenesisStateFromFile(stateDB, config.GetString("genesis_file"))
privValidator := loadPrivValidator(config)
cs := newConsensusState(state, privValidator, counter.NewCounterApplication(true))
return cs
}
func fixedConsensusStateDummy() *ConsensusState {
stateDB := dbm.NewMemDB()
state := sm.MakeGenesisStateFromFile(stateDB, config.GetString("genesis_file"))
privValidator := loadPrivValidator(config)
cs := newConsensusState(state, privValidator, dummy.NewDummyApplication())
return cs
}
func randConsensusState(nValidators int) (*ConsensusState, []*validatorStub) {
// Get State
state, privVals := randGenesisState(nValidators, false, 10)
vss := make([]*validatorStub, nValidators)
cs := newConsensusState(state, privVals[0], counter.NewCounterApplication(true))
for i := 0; i < nValidators; i++ {
vss[i] = NewValidatorStub(privVals[i], i)
}
// since cs1 starts at 1
incrementHeight(vss[1:]...)
return cs, vss
}
//-------------------------------------------------------------------------------
func ensureNoNewStep(stepCh chan interface{}) {
timeout := time.NewTicker(ensureTimeout * time.Second)
select {
case <-timeout.C:
break
case <-stepCh:
panic("We should be stuck waiting for more votes, not moving to the next step")
}
}
//-------------------------------------------------------------------------------
// consensus nets
func randConsensusNet(nValidators int, testName string, tickerFunc func() TimeoutTicker, appFunc func() abci.Application) []*ConsensusState {
genDoc, privVals := randGenesisDoc(nValidators, false, 10)
css := make([]*ConsensusState, nValidators)
for i := 0; i < nValidators; i++ {
db := dbm.NewMemDB() // each state needs its own db
state := sm.MakeGenesisState(db, genDoc)
state.Save()
thisConfig := tendermint_test.ResetConfig(Fmt("%s_%d", testName, i))
ensureDir(path.Dir(thisConfig.GetString("cs_wal_file")), 0700) // dir for wal
css[i] = newConsensusStateWithConfig(thisConfig, state, privVals[i], appFunc())
css[i].SetTimeoutTicker(tickerFunc())
}
return css
}
// nPeers = nValidators + nNotValidator
func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerFunc func() TimeoutTicker, appFunc func() abci.Application) []*ConsensusState {
genDoc, privVals := randGenesisDoc(nValidators, false, int64(testMinPower))
css := make([]*ConsensusState, nPeers)
for i := 0; i < nPeers; i++ {
db := dbm.NewMemDB() // each state needs its own db
state := sm.MakeGenesisState(db, genDoc)
state.Save()
thisConfig := tendermint_test.ResetConfig(Fmt("%s_%d", testName, i))
ensureDir(path.Dir(thisConfig.GetString("cs_wal_file")), 0700) // dir for wal
var privVal *types.PrivValidator
if i < nValidators {
privVal = privVals[i]
} else {
privVal = types.GenPrivValidator()
_, tempFilePath := Tempfile("priv_validator_")
privVal.SetFile(tempFilePath)
}
css[i] = newConsensusStateWithConfig(thisConfig, state, privVal, appFunc())
css[i].SetTimeoutTicker(tickerFunc())
}
return css
}
func getSwitchIndex(switches []*p2p.Switch, peer *p2p.Peer) int {
for i, s := range switches {
if bytes.Equal(peer.NodeInfo.PubKey.Address(), s.NodeInfo().PubKey.Address()) {
return i
}
}
panic("didnt find peer in switches")
return -1
}
//-------------------------------------------------------------------------------
// genesis
func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.GenesisDoc, []*types.PrivValidator) {
validators := make([]types.GenesisValidator, numValidators)
privValidators := make([]*types.PrivValidator, numValidators)
@@ -412,10 +382,69 @@ func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.G
ChainID: config.GetString("chain_id"),
Validators: validators,
}, privValidators
}
func startTestRound(cs *ConsensusState, height, round int) {
cs.enterNewRound(height, round)
cs.startRoutines(0)
func randGenesisState(numValidators int, randPower bool, minPower int64) (*sm.State, []*types.PrivValidator) {
genDoc, privValidators := randGenesisDoc(numValidators, randPower, minPower)
db := dbm.NewMemDB()
s0 := sm.MakeGenesisState(db, genDoc)
s0.Save()
return s0, privValidators
}
//------------------------------------
// mock ticker
func newMockTickerFunc(onlyOnce bool) func() TimeoutTicker {
return func() TimeoutTicker {
return &mockTicker{
c: make(chan timeoutInfo, 10),
onlyOnce: onlyOnce,
}
}
}
// mock ticker only fires on RoundStepNewHeight
// and only once if onlyOnce=true
type mockTicker struct {
c chan timeoutInfo
mtx sync.Mutex
onlyOnce bool
fired bool
}
func (m *mockTicker) Start() (bool, error) {
return true, nil
}
func (m *mockTicker) Stop() bool {
return true
}
func (m *mockTicker) ScheduleTimeout(ti timeoutInfo) {
m.mtx.Lock()
defer m.mtx.Unlock()
if m.onlyOnce && m.fired {
return
}
if ti.Step == RoundStepNewHeight {
m.c <- ti
m.fired = true
}
}
func (m *mockTicker) Chan() <-chan timeoutInfo {
return m.c
}
//------------------------------------
func newCounter() abci.Application {
return counter.NewCounterApplication(true)
}
func newPersistentDummy() abci.Application {
dir, _ := ioutil.TempDir("/tmp", "persistent-dummy")
return dummy.NewPersistentDummyApplication(dir)
}


@@ -24,6 +24,8 @@ but which round is not known in advance, so when a peer
provides a precommit for a round greater than mtx.round,
we create a new entry in roundVoteSets but also remember the
peer to prevent abuse.
We let each peer provide us with up to 2 unexpected "catchup" rounds.
One for their LastCommit round, and another for the official commit round.
*/
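The two-round allowance is exercised directly by TestPeerCatchupRounds further below: two catchup votes from one peer are accepted, a third from the same peer is rejected, while another peer still gets its own allowance.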
type HeightVoteSet struct {
chainID string
@@ -33,7 +35,7 @@ type HeightVoteSet struct {
mtx sync.Mutex
round int // max tracked round
roundVoteSets map[int]RoundVoteSet // keys: [0...round]
peerCatchupRounds map[string]int // keys: peer.Key; values: round
peerCatchupRounds map[string][]int // keys: peer.Key; values: at most 2 rounds
}
func NewHeightVoteSet(chainID string, height int, valSet *types.ValidatorSet) *HeightVoteSet {
@@ -51,7 +53,7 @@ func (hvs *HeightVoteSet) Reset(height int, valSet *types.ValidatorSet) {
hvs.height = height
hvs.valSet = valSet
hvs.roundVoteSets = make(map[int]RoundVoteSet)
hvs.peerCatchupRounds = make(map[string]int)
hvs.peerCatchupRounds = make(map[string][]int)
hvs.addRound(0)
hvs.round = 0
@@ -100,15 +102,18 @@ func (hvs *HeightVoteSet) addRound(round int) {
// Duplicate votes return added=false, err=nil.
// By convention, peerKey is "" if origin is self.
func (hvs *HeightVoteSet) AddByIndex(valIndex int, vote *types.Vote, peerKey string) (added bool, address []byte, err error) {
func (hvs *HeightVoteSet) AddVote(vote *types.Vote, peerKey string) (added bool, err error) {
hvs.mtx.Lock()
defer hvs.mtx.Unlock()
if !types.IsVoteTypeValid(vote.Type) {
return
}
voteSet := hvs.getVoteSet(vote.Round, vote.Type)
if voteSet == nil {
if _, ok := hvs.peerCatchupRounds[peerKey]; !ok {
if rndz := hvs.peerCatchupRounds[peerKey]; len(rndz) < 2 {
hvs.addRound(vote.Round)
voteSet = hvs.getVoteSet(vote.Round, vote.Type)
hvs.peerCatchupRounds[peerKey] = vote.Round
hvs.peerCatchupRounds[peerKey] = append(rndz, vote.Round)
} else {
// Peer has sent a vote that does not match our round,
// for more than one round. Bad peer!
@@ -117,7 +122,7 @@ func (hvs *HeightVoteSet) AddByIndex(valIndex int, vote *types.Vote, peerKey str
return
}
}
added, address, err = voteSet.AddByIndex(valIndex, vote)
added, err = voteSet.AddVote(vote)
return
}
@@ -133,17 +138,19 @@ func (hvs *HeightVoteSet) Precommits(round int) *types.VoteSet {
return hvs.getVoteSet(round, types.VoteTypePrecommit)
}
// Last round that has +2/3 prevotes for a particular block or nil.
// Last round and blockID that has +2/3 prevotes for a particular block or nil.
// Returns -1 if no such round exists.
func (hvs *HeightVoteSet) POLRound() int {
func (hvs *HeightVoteSet) POLInfo() (polRound int, polBlockID types.BlockID) {
hvs.mtx.Lock()
defer hvs.mtx.Unlock()
for r := hvs.round; r >= 0; r-- {
if hvs.getVoteSet(r, types.VoteTypePrevote).HasTwoThirdsMajority() {
return r
rvs := hvs.getVoteSet(r, types.VoteTypePrevote)
polBlockID, ok := rvs.TwoThirdsMajority()
if ok {
return r, polBlockID
}
}
return -1
return -1, types.BlockID{}
}
func (hvs *HeightVoteSet) getVoteSet(round int, type_ byte) *types.VoteSet {
@@ -167,6 +174,8 @@ func (hvs *HeightVoteSet) String() string {
}
func (hvs *HeightVoteSet) StringIndented(indent string) string {
hvs.mtx.Lock()
defer hvs.mtx.Unlock()
vsStrings := make([]string, 0, (len(hvs.roundVoteSets)+1)*2)
// rounds 0 ~ hvs.round inclusive
for round := 0; round <= hvs.round; round++ {
@@ -192,3 +201,20 @@ func (hvs *HeightVoteSet) StringIndented(indent string) string {
indent, strings.Join(vsStrings, "\n"+indent+" "),
indent)
}
// If a peer claims that it has 2/3 majority for given blockKey, call this.
// NOTE: if there are too many peers, or too much peer churn,
// this can cause memory issues.
// TODO: implement ability to remove peers too
func (hvs *HeightVoteSet) SetPeerMaj23(round int, type_ byte, peerID string, blockID types.BlockID) {
hvs.mtx.Lock()
defer hvs.mtx.Unlock()
if !types.IsVoteTypeValid(type_) {
return
}
voteSet := hvs.getVoteSet(round, type_)
if voteSet == nil {
return
}
voteSet.SetPeerMaj23(peerID, blockID)
}


@@ -17,31 +17,40 @@ func TestPeerCatchupRounds(t *testing.T) {
hvs := NewHeightVoteSet(config.GetString("chain_id"), 1, valSet)
vote999_0 := makeVoteHR(t, 1, 999, privVals[0])
added, _, err := hvs.AddByIndex(0, vote999_0, "peer1")
vote999_0 := makeVoteHR(t, 1, 999, privVals, 0)
added, err := hvs.AddVote(vote999_0, "peer1")
if !added || err != nil {
t.Error("Expected to successfully add vote from peer", added, err)
}
vote1000_0 := makeVoteHR(t, 1, 1000, privVals[0])
added, _, err = hvs.AddByIndex(0, vote1000_0, "peer1")
vote1000_0 := makeVoteHR(t, 1, 1000, privVals, 0)
added, err = hvs.AddVote(vote1000_0, "peer1")
if !added || err != nil {
t.Error("Expected to successfully add vote from peer", added, err)
}
vote1001_0 := makeVoteHR(t, 1, 1001, privVals, 0)
added, err = hvs.AddVote(vote1001_0, "peer1")
if added {
t.Error("Expected to *not* add vote from peer, too many catchup rounds.")
}
added, _, err = hvs.AddByIndex(0, vote1000_0, "peer2")
added, err = hvs.AddVote(vote1001_0, "peer2")
if !added || err != nil {
t.Error("Expected to successfully add vote from another peer")
}
}
func makeVoteHR(t *testing.T, height, round int, privVal *types.PrivValidator) *types.Vote {
func makeVoteHR(t *testing.T, height, round int, privVals []*types.PrivValidator, valIndex int) *types.Vote {
privVal := privVals[valIndex]
vote := &types.Vote{
Height: height,
Round: round,
Type: types.VoteTypePrecommit,
BlockHash: []byte("fakehash"),
ValidatorAddress: privVal.Address,
ValidatorIndex: valIndex,
Height: height,
Round: round,
Type: types.VoteTypePrecommit,
BlockID: types.BlockID{[]byte("fakehash"), types.PartSetHeader{}},
}
chainID := config.GetString("chain_id")
err := privVal.SignVote(chainID, vote)


@@ -2,13 +2,12 @@ package consensus
import (
"encoding/binary"
// "math/rand"
"testing"
"time"
abci "github.com/tendermint/abci/types"
"github.com/tendermint/tendermint/config/tendermint_test"
"github.com/tendermint/tendermint/types"
tmsp "github.com/tendermint/tmsp/types"
. "github.com/tendermint/go-common"
)
@@ -24,8 +23,8 @@ func TestTxConcurrentWithCommit(t *testing.T) {
height, round := cs.Height, cs.Round
newBlockCh := subscribeToEvent(cs.evsw, "tester", types.EventStringNewBlock(), 1)
appendTxsRange := func(start, end int) {
// Append some txs.
deliverTxsRange := func(start, end int) {
// Deliver some txs.
for i := start; i < end; i++ {
txBytes := make([]byte, 8)
binary.BigEndian.PutUint64(txBytes, uint64(i))
@@ -38,7 +37,7 @@ func TestTxConcurrentWithCommit(t *testing.T) {
}
NTxs := 10000
go appendTxsRange(0, NTxs)
go deliverTxsRange(0, NTxs)
startTestRound(cs, height, round)
ticker := time.NewTicker(time.Second * 20)
@@ -52,8 +51,68 @@ func TestTxConcurrentWithCommit(t *testing.T) {
}
}
func TestRmBadTx(t *testing.T) {
state, privVals := randGenesisState(1, false, 10)
app := NewCounterApplication()
cs := newConsensusState(state, privVals[0], app)
// increment the counter by 1
txBytes := make([]byte, 8)
binary.BigEndian.PutUint64(txBytes, uint64(0))
app.DeliverTx(txBytes)
app.Commit()
ch := make(chan struct{})
cbCh := make(chan struct{})
go func() {
// Try to send the tx through the mempool.
// CheckTx should not err, but the app should return a bad abci code
// and the tx should get removed from the pool
err := cs.mempool.CheckTx(txBytes, func(r *abci.Response) {
if r.GetCheckTx().Code != abci.CodeType_BadNonce {
t.Fatalf("expected checktx to return bad nonce, got %v", r)
}
cbCh <- struct{}{}
})
if err != nil {
t.Fatal("Error after CheckTx: %v", err)
}
// check for the tx
for {
time.Sleep(time.Second)
txs := cs.mempool.Reap(1)
if len(txs) == 0 {
ch <- struct{}{}
return
}
}
}()
// Wait until the tx returns
ticker := time.After(time.Second * 5)
select {
case <-cbCh:
// success
case <-ticker:
t.Fatalf("Timed out waiting for tx to return")
}
// Wait until the tx is removed
ticker = time.After(time.Second * 5)
select {
case <-ch:
// success
case <-ticker:
t.Fatalf("Timed out waiting for tx to be removed")
}
}
// CounterApplication that maintains a mempool state and resets it upon commit
type CounterApplication struct {
abci.BaseApplication
txCount int
mempoolTxCount int
}
@@ -62,49 +121,37 @@ func NewCounterApplication() *CounterApplication {
return &CounterApplication{}
}
func (app *CounterApplication) Info() string {
return Fmt("txs:%v", app.txCount)
func (app *CounterApplication) Info() abci.ResponseInfo {
return abci.ResponseInfo{Data: Fmt("txs:%v", app.txCount)}
}
func (app *CounterApplication) SetOption(key string, value string) (log string) {
return ""
}
func (app *CounterApplication) AppendTx(tx []byte) tmsp.Result {
func (app *CounterApplication) DeliverTx(tx []byte) abci.Result {
return runTx(tx, &app.txCount)
}
func (app *CounterApplication) CheckTx(tx []byte) tmsp.Result {
func (app *CounterApplication) CheckTx(tx []byte) abci.Result {
return runTx(tx, &app.mempoolTxCount)
}
func runTx(tx []byte, countPtr *int) tmsp.Result {
func runTx(tx []byte, countPtr *int) abci.Result {
count := *countPtr
tx8 := make([]byte, 8)
copy(tx8[len(tx8)-len(tx):], tx)
txValue := binary.BigEndian.Uint64(tx8)
if txValue != uint64(count) {
return tmsp.Result{
Code: tmsp.CodeType_BadNonce,
Data: nil,
Log: Fmt("Invalid nonce. Expected %v, got %v", count, txValue),
}
return abci.ErrBadNonce.AppendLog(Fmt("Invalid nonce. Expected %v, got %v", count, txValue))
}
*countPtr += 1
return tmsp.OK
return abci.OK
}
func (app *CounterApplication) Commit() tmsp.Result {
func (app *CounterApplication) Commit() abci.Result {
app.mempoolTxCount = app.txCount
if app.txCount == 0 {
return tmsp.OK
return abci.OK
} else {
hash := make([]byte, 8)
binary.BigEndian.PutUint64(hash, uint64(app.txCount))
return tmsp.NewResultOK(hash, "")
return abci.NewResultOK(hash, "")
}
}
func (app *CounterApplication) Query(query []byte) tmsp.Result {
return tmsp.NewResultOK(nil, Fmt("Query is not supported"))
}
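The nonce check in runTx above right-pads each tx into an 8-byte buffer and decodes it as a big-endian counter, so a tx is valid only if it encodes exactly the current count. A minimal standalone sketch of that check (names are illustrative, not part of the codebase):

package main

import (
	"encoding/binary"
	"fmt"
)

// checkNonce mirrors runTx's padding-and-compare logic: the tx is
// right-aligned into 8 bytes and decoded as a big-endian uint64,
// which must equal the expected count.
func checkNonce(tx []byte, count int) bool {
	tx8 := make([]byte, 8)
	copy(tx8[len(tx8)-len(tx):], tx)
	return binary.BigEndian.Uint64(tx8) == uint64(count)
}

func main() {
	tx := make([]byte, 8)
	binary.BigEndian.PutUint64(tx, 0)
	fmt.Println(checkNonce(tx, 0)) // true: first tx encodes 0
	fmt.Println(checkNonce(tx, 1)) // false: bad nonce -> CodeType_BadNonce
}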

View File

@@ -9,39 +9,37 @@ import (
"time"
. "github.com/tendermint/go-common"
"github.com/tendermint/go-events"
"github.com/tendermint/go-p2p"
"github.com/tendermint/go-wire"
bc "github.com/tendermint/tendermint/blockchain"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
)
const (
StateChannel = byte(0x20)
DataChannel = byte(0x21)
VoteChannel = byte(0x22)
StateChannel = byte(0x20)
DataChannel = byte(0x21)
VoteChannel = byte(0x22)
VoteSetBitsChannel = byte(0x23)
peerGossipSleepDuration = 100 * time.Millisecond // Time to sleep if there's nothing to send.
maxConsensusMessageSize = 1048576 // 1MB; NOTE: keep in sync with types.PartSet sizes.
peerGossipSleepDuration = 100 * time.Millisecond // Time to sleep if there's nothing to send.
peerQueryMaj23SleepDuration = 2 * time.Second // Time to sleep after each VoteSetMaj23Message sent
maxConsensusMessageSize = 1048576 // 1MB; NOTE: keep in sync with types.PartSet sizes.
)
//-----------------------------------------------------------------------------
type ConsensusReactor struct {
p2p.BaseReactor // QuitService + p2p.Switch
p2p.BaseReactor // BaseService + p2p.Switch
blockStore *bc.BlockStore
conS *ConsensusState
fastSync bool
evsw *events.EventSwitch
conS *ConsensusState
fastSync bool
evsw types.EventSwitch
}
func NewConsensusReactor(consensusState *ConsensusState, blockStore *bc.BlockStore, fastSync bool) *ConsensusReactor {
func NewConsensusReactor(consensusState *ConsensusState, fastSync bool) *ConsensusReactor {
conR := &ConsensusReactor{
blockStore: blockStore,
conS: consensusState,
fastSync: fastSync,
conS: consensusState,
fastSync: fastSync,
}
conR.BaseReactor = *p2p.NewBaseReactor(log, "ConsensusReactor", conR)
return conR
@@ -102,6 +100,12 @@ func (conR *ConsensusReactor) GetChannels() []*p2p.ChannelDescriptor {
SendQueueCapacity: 100,
RecvBufferCapacity: 100 * 100,
},
&p2p.ChannelDescriptor{
ID: VoteSetBitsChannel,
Priority: 1,
SendQueueCapacity: 2,
RecvBufferCapacity: 1024,
},
}
}
@@ -115,14 +119,15 @@ func (conR *ConsensusReactor) AddPeer(peer *p2p.Peer) {
peerState := NewPeerState(peer)
peer.Data.Set(types.PeerStateKey, peerState)
// Begin gossip routines for this peer.
// Begin routines for this peer.
go conR.gossipDataRoutine(peer, peerState)
go conR.gossipVotesRoutine(peer, peerState)
go conR.queryMaj23Routine(peer, peerState)
// Send our state to peer.
// If we're fast_syncing, broadcast a RoundStepMessage later upon SwitchToConsensus().
if !conR.fastSync {
conR.sendNewRoundStepMessage(peer)
conR.sendNewRoundStepMessages(peer)
}
}
@@ -140,6 +145,7 @@ func (conR *ConsensusReactor) RemovePeer(peer *p2p.Peer, reason interface{}) {
// Messages affect either a peer state or the consensus state.
// Peer state updates can happen in parallel, but processing of
// proposals, block parts, and votes is ordered by the receiveRoutine
// NOTE: blocks on consensus state for proposals, block parts, and votes
func (conR *ConsensusReactor) Receive(chID byte, src *p2p.Peer, msgBytes []byte) {
if !conR.IsRunning() {
log.Debug("Receive", "src", src, "chId", chID, "bytes", msgBytes)
@@ -152,7 +158,7 @@ func (conR *ConsensusReactor) Receive(chID byte, src *p2p.Peer, msgBytes []byte)
// TODO punish peer?
return
}
log.Info("Receive", "src", src, "chId", chID, "msg", msg)
log.Debug("Receive", "src", src, "chId", chID, "msg", msg)
// Get peer states
ps := src.Data.Get(types.PeerStateKey).(*PeerState)
@@ -166,6 +172,35 @@ func (conR *ConsensusReactor) Receive(chID byte, src *p2p.Peer, msgBytes []byte)
ps.ApplyCommitStepMessage(msg)
case *HasVoteMessage:
ps.ApplyHasVoteMessage(msg)
case *VoteSetMaj23Message:
cs := conR.conS
cs.mtx.Lock()
height, votes := cs.Height, cs.Votes
cs.mtx.Unlock()
if height != msg.Height {
return
}
// Peer claims to have a maj23 for some BlockID at H,R,S,
votes.SetPeerMaj23(msg.Round, msg.Type, ps.Peer.Key, msg.BlockID)
// Respond with a VoteSetBitsMessage showing which votes we have.
// (and consequently shows which we don't have)
var ourVotes *BitArray
switch msg.Type {
case types.VoteTypePrevote:
ourVotes = votes.Prevotes(msg.Round).BitArrayByBlockID(msg.BlockID)
case types.VoteTypePrecommit:
ourVotes = votes.Precommits(msg.Round).BitArrayByBlockID(msg.BlockID)
default:
log.Warn("Bad VoteSetBitsMessage field Type")
return
}
src.TrySend(VoteSetBitsChannel, struct{ ConsensusMessage }{&VoteSetBitsMessage{
Height: msg.Height,
Round: msg.Round,
Type: msg.Type,
BlockID: msg.BlockID,
Votes: ourVotes,
}})
default:
log.Warn(Fmt("Unknown message type %v", reflect.TypeOf(msg)))
}
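The exchange here is: a peer claims +2/3 votes for some BlockID, we record the claim with SetPeerMaj23, and we reply with the bit array of votes we hold for exactly that BlockID, which implicitly tells the peer what we are missing. A conceptual stand-in for BitArrayByBlockID over plain slices (the real code works on VoteSet/BitArray):

package main

import "fmt"

// bitArrayByBlockID marks the validators whose vote we have *for the
// given block*; votes for other blocks (or missing votes) stay false.
func bitArrayByBlockID(votes []string, blockID string) []bool {
	bits := make([]bool, len(votes))
	for i, v := range votes {
		bits[i] = v == blockID
	}
	return bits
}

func main() {
	votes := []string{"abc", "", "abc", "xyz"}   // "" = no vote seen from validator i
	fmt.Println(bitArrayByBlockID(votes, "abc")) // [true false true false]
}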
@@ -201,7 +236,7 @@ func (conR *ConsensusReactor) Receive(chID byte, src *p2p.Peer, msgBytes []byte)
cs.mtx.Unlock()
ps.EnsureVoteBitArrays(height, valSize)
ps.EnsureVoteBitArrays(height-1, lastCommitSize)
ps.SetHasVote(msg.Vote, msg.ValidatorIndex)
ps.SetHasVote(msg.Vote)
conR.conS.peerMsgQueue <- msgInfo{msg, src.Key}
@@ -209,6 +244,39 @@ func (conR *ConsensusReactor) Receive(chID byte, src *p2p.Peer, msgBytes []byte)
// don't punish (leave room for soft upgrades)
log.Warn(Fmt("Unknown message type %v", reflect.TypeOf(msg)))
}
case VoteSetBitsChannel:
if conR.fastSync {
log.Warn("Ignoring message received during fastSync", "msg", msg)
return
}
switch msg := msg.(type) {
case *VoteSetBitsMessage:
cs := conR.conS
cs.mtx.Lock()
height, votes := cs.Height, cs.Votes
cs.mtx.Unlock()
if height == msg.Height {
var ourVotes *BitArray
switch msg.Type {
case types.VoteTypePrevote:
ourVotes = votes.Prevotes(msg.Round).BitArrayByBlockID(msg.BlockID)
case types.VoteTypePrecommit:
ourVotes = votes.Precommits(msg.Round).BitArrayByBlockID(msg.BlockID)
default:
log.Warn("Bad VoteSetBitsMessage field Type")
return
}
ps.ApplyVoteSetBitsMessage(msg, ourVotes)
} else {
ps.ApplyVoteSetBitsMessage(msg, nil)
}
default:
// don't punish (leave room for soft upgrades)
log.Warn(Fmt("Unknown message type %v", reflect.TypeOf(msg)))
}
default:
log.Warn(Fmt("Unknown chId %X", chID))
}
@@ -218,13 +286,8 @@ func (conR *ConsensusReactor) Receive(chID byte, src *p2p.Peer, msgBytes []byte)
}
}
// Sets our private validator account for signing votes.
func (conR *ConsensusReactor) SetPrivValidator(priv *types.PrivValidator) {
conR.conS.SetPrivValidator(priv)
}
// implements events.Eventable
func (conR *ConsensusReactor) SetEventSwitch(evsw *events.EventSwitch) {
func (conR *ConsensusReactor) SetEventSwitch(evsw types.EventSwitch) {
conR.evsw = evsw
conR.conS.SetEventSwitch(evsw)
}
@@ -235,14 +298,14 @@ func (conR *ConsensusReactor) SetEventSwitch(evsw *events.EventSwitch) {
// broadcasting the result to peers
func (conR *ConsensusReactor) registerEventCallbacks() {
conR.evsw.AddListenerForEvent("conR", types.EventStringNewRoundStep(), func(data events.EventData) {
types.AddListenerForEvent(conR.evsw, "conR", types.EventStringNewRoundStep(), func(data types.TMEventData) {
rs := data.(types.EventDataRoundState).RoundState.(*RoundState)
conR.broadcastNewRoundStep(rs)
})
conR.evsw.AddListenerForEvent("conR", types.EventStringVote(), func(data events.EventData) {
types.AddListenerForEvent(conR.evsw, "conR", types.EventStringVote(), func(data types.TMEventData) {
edv := data.(types.EventDataVote)
conR.broadcastHasVoteMessage(edv.Vote, edv.Index)
conR.broadcastHasVoteMessage(edv.Vote)
})
}
@@ -258,12 +321,12 @@ func (conR *ConsensusReactor) broadcastNewRoundStep(rs *RoundState) {
}
// Broadcasts HasVoteMessage to peers that care.
func (conR *ConsensusReactor) broadcastHasVoteMessage(vote *types.Vote, index int) {
func (conR *ConsensusReactor) broadcastHasVoteMessage(vote *types.Vote) {
msg := &HasVoteMessage{
Height: vote.Height,
Round: vote.Round,
Type: vote.Type,
Index: index,
Index: vote.ValidatorIndex,
}
conR.Switch.Broadcast(StateChannel, struct{ ConsensusMessage }{msg})
/*
@@ -301,7 +364,7 @@ func makeRoundStepMessages(rs *RoundState) (nrsMsg *NewRoundStepMessage, csMsg *
return
}
func (conR *ConsensusReactor) sendNewRoundStepMessage(peer *p2p.Peer) {
func (conR *ConsensusReactor) sendNewRoundStepMessages(peer *p2p.Peer) {
rs := conR.conS.GetRoundState()
nrsMsg, csMsg := makeRoundStepMessages(rs)
if nrsMsg != nil {
@@ -335,8 +398,9 @@ OUTER_LOOP:
Round: rs.Round, // This tells peer that this part applies to us.
Part: part,
}
peer.Send(DataChannel, struct{ ConsensusMessage }{msg})
ps.SetHasProposalBlockPart(prs.Height, prs.Round, index)
if peer.Send(DataChannel, struct{ ConsensusMessage }{msg}) {
ps.SetHasProposalBlockPart(prs.Height, prs.Round, index)
}
continue OUTER_LOOP
}
}
@@ -346,18 +410,22 @@ OUTER_LOOP:
//log.Info("Data catchup", "height", rs.Height, "peerHeight", prs.Height, "peerProposalBlockParts", prs.ProposalBlockParts)
if index, ok := prs.ProposalBlockParts.Not().PickRandom(); ok {
// Ensure that the peer's PartSetHeader is correct
blockMeta := conR.blockStore.LoadBlockMeta(prs.Height)
if !blockMeta.PartsHeader.Equals(prs.ProposalBlockPartsHeader) {
blockMeta := conR.conS.blockStore.LoadBlockMeta(prs.Height)
if blockMeta == nil {
log.Warn("Failed to load block meta", "peer height", prs.Height, "our height", rs.Height, "blockstore height", conR.conS.blockStore.Height(), "pv", conR.conS.privValidator)
time.Sleep(peerGossipSleepDuration)
continue OUTER_LOOP
} else if !blockMeta.BlockID.PartsHeader.Equals(prs.ProposalBlockPartsHeader) {
log.Info("Peer ProposalBlockPartsHeader mismatch, sleeping",
"peerHeight", prs.Height, "blockPartsHeader", blockMeta.PartsHeader, "peerBlockPartsHeader", prs.ProposalBlockPartsHeader)
"peerHeight", prs.Height, "blockPartsHeader", blockMeta.BlockID.PartsHeader, "peerBlockPartsHeader", prs.ProposalBlockPartsHeader)
time.Sleep(peerGossipSleepDuration)
continue OUTER_LOOP
}
// Load the part
part := conR.blockStore.LoadBlockPart(prs.Height, index)
part := conR.conS.blockStore.LoadBlockPart(prs.Height, index)
if part == nil {
log.Warn("Could not load part", "index", index,
"peerHeight", prs.Height, "blockPartsHeader", blockMeta.PartsHeader, "peerBlockPartsHeader", prs.ProposalBlockPartsHeader)
"peerHeight", prs.Height, "blockPartsHeader", blockMeta.BlockID.PartsHeader, "peerBlockPartsHeader", prs.ProposalBlockPartsHeader)
time.Sleep(peerGossipSleepDuration)
continue OUTER_LOOP
}
@@ -367,8 +435,9 @@ OUTER_LOOP:
Round: prs.Round, // Not our height, so it doesn't matter.
Part: part,
}
peer.Send(DataChannel, struct{ ConsensusMessage }{msg})
ps.SetHasProposalBlockPart(prs.Height, prs.Round, index)
if peer.Send(DataChannel, struct{ ConsensusMessage }{msg}) {
ps.SetHasProposalBlockPart(prs.Height, prs.Round, index)
}
continue OUTER_LOOP
} else {
//log.Info("No parts to send in catch-up, sleeping")
@@ -391,13 +460,14 @@ OUTER_LOOP:
// Send Proposal && ProposalPOL BitArray?
if rs.Proposal != nil && !prs.Proposal {
// Proposal
// Proposal: share the proposal metadata with peer.
{
msg := &ProposalMessage{Proposal: rs.Proposal}
peer.Send(DataChannel, struct{ ConsensusMessage }{msg})
ps.SetHasProposal(rs.Proposal)
if peer.Send(DataChannel, struct{ ConsensusMessage }{msg}) {
ps.SetHasProposal(rs.Proposal)
}
}
// ProposalPOL.
// ProposalPOL: lets peer know which POL votes we have so far.
// Peer must receive ProposalMessage first.
// rs.Proposal was validated, so rs.Proposal.POLRound <= rs.Round,
// so we definitely have rs.Votes.Prevotes(rs.Proposal.POLRound).
@@ -449,21 +519,21 @@ OUTER_LOOP:
// If there are lastCommits to send...
if prs.Step == RoundStepNewHeight {
if ps.PickSendVote(rs.LastCommit) {
log.Info("Picked rs.LastCommit to send")
log.Debug("Picked rs.LastCommit to send")
continue OUTER_LOOP
}
}
// If there are prevotes to send...
if prs.Step <= RoundStepPrevote && prs.Round != -1 && prs.Round <= rs.Round {
if ps.PickSendVote(rs.Votes.Prevotes(prs.Round)) {
log.Info("Picked rs.Prevotes(prs.Round) to send")
log.Debug("Picked rs.Prevotes(prs.Round) to send")
continue OUTER_LOOP
}
}
// If there are precommits to send...
if prs.Step <= RoundStepPrecommit && prs.Round != -1 && prs.Round <= rs.Round {
if ps.PickSendVote(rs.Votes.Precommits(prs.Round)) {
log.Info("Picked rs.Precommits(prs.Round) to send")
log.Debug("Picked rs.Precommits(prs.Round) to send")
continue OUTER_LOOP
}
}
@@ -471,7 +541,7 @@ OUTER_LOOP:
if prs.ProposalPOLRound != -1 {
if polPrevotes := rs.Votes.Prevotes(prs.ProposalPOLRound); polPrevotes != nil {
if ps.PickSendVote(polPrevotes) {
log.Info("Picked rs.Prevotes(prs.ProposalPOLRound) to send")
log.Debug("Picked rs.Prevotes(prs.ProposalPOLRound) to send")
continue OUTER_LOOP
}
}
@@ -482,7 +552,7 @@ OUTER_LOOP:
// If peer is lagging by height 1, send LastCommit.
if prs.Height != 0 && rs.Height == prs.Height+1 {
if ps.PickSendVote(rs.LastCommit) {
log.Info("Picked rs.LastCommit to send")
log.Debug("Picked rs.LastCommit to send")
continue OUTER_LOOP
}
}
@@ -492,10 +562,10 @@ OUTER_LOOP:
if prs.Height != 0 && rs.Height >= prs.Height+2 {
// Load the block commit for prs.Height,
// which contains precommit signatures for prs.Height.
commit := conR.blockStore.LoadBlockCommit(prs.Height)
commit := conR.conS.blockStore.LoadBlockCommit(prs.Height)
log.Info("Loaded BlockCommit for catch-up", "height", prs.Height, "commit", commit)
if ps.PickSendVote(commit) {
log.Info("Picked Catchup commit to send")
log.Debug("Picked Catchup commit to send")
continue OUTER_LOOP
}
}
@@ -503,7 +573,7 @@ OUTER_LOOP:
if sleeping == 0 {
// We sent nothing. Sleep...
sleeping = 1
log.Info("No votes to send, sleeping", "peer", peer,
log.Debug("No votes to send, sleeping", "peer", peer,
"localPV", rs.Votes.Prevotes(rs.Round).BitArray(), "peerPV", prs.Prevotes,
"localPC", rs.Votes.Precommits(rs.Round).BitArray(), "peerPC", prs.Precommits)
} else if sleeping == 2 {
@@ -516,6 +586,110 @@ OUTER_LOOP:
}
}
// NOTE: `queryMaj23Routine` has a simple, crude design, since it only comes
// into play for liveness when there's a signature DDoS attack happening.
func (conR *ConsensusReactor) queryMaj23Routine(peer *p2p.Peer, ps *PeerState) {
log := log.New("peer", peer)
OUTER_LOOP:
for {
// Manage disconnects from self or peer.
if !peer.IsRunning() || !conR.IsRunning() {
log.Notice(Fmt("Stopping queryMaj23Routine for %v.", peer))
return
}
// Maybe send Height/Round/Prevotes
{
rs := conR.conS.GetRoundState()
prs := ps.GetRoundState()
if rs.Height == prs.Height {
if maj23, ok := rs.Votes.Prevotes(prs.Round).TwoThirdsMajority(); ok {
peer.TrySend(StateChannel, struct{ ConsensusMessage }{&VoteSetMaj23Message{
Height: prs.Height,
Round: prs.Round,
Type: types.VoteTypePrevote,
BlockID: maj23,
}})
time.Sleep(peerQueryMaj23SleepDuration)
}
}
}
// Maybe send Height/Round/Precommits
{
rs := conR.conS.GetRoundState()
prs := ps.GetRoundState()
if rs.Height == prs.Height {
if maj23, ok := rs.Votes.Precommits(prs.Round).TwoThirdsMajority(); ok {
peer.TrySend(StateChannel, struct{ ConsensusMessage }{&VoteSetMaj23Message{
Height: prs.Height,
Round: prs.Round,
Type: types.VoteTypePrecommit,
BlockID: maj23,
}})
time.Sleep(peerQueryMaj23SleepDuration)
}
}
}
// Maybe send Height/Round/ProposalPOL
{
rs := conR.conS.GetRoundState()
prs := ps.GetRoundState()
if rs.Height == prs.Height && prs.ProposalPOLRound >= 0 {
if maj23, ok := rs.Votes.Prevotes(prs.ProposalPOLRound).TwoThirdsMajority(); ok {
peer.TrySend(StateChannel, struct{ ConsensusMessage }{&VoteSetMaj23Message{
Height: prs.Height,
Round: prs.ProposalPOLRound,
Type: types.VoteTypePrevote,
BlockID: maj23,
}})
time.Sleep(peerQueryMaj23SleepDuration)
}
}
}
// Little point sending LastCommitRound/LastCommit;
// these are fleeting and non-blocking.
// Maybe send Height/CatchupCommitRound/CatchupCommit.
{
prs := ps.GetRoundState()
if prs.CatchupCommitRound != -1 && 0 < prs.Height && prs.Height <= conR.conS.blockStore.Height() {
commit := conR.conS.LoadCommit(prs.Height)
peer.TrySend(StateChannel, struct{ ConsensusMessage }{&VoteSetMaj23Message{
Height: prs.Height,
Round: commit.Round(),
Type: types.VoteTypePrecommit,
BlockID: commit.BlockID,
}})
time.Sleep(peerQueryMaj23SleepDuration)
}
}
time.Sleep(peerQueryMaj23SleepDuration)
continue OUTER_LOOP
}
}
func (conR *ConsensusReactor) String() string {
// better not to access shared variables
return "ConsensusReactor" // conR.StringIndented("")
}
func (conR *ConsensusReactor) StringIndented(indent string) string {
s := "ConsensusReactor{\n"
s += indent + " " + conR.conS.StringIndented(indent+" ") + "\n"
for _, peer := range conR.Switch.Peers().List() {
ps := peer.Data.Get(types.PeerStateKey).(*PeerState)
s += indent + " " + ps.StringIndented(indent+" ") + "\n"
}
s += indent + "}"
return s
}
//-----------------------------------------------------------------------------
// Read only when returned by PeerState.GetRoundState().
@@ -537,6 +711,30 @@ type PeerRoundState struct {
CatchupCommit *BitArray // All commit precommits peer has for this height & CatchupCommitRound
}
func (prs PeerRoundState) String() string {
return prs.StringIndented("")
}
func (prs PeerRoundState) StringIndented(indent string) string {
return fmt.Sprintf(`PeerRoundState{
%s %v/%v/%v @%v
%s Proposal %v -> %v
%s POL %v (round %v)
%s Prevotes %v
%s Precommits %v
%s LastCommit %v (round %v)
%s Catchup %v (round %v)
%s}`,
indent, prs.Height, prs.Round, prs.Step, prs.StartTime,
indent, prs.ProposalBlockPartsHeader, prs.ProposalBlockParts,
indent, prs.ProposalPOL, prs.ProposalPOLRound,
indent, prs.Prevotes,
indent, prs.Precommits,
indent, prs.LastCommit, prs.LastCommitRound,
indent, prs.CatchupCommit, prs.CatchupCommitRound,
indent)
}
//-----------------------------------------------------------------------------
var (
@@ -610,24 +808,23 @@ func (ps *PeerState) SetHasProposalBlockPart(height int, round int, index int) {
ps.ProposalBlockParts.SetIndex(index, true)
}
// Convenience function to send vote to peer.
// PickSendVote picks a vote and sends it to the peer.
// Returns true if vote was sent.
func (ps *PeerState) PickSendVote(votes types.VoteSetReader) (ok bool) {
if index, vote, ok := ps.PickVoteToSend(votes); ok {
msg := &VoteMessage{index, vote}
ps.Peer.Send(VoteChannel, struct{ ConsensusMessage }{msg})
return true
if vote, ok := ps.PickVoteToSend(votes); ok {
msg := &VoteMessage{vote}
return ps.Peer.Send(VoteChannel, struct{ ConsensusMessage }{msg})
}
return false
}
// votes: Must be the correct Size() for the Height().
func (ps *PeerState) PickVoteToSend(votes types.VoteSetReader) (index int, vote *types.Vote, ok bool) {
func (ps *PeerState) PickVoteToSend(votes types.VoteSetReader) (vote *types.Vote, ok bool) {
ps.mtx.Lock()
defer ps.mtx.Unlock()
if votes.Size() == 0 {
return 0, nil, false
return nil, false
}
height, round, type_, size := votes.Height(), votes.Round(), votes.Type(), votes.Size()
@@ -640,16 +837,20 @@ func (ps *PeerState) PickVoteToSend(votes types.VoteSetReader) (index int, vote
psVotes := ps.getVoteBitArray(height, round, type_)
if psVotes == nil {
return 0, nil, false // Not something worth sending
return nil, false // Not something worth sending
}
if index, ok := votes.BitArray().Sub(psVotes).PickRandom(); ok {
ps.setHasVote(height, round, type_, index)
return index, votes.GetByIndex(index), true
return votes.GetByIndex(index), true
}
return 0, nil, false
return nil, false
}
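The pick itself is bit-array arithmetic: the set's bit array minus what the peer already has (psVotes) yields the candidates, and PickRandom chooses one index. A tiny worked example (illustrative bool slices only):

package main

import "fmt"

func main() {
	have := []bool{true, true, false, true}     // votes present in our VoteSet
	peerHas := []bool{true, false, false, true} // votes the peer is known to have

	// votes.BitArray().Sub(psVotes): indices we could usefully send.
	var candidates []int
	for i := range have {
		if have[i] && !peerHas[i] {
			candidates = append(candidates, i)
		}
	}
	fmt.Println(candidates) // [1] -> PickRandom would have to choose index 1
}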
func (ps *PeerState) getVoteBitArray(height, round int, type_ byte) *BitArray {
if !types.IsVoteTypeValid(type_) {
PanicSanity("Invalid vote type")
}
if ps.Height == height {
if ps.Round == round {
switch type_ {
@@ -657,8 +858,6 @@ func (ps *PeerState) getVoteBitArray(height, round int, type_ byte) *BitArray {
return ps.Prevotes
case types.VoteTypePrecommit:
return ps.Precommits
default:
PanicSanity(Fmt("Unexpected vote type %X", type_))
}
}
if ps.CatchupCommitRound == round {
@@ -667,8 +866,14 @@ func (ps *PeerState) getVoteBitArray(height, round int, type_ byte) *BitArray {
return nil
case types.VoteTypePrecommit:
return ps.CatchupCommit
default:
PanicSanity(Fmt("Unexpected vote type %X", type_))
}
}
if ps.ProposalPOLRound == round {
switch type_ {
case types.VoteTypePrevote:
return ps.ProposalPOL
case types.VoteTypePrecommit:
return nil
}
}
return nil
@@ -680,8 +885,6 @@ func (ps *PeerState) getVoteBitArray(height, round int, type_ byte) *BitArray {
return nil
case types.VoteTypePrecommit:
return ps.LastCommit
default:
PanicSanity(Fmt("Unexpected vote type %X", type_))
}
}
return nil
@@ -741,56 +944,19 @@ func (ps *PeerState) ensureVoteBitArrays(height int, numValidators int) {
}
}
func (ps *PeerState) SetHasVote(vote *types.Vote, index int) {
func (ps *PeerState) SetHasVote(vote *types.Vote) {
ps.mtx.Lock()
defer ps.mtx.Unlock()
ps.setHasVote(vote.Height, vote.Round, vote.Type, index)
ps.setHasVote(vote.Height, vote.Round, vote.Type, vote.ValidatorIndex)
}
func (ps *PeerState) setHasVote(height int, round int, type_ byte, index int) {
log := log.New("peer", ps.Peer, "peerRound", ps.Round, "height", height, "round", round)
if type_ != types.VoteTypePrevote && type_ != types.VoteTypePrecommit {
PanicSanity("Invalid vote type")
}
log.Debug("setHasVote(LastCommit)", "lastCommit", ps.LastCommit, "index", index)
if ps.Height == height {
if ps.Round == round {
switch type_ {
case types.VoteTypePrevote:
ps.Prevotes.SetIndex(index, true)
log.Info("SetHasVote(round-match)", "prevotes", ps.Prevotes, "index", index)
case types.VoteTypePrecommit:
ps.Precommits.SetIndex(index, true)
log.Info("SetHasVote(round-match)", "precommits", ps.Precommits, "index", index)
}
} else if ps.CatchupCommitRound == round {
switch type_ {
case types.VoteTypePrevote:
case types.VoteTypePrecommit:
ps.CatchupCommit.SetIndex(index, true)
log.Info("SetHasVote(CatchupCommit)", "precommits", ps.Precommits, "index", index)
}
} else if ps.ProposalPOLRound == round {
switch type_ {
case types.VoteTypePrevote:
ps.ProposalPOL.SetIndex(index, true)
log.Info("SetHasVote(ProposalPOL)", "prevotes", ps.Prevotes, "index", index)
case types.VoteTypePrecommit:
}
}
} else if ps.Height == height+1 {
if ps.LastCommitRound == round {
switch type_ {
case types.VoteTypePrevote:
case types.VoteTypePrecommit:
ps.LastCommit.SetIndex(index, true)
log.Info("setHasVote(LastCommit)", "lastCommit", ps.LastCommit, "index", index)
}
}
} else {
// Does not apply.
}
// NOTE: some may be nil BitArrays -> no side effects.
ps.getVoteBitArray(height, round, type_).SetIndex(index, true)
}
func (ps *PeerState) ApplyNewRoundStepMessage(msg *NewRoundStepMessage) {
@@ -858,17 +1024,6 @@ func (ps *PeerState) ApplyCommitStepMessage(msg *CommitStepMessage) {
ps.ProposalBlockParts = msg.BlockParts
}
func (ps *PeerState) ApplyHasVoteMessage(msg *HasVoteMessage) {
ps.mtx.Lock()
defer ps.mtx.Unlock()
if ps.Height != msg.Height {
return
}
ps.setHasVote(msg.Height, msg.Round, msg.Type, msg.Index)
}
func (ps *PeerState) ApplyProposalPOLMessage(msg *ProposalPOLMessage) {
ps.mtx.Lock()
defer ps.mtx.Unlock()
@@ -885,6 +1040,52 @@ func (ps *PeerState) ApplyProposalPOLMessage(msg *ProposalPOLMessage) {
ps.ProposalPOL = msg.ProposalPOL
}
func (ps *PeerState) ApplyHasVoteMessage(msg *HasVoteMessage) {
ps.mtx.Lock()
defer ps.mtx.Unlock()
if ps.Height != msg.Height {
return
}
ps.setHasVote(msg.Height, msg.Round, msg.Type, msg.Index)
}
// The peer has responded with a bitarray of the votes it has
// for the corresponding BlockID.
// ourVotes: BitArray of votes we have for msg.BlockID
// NOTE: if ourVotes is nil (e.g. msg.Height < rs.Height),
// we conservatively overwrite ps's votes w/ msg.Votes.
func (ps *PeerState) ApplyVoteSetBitsMessage(msg *VoteSetBitsMessage, ourVotes *BitArray) {
ps.mtx.Lock()
defer ps.mtx.Unlock()
votes := ps.getVoteBitArray(msg.Height, msg.Round, msg.Type)
if votes != nil {
if ourVotes == nil {
votes.Update(msg.Votes)
} else {
otherVotes := votes.Sub(ourVotes)
hasVotes := otherVotes.Or(msg.Votes)
votes.Update(hasVotes)
}
}
}
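The update rule deserves unpacking: when ourVotes is known, the old estimate of the peer's votes is kept only at positions where we hold no vote ourselves (votes.Sub(ourVotes)), the peer's reported bits are OR'd in, and the result overwrites the estimate. A worked example with bool slices, assuming element-wise BitArray semantics:

package main

import "fmt"

func sub(a, b []bool) []bool { // a AND NOT b
	out := make([]bool, len(a))
	for i := range a {
		out[i] = a[i] && !b[i]
	}
	return out
}

func or(a, b []bool) []bool {
	out := make([]bool, len(a))
	for i := range a {
		out[i] = a[i] || b[i]
	}
	return out
}

func main() {
	peerKnown := []bool{true, true, false, false} // prior estimate of peer's votes
	ourVotes := []bool{true, false, true, false}  // votes we hold ourselves
	msgVotes := []bool{false, true, false, true}  // bits the peer reported

	// votes.Update(votes.Sub(ourVotes).Or(msg.Votes)):
	updated := or(sub(peerKnown, ourVotes), msgVotes)
	fmt.Println(updated) // [false true false true]
}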
func (ps *PeerState) String() string {
return ps.StringIndented("")
}
func (ps *PeerState) StringIndented(indent string) string {
return fmt.Sprintf(`PeerState{
%s Key %v
%s PRS %v
%s}`,
indent, ps.Peer.Key,
indent, ps.PeerRoundState.StringIndented(indent+" "),
indent)
}
//-----------------------------------------------------------------------------
// Messages
@@ -896,6 +1097,8 @@ const (
msgTypeBlockPart = byte(0x13) // both block & POL
msgTypeVote = byte(0x14)
msgTypeHasVote = byte(0x15)
msgTypeVoteSetMaj23 = byte(0x16)
msgTypeVoteSetBits = byte(0x17)
)
type ConsensusMessage interface{}
@@ -909,6 +1112,8 @@ var _ = wire.RegisterInterface(
wire.ConcreteType{&BlockPartMessage{}, msgTypeBlockPart},
wire.ConcreteType{&VoteMessage{}, msgTypeVote},
wire.ConcreteType{&HasVoteMessage{}, msgTypeHasVote},
wire.ConcreteType{&VoteSetMaj23Message{}, msgTypeVoteSetMaj23},
wire.ConcreteType{&VoteSetBitsMessage{}, msgTypeVoteSetBits},
)
// TODO: check for unnecessary extra bytes at the end.
@@ -985,12 +1190,11 @@ func (m *BlockPartMessage) String() string {
//-------------------------------------
type VoteMessage struct {
ValidatorIndex int
Vote *types.Vote
Vote *types.Vote
}
func (m *VoteMessage) String() string {
return fmt.Sprintf("[Vote VI:%v V:%v VI:%v]", m.ValidatorIndex, m.Vote, m.ValidatorIndex)
return fmt.Sprintf("[Vote %v]", m.Vote)
}
//-------------------------------------
@@ -1005,3 +1209,30 @@ type HasVoteMessage struct {
func (m *HasVoteMessage) String() string {
return fmt.Sprintf("[HasVote VI:%v V:{%v/%02d/%v} VI:%v]", m.Index, m.Height, m.Round, m.Type, m.Index)
}
//-------------------------------------
type VoteSetMaj23Message struct {
Height int
Round int
Type byte
BlockID types.BlockID
}
func (m *VoteSetMaj23Message) String() string {
return fmt.Sprintf("[VSM23 %v/%02d/%v %v]", m.Height, m.Round, m.Type, m.BlockID)
}
//-------------------------------------
type VoteSetBitsMessage struct {
Height int
Round int
Type byte
BlockID types.BlockID
Votes *BitArray
}
func (m *VoteSetBitsMessage) String() string {
return fmt.Sprintf("[VSB %v/%02d/%v %v %v]", m.Height, m.Round, m.Type, m.BlockID, m.Votes)
}

309
consensus/reactor_test.go Normal file
View File

@@ -0,0 +1,309 @@
package consensus
import (
"fmt"
"sync"
"testing"
"time"
"github.com/tendermint/tendermint/config/tendermint_test"
"github.com/tendermint/go-events"
"github.com/tendermint/go-p2p"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/abci/example/dummy"
)
func init() {
config = tendermint_test.ResetConfig("consensus_reactor_test")
}
//----------------------------------------------
// in-process testnets
func startConsensusNet(t *testing.T, css []*ConsensusState, N int, subscribeEventRespond bool) ([]*ConsensusReactor, []chan interface{}) {
reactors := make([]*ConsensusReactor, N)
eventChans := make([]chan interface{}, N)
for i := 0; i < N; i++ {
reactors[i] = NewConsensusReactor(css[i], true) // so we don't start the consensus states
eventSwitch := events.NewEventSwitch()
_, err := eventSwitch.Start()
if err != nil {
t.Fatalf("Failed to start switch: %v", err)
}
reactors[i].SetEventSwitch(eventSwitch)
if subscribeEventRespond {
eventChans[i] = subscribeToEventRespond(eventSwitch, "tester", types.EventStringNewBlock())
} else {
eventChans[i] = subscribeToEvent(eventSwitch, "tester", types.EventStringNewBlock(), 1)
}
}
// make connected switches and start all reactors
p2p.MakeConnectedSwitches(N, func(i int, s *p2p.Switch) *p2p.Switch {
s.AddReactor("CONSENSUS", reactors[i])
return s
}, p2p.Connect2Switches)
// now that everyone is connected, start the state machines
// If we started the state machines before everyone was connected,
// we'd block when the cs fires NewBlockEvent and the peers are trying to start their reactors
for i := 0; i < N; i++ {
s := reactors[i].conS.GetState()
reactors[i].SwitchToConsensus(s)
}
return reactors, eventChans
}
func stopConsensusNet(reactors []*ConsensusReactor) {
for _, r := range reactors {
r.Switch.Stop()
}
}
// Ensure a testnet makes blocks
func TestReactor(t *testing.T) {
N := 4
css := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
reactors, eventChans := startConsensusNet(t, css, N, false)
defer stopConsensusNet(reactors)
// wait till everyone makes the first new block
timeoutWaitGroup(t, N, func(wg *sync.WaitGroup, j int) {
<-eventChans[j]
wg.Done()
}, css)
}
//-------------------------------------------------------------
// ensure we can make blocks despite cycling a validator set
func TestVotingPowerChange(t *testing.T) {
nVals := 4
css := randConsensusNet(nVals, "consensus_voting_power_changes_test", newMockTickerFunc(true), newPersistentDummy)
reactors, eventChans := startConsensusNet(t, css, nVals, true)
defer stopConsensusNet(reactors)
// map of active validators
activeVals := make(map[string]struct{})
for i := 0; i < nVals; i++ {
activeVals[string(css[i].privValidator.GetAddress())] = struct{}{}
}
// wait till everyone makes block 1
timeoutWaitGroup(t, nVals, func(wg *sync.WaitGroup, j int) {
<-eventChans[j]
eventChans[j] <- struct{}{}
wg.Done()
}, css)
//---------------------------------------------------------------------------
log.Info("---------------------------- Testing changing the voting power of one validator a few times")
val1PubKey := css[0].privValidator.(*types.PrivValidator).PubKey
updateValidatorTx := dummy.MakeValSetChangeTx(val1PubKey.Bytes(), 25)
previousTotalVotingPower := css[0].GetRoundState().LastValidators.TotalVotingPower()
waitForAndValidateBlock(t, nVals, activeVals, eventChans, css, updateValidatorTx)
waitForAndValidateBlock(t, nVals, activeVals, eventChans, css)
waitForAndValidateBlock(t, nVals, activeVals, eventChans, css)
waitForAndValidateBlock(t, nVals, activeVals, eventChans, css)
if css[0].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower {
t.Fatalf("expected voting power to change (before: %d, after: %d)", previousTotalVotingPower, css[0].GetRoundState().LastValidators.TotalVotingPower())
}
updateValidatorTx = dummy.MakeValSetChangeTx(val1PubKey.Bytes(), 2)
previousTotalVotingPower = css[0].GetRoundState().LastValidators.TotalVotingPower()
waitForAndValidateBlock(t, nVals, activeVals, eventChans, css, updateValidatorTx)
waitForAndValidateBlock(t, nVals, activeVals, eventChans, css)
waitForAndValidateBlock(t, nVals, activeVals, eventChans, css)
waitForAndValidateBlock(t, nVals, activeVals, eventChans, css)
if css[0].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower {
t.Fatalf("expected voting power to change (before: %d, after: %d)", previousTotalVotingPower, css[0].GetRoundState().LastValidators.TotalVotingPower())
}
updateValidatorTx = dummy.MakeValSetChangeTx(val1PubKey.Bytes(), 100)
previousTotalVotingPower = css[0].GetRoundState().LastValidators.TotalVotingPower()
waitForAndValidateBlock(t, nVals, activeVals, eventChans, css, updateValidatorTx)
waitForAndValidateBlock(t, nVals, activeVals, eventChans, css)
waitForAndValidateBlock(t, nVals, activeVals, eventChans, css)
waitForAndValidateBlock(t, nVals, activeVals, eventChans, css)
if css[0].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower {
t.Fatalf("expected voting power to change (before: %d, after: %d)", previousTotalVotingPower, css[0].GetRoundState().LastValidators.TotalVotingPower())
}
}
func TestValidatorSetChanges(t *testing.T) {
nPeers := 7
nVals := 4
css := randConsensusNetWithPeers(nVals, nPeers, "consensus_val_set_changes_test", newMockTickerFunc(true), newPersistentDummy)
reactors, eventChans := startConsensusNet(t, css, nPeers, true)
defer stopConsensusNet(reactors)
// map of active validators
activeVals := make(map[string]struct{})
for i := 0; i < nVals; i++ {
activeVals[string(css[i].privValidator.GetAddress())] = struct{}{}
}
// wait till everyone makes block 1
timeoutWaitGroup(t, nPeers, func(wg *sync.WaitGroup, j int) {
<-eventChans[j]
eventChans[j] <- struct{}{}
wg.Done()
}, css)
//---------------------------------------------------------------------------
log.Info("---------------------------- Testing adding one validator")
newValidatorPubKey1 := css[nVals].privValidator.(*types.PrivValidator).PubKey
newValidatorTx1 := dummy.MakeValSetChangeTx(newValidatorPubKey1.Bytes(), uint64(testMinPower))
// wait till everyone makes block 2
// ensure the commit includes all validators
// send newValTx to change vals in block 3
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css, newValidatorTx1)
// wait till everyone makes block 3.
// it includes the commit for block 2, which is by the original validator set
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css)
// wait till everyone makes block 4.
// it includes the commit for block 3, which is by the original validator set
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css)
// the commits for block 4 should be with the updated validator set
activeVals[string(newValidatorPubKey1.Address())] = struct{}{}
// wait till everyone makes block 5
// it includes the commit for block 4, which should have the updated validator set
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css)
//---------------------------------------------------------------------------
log.Info("---------------------------- Testing changing the voting power of one validator")
updateValidatorPubKey1 := css[nVals].privValidator.(*types.PrivValidator).PubKey
updateValidatorTx1 := dummy.MakeValSetChangeTx(updateValidatorPubKey1.Bytes(), 25)
previousTotalVotingPower := css[nVals].GetRoundState().LastValidators.TotalVotingPower()
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css, updateValidatorTx1)
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css)
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css)
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css)
if css[nVals].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower {
t.Errorf("expected voting power to change (before: %d, after: %d)", previousTotalVotingPower, css[nVals].GetRoundState().LastValidators.TotalVotingPower())
}
//---------------------------------------------------------------------------
log.Info("---------------------------- Testing adding two validators at once")
newValidatorPubKey2 := css[nVals+1].privValidator.(*types.PrivValidator).PubKey
newValidatorTx2 := dummy.MakeValSetChangeTx(newValidatorPubKey2.Bytes(), uint64(testMinPower))
newValidatorPubKey3 := css[nVals+2].privValidator.(*types.PrivValidator).PubKey
newValidatorTx3 := dummy.MakeValSetChangeTx(newValidatorPubKey3.Bytes(), uint64(testMinPower))
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css, newValidatorTx2, newValidatorTx3)
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css)
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css)
activeVals[string(newValidatorPubKey2.Address())] = struct{}{}
activeVals[string(newValidatorPubKey3.Address())] = struct{}{}
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css)
//---------------------------------------------------------------------------
log.Info("---------------------------- Testing removing two validators at once")
removeValidatorTx2 := dummy.MakeValSetChangeTx(newValidatorPubKey2.Bytes(), 0)
removeValidatorTx3 := dummy.MakeValSetChangeTx(newValidatorPubKey3.Bytes(), 0)
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css, removeValidatorTx2, removeValidatorTx3)
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css)
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css)
delete(activeVals, string(newValidatorPubKey2.Address()))
delete(activeVals, string(newValidatorPubKey3.Address()))
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css)
}
// Check we can make blocks with skip_timeout_commit=false
func TestReactorWithTimeoutCommit(t *testing.T) {
N := 4
css := randConsensusNet(N, "consensus_reactor_with_timeout_commit_test", newMockTickerFunc(false), newCounter)
// override default SkipTimeoutCommit == true for tests
for i := 0; i < N; i++ {
css[i].timeoutParams.SkipTimeoutCommit = false
}
reactors, eventChans := startConsensusNet(t, css, N-1, false)
defer stopConsensusNet(reactors)
// wait till everyone makes the first new block
timeoutWaitGroup(t, N-1, func(wg *sync.WaitGroup, j int) {
<-eventChans[j]
wg.Done()
}, css)
}
func waitForAndValidateBlock(t *testing.T, n int, activeVals map[string]struct{}, eventChans []chan interface{}, css []*ConsensusState, txs ...[]byte) {
timeoutWaitGroup(t, n, func(wg *sync.WaitGroup, j int) {
newBlockI := <-eventChans[j]
newBlock := newBlockI.(types.EventDataNewBlock).Block
log.Warn("Got block", "height", newBlock.Height, "validator", j)
err := validateBlock(newBlock, activeVals)
if err != nil {
t.Fatal(err)
}
for _, tx := range txs {
css[j].mempool.CheckTx(tx, nil)
}
eventChans[j] <- struct{}{}
wg.Done()
log.Warn("Done wait group", "height", newBlock.Height, "validator", j)
}, css)
}
// expects high synchrony!
func validateBlock(block *types.Block, activeVals map[string]struct{}) error {
if block.LastCommit.Size() != len(activeVals) {
return fmt.Errorf("Commit size doesn't match number of active validators. Got %d, expected %d", block.LastCommit.Size(), len(activeVals))
}
for _, vote := range block.LastCommit.Precommits {
if _, ok := activeVals[string(vote.ValidatorAddress)]; !ok {
return fmt.Errorf("Found vote for unactive validator %X", vote.ValidatorAddress)
}
}
return nil
}
func timeoutWaitGroup(t *testing.T, n int, f func(*sync.WaitGroup, int), css []*ConsensusState) {
wg := new(sync.WaitGroup)
wg.Add(n)
for i := 0; i < n; i++ {
go f(wg, i)
}
done := make(chan struct{})
go func() {
wg.Wait()
close(done)
}()
select {
case <-done:
case <-time.After(time.Second * 10):
for i, cs := range css {
fmt.Println("#################")
fmt.Println("Validator", i)
fmt.Println(cs.GetRoundState())
fmt.Println("")
}
panic("Timed out waiting for all validators to commit a block")
}
}

View File

@@ -1,27 +1,46 @@
package consensus
import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
"os"
"reflect"
"strconv"
"strings"
"time"
abci "github.com/tendermint/abci/types"
auto "github.com/tendermint/go-autofile"
. "github.com/tendermint/go-common"
cfg "github.com/tendermint/go-config"
"github.com/tendermint/go-wire"
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
)
// unmarshal and apply a single message to the consensus state
// Functionality to replay blocks and messages on recovery from a crash.
// There are two general failure scenarios: failure during consensus, and failure while applying the block.
// The former is handled by the WAL, the latter by the proxyApp Handshake on restart,
// which ultimately hands off the work to the WAL.
//-----------------------------------------
// recover from failure during consensus
// by replaying messages from the WAL
// Unmarshal and apply a single message to the consensus state
// as if it were received in receiveRoutine
// Lines that start with "#" are ignored.
// NOTE: receiveRoutine should not be running
func (cs *ConsensusState) readReplayMessage(msgBytes []byte, newStepCh chan interface{}) error {
// Skip over empty and meta lines
if len(msgBytes) == 0 || msgBytes[0] == '#' {
return nil
}
var err error
var msg ConsensusLogMessage
var msg TimedWALMessage
wire.ReadJSON(&msg, msgBytes, &err)
if err != nil {
fmt.Println("MsgBytes:", msgBytes, string(msgBytes))
@@ -31,7 +50,7 @@ func (cs *ConsensusState) readReplayMessage(msgBytes []byte, newStepCh chan inte
// for logging
switch m := msg.Msg.(type) {
case types.EventDataRoundState:
log.Notice("New Step", "height", m.Height, "round", m.Round, "step", m.Step)
log.Notice("Replay: New Step", "height", m.Height, "round", m.Round, "step", m.Step)
// these are playback checks
ticker := time.After(time.Second * 2)
if newStepCh != nil {
@@ -53,309 +72,344 @@ func (cs *ConsensusState) readReplayMessage(msgBytes []byte, newStepCh chan inte
switch msg := m.Msg.(type) {
case *ProposalMessage:
p := msg.Proposal
log.Notice("Proposal", "height", p.Height, "round", p.Round, "header",
log.Notice("Replay: Proposal", "height", p.Height, "round", p.Round, "header",
p.BlockPartsHeader, "pol", p.POLRound, "peer", peerKey)
case *BlockPartMessage:
log.Notice("BlockPart", "height", msg.Height, "round", msg.Round, "peer", peerKey)
log.Notice("Replay: BlockPart", "height", msg.Height, "round", msg.Round, "peer", peerKey)
case *VoteMessage:
v := msg.Vote
log.Notice("Vote", "height", v.Height, "round", v.Round, "type", v.Type,
"hash", v.BlockHash, "header", v.BlockPartsHeader, "peer", peerKey)
}
// internal or from peer
if m.PeerKey == "" {
cs.internalMsgQueue <- m
} else {
cs.peerMsgQueue <- m
log.Notice("Replay: Vote", "height", v.Height, "round", v.Round, "type", v.Type,
"blockID", v.BlockID, "peer", peerKey)
}
cs.handleMsg(m, cs.RoundState)
case timeoutInfo:
log.Notice("Timeout", "height", m.Height, "round", m.Round, "step", m.Step, "dur", m.Duration)
cs.tockChan <- m
log.Notice("Replay: Timeout", "height", m.Height, "round", m.Round, "step", m.Step, "dur", m.Duration)
cs.handleTimeout(m, cs.RoundState)
default:
return fmt.Errorf("Unknown ConsensusLogMessage type: %v", reflect.TypeOf(msg.Msg))
return fmt.Errorf("Replay: Unknown TimedWALMessage type: %v", reflect.TypeOf(msg.Msg))
}
return nil
}
// replay only those messages since the last block
func (cs *ConsensusState) catchupReplay(height int) error {
if cs.wal == nil {
log.Warn("consensus msg log is nil")
return nil
// replay only those messages since the last block.
// timeoutRoutine should run concurrently to read off tickChan
func (cs *ConsensusState) catchupReplay(csHeight int) error {
// set replayMode
cs.replayMode = true
defer func() { cs.replayMode = false }()
// Ensure that ENDHEIGHT for this height doesn't exist
// NOTE: This is just a sanity check. As far as we know things work fine without it,
// and Handshake could reuse ConsensusState if it weren't for this check (since we can crash after writing ENDHEIGHT).
gr, found, err := cs.wal.group.Search("#ENDHEIGHT: ", makeHeightSearchFunc(csHeight))
if gr != nil {
gr.Close()
}
if !cs.wal.exists {
// new wal, nothing to catchup on
return nil
if found {
return errors.New(Fmt("WAL should not contain #ENDHEIGHT %d.", csHeight))
}
// starting from end of file,
// read messages until a new height is found
nLines, err := cs.wal.SeekFromEnd(func(lineBytes []byte) bool {
var err error
var msg ConsensusLogMessage
wire.ReadJSON(&msg, lineBytes, &err)
if err != nil {
panic(Fmt("Failed to read cs_msg_log json: %v", err))
}
m, ok := msg.Msg.(types.EventDataRoundState)
if ok && m.Step == RoundStepNewHeight.String() {
// TODO: ensure the height matches
return true
}
return false
})
if err != nil {
return err
}
var beginning bool // if we had to go back to the beginning
if c, _ := cs.wal.fp.Seek(0, 1); c == 0 {
beginning = true
}
log.Notice("Catchup by replaying consensus messages", "n", nLines)
// now we can replay the latest nLines on consensus state
// note we can't use scan because we've already been reading from the file
reader := bufio.NewReader(cs.wal.fp)
for i := 0; i < nLines; i++ {
msgBytes, err := reader.ReadBytes('\n')
// Search for last height marker
gr, found, err = cs.wal.group.Search("#ENDHEIGHT: ", makeHeightSearchFunc(csHeight-1))
if err == io.EOF {
log.Warn("Replay: wal.group.Search returned EOF", "#ENDHEIGHT", csHeight-1)
// if we upgraded from 0.9 to 0.9.1, we may have #HEIGHT instead
// TODO (0.10.0): remove this
gr, found, err = cs.wal.group.Search("#HEIGHT: ", makeHeightSearchFunc(csHeight))
if err == io.EOF {
break
log.Warn("Replay: wal.group.Search returned EOF", "#HEIGHT", csHeight)
return nil
} else if err != nil {
return err
} else if len(msgBytes) == 0 {
continue
} else if len(msgBytes) == 1 && msgBytes[0] == '\n' {
continue
}
// the first msg is the NewHeight event (if we're not at the beginning), so we can ignore it
if !beginning && i == 1 {
continue
} else if err != nil {
return err
} else {
defer gr.Close()
}
if !found {
// if we upgraded from 0.9 to 0.9.1, we may have #HEIGHT instead
// TODO (0.10.0): remove this
gr, found, err = cs.wal.group.Search("#HEIGHT: ", makeHeightSearchFunc(csHeight))
if err == io.EOF {
log.Warn("Replay: wal.group.Search returned EOF", "#HEIGHT", csHeight)
return nil
} else if err != nil {
return err
} else {
defer gr.Close()
}
// TODO (0.10.0): uncomment
// return errors.New(Fmt("Cannot replay height %d. WAL does not contain #ENDHEIGHT for %d.", csHeight, csHeight-1))
}
log.Notice("Catchup by replaying consensus messages", "height", csHeight)
for {
line, err := gr.ReadLine()
if err != nil {
if err == io.EOF {
break
} else {
return err
}
}
// NOTE: since the priv key is set when the msgs are received,
// it may attempt to, e.g., double sign, but we can just ignore it
// since the votes will be replayed and we'll get to the next step
if err := cs.readReplayMessage(msgBytes, nil); err != nil {
if err := cs.readReplayMessage([]byte(line), nil); err != nil {
return err
}
}
log.Info("Done catchup replay")
log.Notice("Replay: Done")
return nil
}
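For orientation, the WAL that catchupReplay searches is line-oriented: once a block is committed, an #ENDHEIGHT marker is written, and readReplayMessage skips any line beginning with "#". A hypothetical layout (message bodies elided, format illustrative only):

#ENDHEIGHT: 3
...TimedWALMessage JSON lines for height 4...
#ENDHEIGHT: 4
...TimedWALMessage JSON lines for height 5, crash before commit...

Here catchupReplay(5) seeks to "#ENDHEIGHT: 4" and replays every line after it; finding "#ENDHEIGHT: 5" would be an error, since it would mean height 5 already completed.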
//--------------------------------------------------------
// replay messages interactively or all at once
//--------------------------------------------------------------------------------
// Interactive playback
func (cs ConsensusState) ReplayConsole(file string) error {
return cs.replay(file, true)
// Parses marker lines of the form:
// #ENDHEIGHT: 12345
func makeHeightSearchFunc(height int) auto.SearchFunc {
return func(line string) (int, error) {
line = strings.TrimRight(line, "\n")
parts := strings.Split(line, " ")
if len(parts) != 2 {
return -1, errors.New("Line did not have 2 parts")
}
i, err := strconv.Atoi(parts[1])
if err != nil {
return -1, errors.New("Failed to parse INFO: " + err.Error())
}
if height < i {
return 1, nil
} else if height == i {
return 0, nil
} else {
return -1, nil
}
}
}
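The returned function follows the usual search-comparator convention: negative means keep scanning forward, zero means found, positive means the marker is past the target. A quick standalone illustration (input assumed well-formed, as in the parser above):

package main

import "fmt"

// cmp has the same contract as the closure returned by makeHeightSearchFunc:
// compare the height in a marker line against the height being searched for.
func cmp(line string, target int) int {
	var h int
	fmt.Sscanf(line, "#ENDHEIGHT: %d", &h) // error ignored: sketch only
	switch {
	case target < h:
		return 1 // overshot the target
	case target == h:
		return 0 // found it
	default:
		return -1 // keep scanning forward
	}
}

func main() {
	fmt.Println(cmp("#ENDHEIGHT: 3", 4)) // -1
	fmt.Println(cmp("#ENDHEIGHT: 4", 4)) // 0
	fmt.Println(cmp("#ENDHEIGHT: 5", 4)) // 1
}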
// Full playback, with tests
func (cs ConsensusState) ReplayMessages(file string) error {
return cs.replay(file, false)
//----------------------------------------------
// Recover from failure during block processing
// by handshaking with the app to figure out where
// we were last and using the WAL to recover there
type Handshaker struct {
config cfg.Config
state *sm.State
store types.BlockStore
nBlocks int // number of blocks applied to the state
}
// replay all msgs or start the console
func (cs *ConsensusState) replay(file string, console bool) error {
if cs.IsRunning() {
return errors.New("cs is already running, cannot replay")
}
if cs.wal != nil {
return errors.New("cs wal is open, cannot replay")
}
func NewHandshaker(config cfg.Config, state *sm.State, store types.BlockStore) *Handshaker {
return &Handshaker{config, state, store, 0}
}
cs.startForReplay()
func (h *Handshaker) NBlocks() int {
return h.nBlocks
}
// ensure all new step events are regenerated as expected
newStepCh := subscribeToEvent(cs.evsw, "replay-test", types.EventStringNewRoundStep(), 1)
var ErrReplayLastBlockTimeout = errors.New("Timed out waiting for last block to be replayed")
// just open the file for reading, no need to use wal
fp, err := os.OpenFile(file, os.O_RDONLY, 0666)
// TODO: retry the handshake/replay if it fails ?
func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error {
// handshake is done via info request on the query conn
res, err := proxyApp.Query().InfoSync()
if err != nil {
return err
return errors.New(Fmt("Error calling Info: %v", err))
}
pb := newPlayback(file, fp, cs, cs.state.Copy())
defer pb.fp.Close()
blockHeight := int(res.LastBlockHeight) // XXX: beware overflow
appHash := res.LastBlockAppHash
var nextN int // apply N msgs in a row
for pb.scanner.Scan() {
if nextN == 0 && console {
nextN = pb.replayConsoleLoop()
log.Notice("ABCI Handshake", "appHeight", blockHeight, "appHash", appHash)
// TODO: check version
// replay blocks up to the latest in the blockstore
_, err = h.ReplayBlocks(appHash, blockHeight, proxyApp)
if err == ErrReplayLastBlockTimeout {
log.Warn("Failed to sync via handshake. Trying other means. If they fail, please increase the timeout_handshake parameter")
return nil
} else if err != nil {
return errors.New(Fmt("Error on replay: %v", err))
}
log.Notice("Completed ABCI Handshake - Tendermint and App are synced", "appHeight", blockHeight, "appHash", appHash)
// TODO: (on restart) replay mempool
return nil
}
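As a usage sketch, the handshake runs once on startup, before consensus starts. This is hypothetical wiring in the style of the surrounding package (config, state, blockStore, and proxyApp are assumed to be constructed elsewhere):

// Hypothetical startup wiring; names mirror the types above.
handshaker := NewHandshaker(config, state, blockStore)
if err := handshaker.Handshake(proxyApp); err != nil {
	Exit(Fmt("Error during handshake: %v", err))
}
log.Notice("Handshake done", "blocksReplayed", handshaker.NBlocks())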
// Replay all blocks since appBlockHeight and ensure the result matches the current state.
// Returns the final AppHash or an error
func (h *Handshaker) ReplayBlocks(appHash []byte, appBlockHeight int, proxyApp proxy.AppConns) ([]byte, error) {
storeBlockHeight := h.store.Height()
stateBlockHeight := h.state.LastBlockHeight
log.Notice("ABCI Replay Blocks", "appHeight", appBlockHeight, "storeHeight", storeBlockHeight, "stateHeight", stateBlockHeight)
// First handle edge cases and constraints on the storeBlockHeight
if storeBlockHeight == 0 {
return appHash, h.checkAppHash(appHash)
} else if storeBlockHeight < appBlockHeight {
// the app should never be ahead of the store (but this is under the app's control)
return appHash, sm.ErrAppBlockHeightTooHigh{storeBlockHeight, appBlockHeight}
} else if storeBlockHeight < stateBlockHeight {
// the state should never be ahead of the store (this is under tendermint's control)
PanicSanity(Fmt("StateBlockHeight (%d) > StoreBlockHeight (%d)", stateBlockHeight, storeBlockHeight))
} else if storeBlockHeight > stateBlockHeight+1 {
// store should be at most one ahead of the state (this is under tendermint's control)
PanicSanity(Fmt("StoreBlockHeight (%d) > StateBlockHeight + 1 (%d)", storeBlockHeight, stateBlockHeight+1))
}
// Now either store is equal to state, or one ahead.
// For each, consider all cases of where the app could be, given app <= store
if storeBlockHeight == stateBlockHeight {
// Tendermint ran Commit and saved the state.
// Either the app is asking for replay, or we're all synced up.
if appBlockHeight < storeBlockHeight {
// the app is behind, so replay blocks, but no need to go through WAL (state is already synced to store)
return h.replayBlocks(proxyApp, appBlockHeight, storeBlockHeight, false)
} else if appBlockHeight == storeBlockHeight {
// We're good!
return appHash, h.checkAppHash(appHash)
}
if err := pb.cs.readReplayMessage(pb.scanner.Bytes(), newStepCh); err != nil {
return err
} else if storeBlockHeight == stateBlockHeight+1 {
// We saved the block in the store but haven't updated the state,
// so we'll need to replay a block using the WAL.
if appBlockHeight < stateBlockHeight {
// the app is further behind than it should be, so replay blocks
// but leave the last block to go through the WAL
return h.replayBlocks(proxyApp, appBlockHeight, storeBlockHeight, true)
} else if appBlockHeight == stateBlockHeight {
// We haven't run Commit (both the state and app are one block behind),
// so replayBlock with the real app.
// NOTE: We could instead use the cs.WAL on cs.Start,
// but we'd have to allow the WAL to replay a block that wrote its ENDHEIGHT
log.Info("Replay last block using real app")
return h.replayBlock(storeBlockHeight, proxyApp.Consensus())
} else if appBlockHeight == storeBlockHeight {
// We ran Commit, but didn't save the state, so replayBlock with mock app
abciResponses := h.state.LoadABCIResponses()
mockApp := newMockProxyApp(appHash, abciResponses)
log.Info("Replay last block using mock app")
return h.replayBlock(storeBlockHeight, mockApp)
}
if nextN > 0 {
nextN -= 1
}
PanicSanity("Should never happen")
return nil, nil
}
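The case analysis above reduces to a small decision function on the three heights. A standalone sketch of just the logic (action strings are descriptive, not a real API):

package main

import "fmt"

// decide mirrors ReplayBlocks' case analysis on (store, state, app) heights.
func decide(store, state, app int) string {
	switch {
	case store == 0:
		return "no blocks: just check app hash"
	case store < app:
		return "error: app is ahead of the store"
	case store < state, store > state+1:
		return "panic: store/state invariant broken"
	case store == state && app < store:
		return "replay blocks app+1..store (no WAL needed)"
	case store == state && app == store:
		return "fully synced: check app hash"
	case store == state+1 && app < state:
		return "replay blocks app+1..store-1, then the last block via replayBlock"
	case store == state+1 && app == state:
		return "replay the last block with the real app"
	case store == state+1 && app == store:
		return "replay the last block with the mock app (Commit already ran)"
	}
	return "unreachable"
}

func main() {
	fmt.Println(decide(10, 9, 9))  // crashed after saving the block, before Commit
	fmt.Println(decide(10, 9, 10)) // crashed after Commit, before saving state
	fmt.Println(decide(10, 10, 7)) // app lagging several blocks
}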
func (h *Handshaker) replayBlocks(proxyApp proxy.AppConns, appBlockHeight, storeBlockHeight int, mutateState bool) ([]byte, error) {
// App is further behind than it should be, so we need to replay blocks.
// We replay all blocks from appBlockHeight+1.
// Note that we don't have an old version of the state,
// so we by-pass state validation/mutation using sm.ExecCommitBlock.
// If mutateState == true, the final block is replayed with h.replayBlock()
var appHash []byte
var err error
finalBlock := storeBlockHeight
if mutateState {
finalBlock -= 1
}
for i := appBlockHeight + 1; i <= finalBlock; i++ {
log.Info("Applying block", "height", i)
block := h.store.LoadBlock(i)
appHash, err = sm.ExecCommitBlock(proxyApp.Consensus(), block)
if err != nil {
return nil, err
}
pb.count += 1
h.nBlocks += 1
}
if mutateState {
// sync the final block
return h.replayBlock(storeBlockHeight, proxyApp.Consensus())
}
return appHash, h.checkAppHash(appHash)
}
// ApplyBlock on the proxyApp with the last block.
func (h *Handshaker) replayBlock(height int, proxyApp proxy.AppConnConsensus) ([]byte, error) {
mempool := types.MockMempool{}
var eventCache types.Fireable // nil
block := h.store.LoadBlock(height)
meta := h.store.LoadBlockMeta(height)
if err := h.state.ApplyBlock(eventCache, proxyApp, block, meta.BlockID.PartsHeader, mempool); err != nil {
return nil, err
}
h.nBlocks += 1
return h.state.AppHash, nil
}
func (h *Handshaker) checkAppHash(appHash []byte) error {
if !bytes.Equal(h.state.AppHash, appHash) {
panic(errors.New(Fmt("Tendermint state.AppHash does not match AppHash after replay. Got %X, expected %X", appHash, h.state.AppHash)).Error())
return nil
}
return nil
}
//------------------------------------------------
// playback manager
//--------------------------------------------------------------------------------
// mockProxyApp uses ABCIResponses to give the right results
// Useful because we don't want to call Commit() twice for the same block on the real app.
type playback struct {
cs *ConsensusState
fp *os.File
scanner *bufio.Scanner
count int // how many lines/msgs into the file are we
// replays can be reset to beginning
fileName string // so we can close/reopen the file
genesisState *sm.State // so the replay session knows where to restart from
func newMockProxyApp(appHash []byte, abciResponses *sm.ABCIResponses) proxy.AppConnConsensus {
clientCreator := proxy.NewLocalClientCreator(&mockProxyApp{
appHash: appHash,
abciResponses: abciResponses,
})
cli, _ := clientCreator.NewABCIClient()
return proxy.NewAppConnConsensus(cli)
}
func newPlayback(fileName string, fp *os.File, cs *ConsensusState, genState *sm.State) *playback {
return &playback{
cs: cs,
fp: fp,
fileName: fileName,
genesisState: genState,
scanner: bufio.NewScanner(fp),
type mockProxyApp struct {
abci.BaseApplication
appHash []byte
txCount int
abciResponses *sm.ABCIResponses
}
func (mock *mockProxyApp) DeliverTx(tx []byte) abci.Result {
r := mock.abciResponses.DeliverTx[mock.txCount]
mock.txCount += 1
return abci.Result{
r.Code,
r.Data,
r.Log,
}
}
// go back count steps by resetting the state and running (pb.count - count) steps
func (pb *playback) replayReset(count int, newStepCh chan interface{}) error {
pb.cs.Stop()
newCS := NewConsensusState(pb.cs.config, pb.genesisState.Copy(), pb.cs.proxyAppConn, pb.cs.blockStore, pb.cs.mempool)
newCS.SetEventSwitch(pb.cs.evsw)
newCS.startForReplay()
pb.fp.Close()
fp, err := os.OpenFile(pb.fileName, os.O_RDONLY, 0666)
if err != nil {
return err
}
pb.fp = fp
pb.scanner = bufio.NewScanner(fp)
count = pb.count - count
log.Notice(Fmt("Reseting from %d to %d", pb.count, count))
pb.count = 0
pb.cs = newCS
for i := 0; pb.scanner.Scan() && i < count; i++ {
if err := pb.cs.readReplayMessage(pb.scanner.Bytes(), newStepCh); err != nil {
return err
}
pb.count += 1
}
return nil
func (mock *mockProxyApp) EndBlock(height uint64) abci.ResponseEndBlock {
mock.txCount = 0
return mock.abciResponses.EndBlock
}
func (cs *ConsensusState) startForReplay() {
cs.BaseService.OnStart()
go cs.receiveRoutine(0)
// since we replay tocks we just ignore ticks
go func() {
for {
select {
case <-cs.tickChan:
case <-cs.Quit:
return
}
}
}()
}
// console function for parsing input and running commands
func (pb *playback) replayConsoleLoop() int {
for {
fmt.Printf("> ")
bufReader := bufio.NewReader(os.Stdin)
line, more, err := bufReader.ReadLine()
if more {
Exit("input is too long")
} else if err != nil {
Exit(err.Error())
}
tokens := strings.Split(string(line), " ")
if len(tokens) == 0 {
continue
}
switch tokens[0] {
case "next":
// "next" -> replay next message
// "next N" -> replay next N messages
if len(tokens) == 1 {
return 0
} else {
i, err := strconv.Atoi(tokens[1])
if err != nil {
fmt.Println("next takes an integer argument")
} else {
return i
}
}
case "back":
// "back" -> go back one message
// "back N" -> go back N messages
// NOTE: "back" is not supported in the state machine design,
// so we restart and replay up to the target message
// ensure all new step events are regenerated as expected
newStepCh := subscribeToEvent(pb.cs.evsw, "replay-test", types.EventStringNewRoundStep(), 1)
if len(tokens) == 1 {
pb.replayReset(1, newStepCh)
} else {
i, err := strconv.Atoi(tokens[1])
if err != nil {
fmt.Println("back takes an integer argument")
} else if i > pb.count {
fmt.Printf("argument to back must not be larger than the current count (%d)\n", pb.count)
} else {
pb.replayReset(i, newStepCh)
}
}
case "rs":
// "rs" -> print entire round state
// "rs short" -> print height/round/step
// "rs <field>" -> print another field of the round state
rs := pb.cs.RoundState
if len(tokens) == 1 {
fmt.Println(rs)
} else {
switch tokens[1] {
case "short":
fmt.Printf("%v/%v/%v\n", rs.Height, rs.Round, rs.Step)
case "validators":
fmt.Println(rs.Validators)
case "proposal":
fmt.Println(rs.Proposal)
case "proposal_block":
fmt.Printf("%v %v\n", rs.ProposalBlockParts.StringShort(), rs.ProposalBlock.StringShort())
case "locked_round":
fmt.Println(rs.LockedRound)
case "locked_block":
fmt.Printf("%v %v\n", rs.LockedBlockParts.StringShort(), rs.LockedBlock.StringShort())
case "votes":
fmt.Println(rs.Votes.StringIndented(" "))
default:
fmt.Println("Unknown option", tokens[1])
}
}
case "n":
fmt.Println(pb.count)
}
}
return 0
}

func (mock *mockProxyApp) Commit() abci.Result {
return abci.NewResultOK(mock.appHash, "")
}

consensus/replay_file.go (new file, 269 lines)

@@ -0,0 +1,269 @@
package consensus
import (
"bufio"
"errors"
"fmt"
"os"
"strconv"
"strings"
. "github.com/tendermint/go-common"
cfg "github.com/tendermint/go-config"
dbm "github.com/tendermint/go-db"
bc "github.com/tendermint/tendermint/blockchain"
mempl "github.com/tendermint/tendermint/mempool"
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
)
//--------------------------------------------------------
// replay messages interactively or all at once
func RunReplayFile(config cfg.Config, walFile string, console bool) {
consensusState := newConsensusStateForReplay(config)
if err := consensusState.ReplayFile(walFile, console); err != nil {
Exit(Fmt("Error during consensus replay: %v", err))
}
}
// Replay msgs in file or start the console
func (cs *ConsensusState) ReplayFile(file string, console bool) error {
if cs.IsRunning() {
return errors.New("cs is already running, cannot replay")
}
if cs.wal != nil {
return errors.New("cs wal is open, cannot replay")
}
cs.startForReplay()
// ensure all new step events are regenerated as expected
newStepCh := subscribeToEvent(cs.evsw, "replay-test", types.EventStringNewRoundStep(), 1)
// just open the file for reading, no need to use wal
fp, err := os.OpenFile(file, os.O_RDONLY, 0666)
if err != nil {
return err
}
pb := newPlayback(file, fp, cs, cs.state.Copy())
defer pb.fp.Close()
var nextN int // apply N msgs in a row
for pb.scanner.Scan() {
if nextN == 0 && console {
nextN = pb.replayConsoleLoop()
}
if err := pb.cs.readReplayMessage(pb.scanner.Bytes(), newStepCh); err != nil {
return err
}
if nextN > 0 {
nextN -= 1
}
pb.count += 1
}
return nil
}
//------------------------------------------------
// playback manager
type playback struct {
cs *ConsensusState
fp *os.File
scanner *bufio.Scanner
count int // how many lines/msgs into the file are we
// replays can be reset to beginning
fileName string // so we can close/reopen the file
genesisState *sm.State // so the replay session knows where to restart from
}
func newPlayback(fileName string, fp *os.File, cs *ConsensusState, genState *sm.State) *playback {
return &playback{
cs: cs,
fp: fp,
fileName: fileName,
genesisState: genState,
scanner: bufio.NewScanner(fp),
}
}
// go back count steps by resetting the state and running (pb.count - count) steps
func (pb *playback) replayReset(count int, newStepCh chan interface{}) error {
pb.cs.Stop()
pb.cs.Wait()
newCS := NewConsensusState(pb.cs.config, pb.genesisState.Copy(), pb.cs.proxyAppConn, pb.cs.blockStore, pb.cs.mempool)
newCS.SetEventSwitch(pb.cs.evsw)
newCS.startForReplay()
pb.fp.Close()
fp, err := os.OpenFile(pb.fileName, os.O_RDONLY, 0666)
if err != nil {
return err
}
pb.fp = fp
pb.scanner = bufio.NewScanner(fp)
count = pb.count - count
log.Notice(Fmt("Reseting from %d to %d", pb.count, count))
pb.count = 0
pb.cs = newCS
for i := 0; pb.scanner.Scan() && i < count; i++ {
if err := pb.cs.readReplayMessage(pb.scanner.Bytes(), newStepCh); err != nil {
return err
}
pb.count += 1
}
return nil
}
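Since the consensus state machine has no undo operation, replayReset goes "back n" by rebuilding a fresh state from genesis and replaying the first (pb.count - n) messages. A toy illustration of that arithmetic over a plain slice (names are illustrative only):

```
package main

import "fmt"

// rewindByReplay models replayReset: reset to the beginning, then
// re-apply all but the last `back` messages.
func rewindByReplay(msgs []string, count, back int) []string {
	target := count - back // same arithmetic as replayReset
	applied := []string{}
	for i := 0; i < target && i < len(msgs); i++ {
		applied = append(applied, msgs[i])
	}
	return applied
}

func main() {
	msgs := []string{"propose", "prevote", "precommit", "commit"}
	// we've replayed 4 messages; "back 1" replays the first 3 from scratch
	fmt.Println(rewindByReplay(msgs, 4, 1)) // [propose prevote precommit]
}
```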
func (cs *ConsensusState) startForReplay() {
log.Warn("Replay commands are disabled until someone updates them and writes tests")
/* TODO:!
// since we replay tocks we just ignore ticks
go func() {
for {
select {
case <-cs.tickChan:
case <-cs.Quit:
return
}
}
}()*/
}
// console function for parsing input and running commands
func (pb *playback) replayConsoleLoop() int {
for {
fmt.Printf("> ")
bufReader := bufio.NewReader(os.Stdin)
line, more, err := bufReader.ReadLine()
if more {
Exit("input is too long")
} else if err != nil {
Exit(err.Error())
}
tokens := strings.Split(string(line), " ")
if len(tokens) == 0 {
continue
}
switch tokens[0] {
case "next":
// "next" -> replay next message
// "next N" -> replay next N messages
if len(tokens) == 1 {
return 0
} else {
i, err := strconv.Atoi(tokens[1])
if err != nil {
fmt.Println("next takes an integer argument")
} else {
return i
}
}
case "back":
// "back" -> go back one message
// "back N" -> go back N messages
// NOTE: "back" is not supported in the state machine design,
// so we restart and replay up to the desired point
// ensure all new step events are regenerated as expected
newStepCh := subscribeToEvent(pb.cs.evsw, "replay-test", types.EventStringNewRoundStep(), 1)
if len(tokens) == 1 {
pb.replayReset(1, newStepCh)
} else {
i, err := strconv.Atoi(tokens[1])
if err != nil {
fmt.Println("back takes an integer argument")
} else if i > pb.count {
fmt.Printf("argument to back must not be larger than the current count (%d)\n", pb.count)
} else {
pb.replayReset(i, newStepCh)
}
}
case "rs":
// "rs" -> print entire round state
// "rs short" -> print height/round/step
// "rs <field>" -> print another field of the round state
rs := pb.cs.RoundState
if len(tokens) == 1 {
fmt.Println(rs)
} else {
switch tokens[1] {
case "short":
fmt.Printf("%v/%v/%v\n", rs.Height, rs.Round, rs.Step)
case "validators":
fmt.Println(rs.Validators)
case "proposal":
fmt.Println(rs.Proposal)
case "proposal_block":
fmt.Printf("%v %v\n", rs.ProposalBlockParts.StringShort(), rs.ProposalBlock.StringShort())
case "locked_round":
fmt.Println(rs.LockedRound)
case "locked_block":
fmt.Printf("%v %v\n", rs.LockedBlockParts.StringShort(), rs.LockedBlock.StringShort())
case "votes":
fmt.Println(rs.Votes.StringIndented(" "))
default:
fmt.Println("Unknown option", tokens[1])
}
}
case "n":
fmt.Println(pb.count)
}
}
return 0
}
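The console is a plain read-tokenize-dispatch loop over stdin. A pared-down, runnable sketch of the same shape, handling only a hypothetical "next [N]" and "quit" rather than the full command set above:

```
package main

import (
	"bufio"
	"fmt"
	"os"
	"strconv"
	"strings"
)

func main() {
	reader := bufio.NewReader(os.Stdin)
	for {
		fmt.Print("> ")
		line, err := reader.ReadString('\n')
		if err != nil {
			return // EOF or read error ends the console
		}
		tokens := strings.Fields(line)
		if len(tokens) == 0 {
			continue
		}
		switch tokens[0] {
		case "next":
			n := 1
			if len(tokens) > 1 {
				v, err := strconv.Atoi(tokens[1])
				if err != nil {
					fmt.Println("next takes an integer argument")
					continue
				}
				n = v
			}
			fmt.Printf("replaying %d message(s)\n", n)
		case "quit":
			return
		default:
			fmt.Println("unknown command", tokens[0])
		}
	}
}
```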
//--------------------------------------------------------------------------------
// convenience for replay mode
func newConsensusStateForReplay(config cfg.Config) *ConsensusState {
// Get BlockStore
blockStoreDB := dbm.NewDB("blockstore", config.GetString("db_backend"), config.GetString("db_dir"))
blockStore := bc.NewBlockStore(blockStoreDB)
// Get State
stateDB := dbm.NewDB("state", config.GetString("db_backend"), config.GetString("db_dir"))
state := sm.MakeGenesisStateFromFile(stateDB, config.GetString("genesis_file"))
// Create proxyAppConn connection (consensus, mempool, query)
proxyApp := proxy.NewAppConns(config, proxy.DefaultClientCreator(config), NewHandshaker(config, state, blockStore))
_, err := proxyApp.Start()
if err != nil {
Exit(Fmt("Error starting proxy app conns: %v", err))
}
// add the chainid to the global config
config.Set("chain_id", state.ChainID)
// Make event switch
eventSwitch := types.NewEventSwitch()
if _, err := eventSwitch.Start(); err != nil {
Exit(Fmt("Failed to start event switch: %v", err))
}
mempool := mempl.NewMempool(config, proxyApp.Mempool())
consensusState := NewConsensusState(config, state.Copy(), proxyApp.Consensus(), blockStore, mempool)
consensusState.SetEventSwitch(eventSwitch)
return consensusState
}

consensus/replay_test.go

@@ -1,106 +1,641 @@
package consensus
import (
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"strings"
"testing"
"time"
. "github.com/tendermint/go-common"
"github.com/tendermint/tendermint/config/tendermint_test"
"github.com/tendermint/abci/example/dummy"
cmn "github.com/tendermint/go-common"
cfg "github.com/tendermint/go-config"
"github.com/tendermint/go-crypto"
dbm "github.com/tendermint/go-db"
"github.com/tendermint/go-wire"
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
)
/*
The easiest way to generate this data is to copy ~/.tendermint_test/somedir/* to ~/.tendermint
and to run a local node.
Be sure to set the db to "leveldb" to create a cswal file in ~/.tendermint/data/cswal.

If you need to change the signatures, you can use a script as follows:
The privBytes comes from config/tendermint_test/...

```
package main

import (
	"encoding/hex"
	"fmt"

	"github.com/tendermint/go-crypto"
)

func main() {
	signBytes, err := hex.DecodeString("7B22636861696E5F6964223A2274656E6465726D696E745F74657374222C22766F7465223A7B22626C6F636B5F68617368223A2242453544373939433846353044354645383533364334333932464443384537423342313830373638222C22626C6F636B5F70617274735F686561646572223A506172745365747B543A31204236323237323535464632307D2C22686569676874223A312C22726F756E64223A302C2274797065223A327D7D")
	if err != nil {
		panic(err)
	}
	privBytes, err := hex.DecodeString("27F82582AEFAE7AB151CFB01C48BB6C1A0DA78F9BDDA979A9F70A84D074EB07D3B3069C422E19688B45CBFAE7BB009FC0FA1B1EA86593519318B7214853803C8")
	if err != nil {
		panic(err)
	}
	privKey := crypto.PrivKeyEd25519{}
	copy(privKey[:], privBytes)
	signature := privKey.Sign(signBytes)
	signatureEd25519 := signature.(crypto.SignatureEd25519)
	fmt.Printf("Signature Bytes: %X\n", signatureEd25519[:])
}
```
*/

var testLog = `{"time":"2016-04-03T11:23:54.387Z","msg":[3,{"duration":972835254,"height":1,"round":0,"step":1}]}
{"time":"2016-04-03T11:23:54.388Z","msg":[1,{"height":1,"round":0,"step":"RoundStepPropose"}]}
{"time":"2016-04-03T11:23:54.388Z","msg":[2,{"msg":[17,{"Proposal":{"height":1,"round":0,"block_parts_header":{"total":1,"hash":"3BA1E90CB868DA6B4FD7F3589826EC461E9EB4EF"},"pol_round":-1,"signature":"3A2ECD5023B21EC144EC16CFF1B992A4321317B83EEDD8969FDFEA6EB7BF4389F38DDA3E7BB109D63A07491C16277A197B241CF1F05F5E485C59882ECACD9E07"}}],"peer_key":""}]}
{"time":"2016-04-03T11:23:54.389Z","msg":[2,{"msg":[19,{"Height":1,"Round":0,"Part":{"index":0,"bytes":"0101010F74656E6465726D696E745F7465737401011441D59F4B718AC00000000000000114C4B01D3810579550997AC5641E759E20D99B51C10001000100","proof":{"aunts":[]}}}],"peer_key":""}]}
{"time":"2016-04-03T11:23:54.390Z","msg":[1,{"height":1,"round":0,"step":"RoundStepPrevote"}]}
{"time":"2016-04-03T11:23:54.390Z","msg":[2,{"msg":[20,{"ValidatorIndex":0,"Vote":{"height":1,"round":0,"type":1,"block_hash":"4291966B8A9DFBA00AEC7C700F2718E61DF4331D","block_parts_header":{"total":1,"hash":"3BA1E90CB868DA6B4FD7F3589826EC461E9EB4EF"},"signature":"47D2A75A4E2F15DB1F0D1B656AC0637AF9AADDFEB6A156874F6553C73895E5D5DC948DBAEF15E61276C5342D0E638DFCB77C971CD282096EA8735A564A90F008"}}],"peer_key":""}]}
{"time":"2016-04-03T11:23:54.392Z","msg":[1,{"height":1,"round":0,"step":"RoundStepPrecommit"}]}
{"time":"2016-04-03T11:23:54.392Z","msg":[2,{"msg":[20,{"ValidatorIndex":0,"Vote":{"height":1,"round":0,"type":2,"block_hash":"4291966B8A9DFBA00AEC7C700F2718E61DF4331D","block_parts_header":{"total":1,"hash":"3BA1E90CB868DA6B4FD7F3589826EC461E9EB4EF"},"signature":"39147DA595F08B73CF8C899967C8403B5872FD9042FFA4E239159E0B6C5D9665C9CA81D766EACA2AE658872F94C2FCD1E34BF51859CD5B274DA8512BACE4B50D"}}],"peer_key":""}]}
`

func TestReplayCatchup(t *testing.T) {
	// write the needed wal to file
	f, err := ioutil.TempFile(os.TempDir(), "replay_test_")
	if err != nil {
		panic(err)
	}
	name := f.Name()
	_, err = f.WriteString(testLog)
	if err != nil {
		panic(err)
	}
	f.Close()

	cs := fixedConsensusState()

	// we've already precommitted on the first block
	// without replay catchup we would be halted here forever
	cs.privValidator.LastHeight = 1 // first block
	cs.privValidator.LastStep = 3   // precommit

	newBlockCh := subscribeToEvent(cs.evsw, "tester", types.EventStringNewBlock(), 0)

	// start timeout and receive routines
	cs.startRoutines(0)

	// open wal and run catchup messages
	openWAL(t, cs, name)
	if err := cs.catchupReplay(cs.Height); err != nil {
		panic(Fmt("Error on catchup replay %v", err))
	}

	after := time.After(time.Second * 15)
	select {
	case <-newBlockCh:
	case <-after:
		panic("Timed out waiting for new block")
	}
}

func openWAL(t *testing.T, cs *ConsensusState, file string) {
	// open the wal
	wal, err := NewWAL(file, config.GetBool("cswal_light"))
	if err != nil {
		panic(err)
	}
	wal.exists = true
	cs.wal = wal
}

func init() {
	config = tendermint_test.ResetConfig("consensus_replay_test")
}

// These tests ensure we can always recover from failure at any part of the consensus process.
// There are two general failure scenarios: failure during consensus, and failure while applying the block.
// Only the latter interacts with the app and store,
// but the former has to deal with restrictions on re-use of priv_validator keys.
// The `WAL Tests` are for failures during the consensus;
// the `Handshake Tests` are for failures in applying the block.
// With the help of the WAL, we can recover from it all!

var data_dir = path.Join(cmn.GoPath, "src/github.com/tendermint/tendermint/consensus", "test_data")

//------------------------------------------------------------------------------------------
// WAL Tests

// TODO: It would be better to verify explicitly which states we can recover from without the wal
// and which ones we need the wal for - then we'd also be able to only flush the
// wal writer when we need to, instead of with every message.

// the priv validator changes step at these lines for a block with 1 val and 1 part
var baseStepChanges = []int{3, 6, 8}

// test recovery from each line in each testCase
var testCases = []*testCase{
	newTestCase("empty_block", baseStepChanges),   // empty block (has 1 block part)
	newTestCase("small_block1", baseStepChanges),  // small block with txs in 1 block part
	newTestCase("small_block2", []int{3, 10, 12}), // small block with txs across 5 smaller block parts
}

type testCase struct {
	name    string
	log     string       // full cs wal
	stepMap map[int]int8 // map lines of log to privval step

	proposeLine   int
	prevoteLine   int
	precommitLine int
}

func newTestCase(name string, stepChanges []int) *testCase {
	if len(stepChanges) != 3 {
		panic(cmn.Fmt("a full wal has 3 step changes! Got array %v", stepChanges))
	}
	return &testCase{
		name:    name,
		log:     readWAL(path.Join(data_dir, name+".cswal")),
		stepMap: newMapFromChanges(stepChanges),

		proposeLine:   stepChanges[0],
		prevoteLine:   stepChanges[1],
		precommitLine: stepChanges[2],
	}
}

func newMapFromChanges(changes []int) map[int]int8 {
	changes = append(changes, changes[2]+1) // so we add the last step change to the map
	m := make(map[int]int8)
	var count int
	for changeNum, nextChange := range changes {
		for ; count < nextChange; count++ {
			m[count] = int8(changeNum)
		}
	}
	return m
}

func readWAL(p string) string {
	b, err := ioutil.ReadFile(p)
	if err != nil {
		panic(err)
	}
	return string(b)
}

func writeWAL(walMsgs string) string {
	tempDir := os.TempDir()
	walDir := path.Join(tempDir, "/wal"+cmn.RandStr(12))
	walFile := path.Join(walDir, "wal")

	// Create WAL directory
	err := cmn.EnsureDir(walDir, 0700)
	if err != nil {
		panic(err)
	}
	// Write the needed WAL to file
	err = cmn.WriteFile(walFile, []byte(walMsgs), 0600)
	if err != nil {
		panic(err)
	}
	return walFile
}

func waitForBlock(newBlockCh chan interface{}, thisCase *testCase, i int) {
	after := time.After(time.Second * 10)
	select {
	case <-newBlockCh:
	case <-after:
		panic(cmn.Fmt("Timed out waiting for new block for case '%s' line %d", thisCase.name, i))
	}
}

func runReplayTest(t *testing.T, cs *ConsensusState, walFile string, newBlockCh chan interface{},
	thisCase *testCase, i int) {
	cs.config.Set("cs_wal_file", walFile)
	cs.Start()
	// Wait to make a new block.
	// This is just a signal that we haven't halted; it's not something contained in the WAL itself.
	// Assuming the consensus state is running, replay of any WAL, including the empty one,
	// should eventually be followed by a new block, or else something is wrong.
	waitForBlock(newBlockCh, thisCase, i)
	cs.evsw.Stop()
	cs.Stop()
LOOP:
	for {
		select {
		case <-newBlockCh:
		default:
			break LOOP
		}
	}
	cs.Wait()
}
func toPV(pv PrivValidator) *types.PrivValidator {
return pv.(*types.PrivValidator)
}
func setupReplayTest(thisCase *testCase, nLines int, crashAfter bool) (*ConsensusState, chan interface{}, string, string) {
fmt.Println("-------------------------------------")
log.Notice(cmn.Fmt("Starting replay test %v (of %d lines of WAL). Crash after = %v", thisCase.name, nLines, crashAfter))
lineStep := nLines
if crashAfter {
lineStep -= 1
}
split := strings.Split(thisCase.log, "\n")
lastMsg := split[nLines]
// we write those lines up to (not including) one with the signature
walFile := writeWAL(strings.Join(split[:nLines], "\n") + "\n")
cs := fixedConsensusStateDummy()
// set the last step according to when we crashed vs the wal
toPV(cs.privValidator).LastHeight = 1 // first block
toPV(cs.privValidator).LastStep = thisCase.stepMap[lineStep]
log.Warn("setupReplayTest", "LastStep", toPV(cs.privValidator).LastStep)
newBlockCh := subscribeToEvent(cs.evsw, "tester", types.EventStringNewBlock(), 1)
return cs, newBlockCh, lastMsg, walFile
}
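setupReplayTest picks the privval's LastStep out of thisCase.stepMap, which newMapFromChanges expands from the three step-change line numbers. A worked example with baseStepChanges = {3, 6, 8} (the helper below duplicates the logic purely for illustration):

```
package main

import "fmt"

// stepMap mirrors newMapFromChanges: WAL line i maps to the privval step in
// effect when that line was written (0 none, 1 propose, 2 prevote, 3 precommit).
func stepMap(changes []int) map[int]int8 {
	changes = append(changes, changes[2]+1) // record the final step change too
	m := make(map[int]int8)
	count := 0
	for changeNum, nextChange := range changes {
		for ; count < nextChange; count++ {
			m[count] = int8(changeNum)
		}
	}
	return m
}

func main() {
	m := stepMap([]int{3, 6, 8}) // baseStepChanges
	for i := 0; i <= 8; i++ {
		fmt.Printf("wal line %d -> step %d\n", i, m[i])
	}
	// lines 0-2 -> 0, 3-5 -> 1 (proposal signed), 6-7 -> 2 (prevote), 8 -> 3
}
```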
func readTimedWALMessage(t *testing.T, walMsg string) TimedWALMessage {
var err error
var msg TimedWALMessage
wire.ReadJSON(&msg, []byte(walMsg), &err)
if err != nil {
t.Fatalf("Error reading json data: %v", err)
}
return msg
}
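Each WAL line is a timestamped JSON envelope whose msg payload is then decoded by type. The real code goes through go-wire's ReadJSON; the sketch below uses plain encoding/json on one of the testLog lines just to show the envelope's shape (timedMsg is a simplified stand-in, not the real TimedWALMessage):

```
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// timedMsg captures the {"time": ..., "msg": ...} envelope; the payload is
// kept raw, as a real decoder would dispatch on its type tag.
type timedMsg struct {
	Time time.Time       `json:"time"`
	Msg  json.RawMessage `json:"msg"`
}

func main() {
	// the second line of testLog above: a RoundStepPropose step change
	line := []byte(`{"time":"2016-04-03T11:23:54.388Z","msg":[1,{"height":1,"round":0,"step":"RoundStepPropose"}]}`)
	var m timedMsg
	if err := json.Unmarshal(line, &m); err != nil {
		panic(err)
	}
	fmt.Println(m.Time.UTC(), string(m.Msg))
}
```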
//-----------------------------------------------
// Test the log at every iteration, and set the privVal last step
// as if the log was written after signing, before the crash
func TestWALCrashAfterWrite(t *testing.T) {
for _, thisCase := range testCases {
split := strings.Split(thisCase.log, "\n")
for i := 0; i < len(split)-1; i++ {
cs, newBlockCh, _, walFile := setupReplayTest(thisCase, i+1, true)
runReplayTest(t, cs, walFile, newBlockCh, thisCase, i+1)
}
}
}
//-----------------------------------------------
// Test the log as if we crashed after signing but before writing.
// This relies on privValidator.LastSignature being set
func TestWALCrashBeforeWritePropose(t *testing.T) {
for _, thisCase := range testCases {
lineNum := thisCase.proposeLine
// setup replay test where last message is a proposal
cs, newBlockCh, proposalMsg, walFile := setupReplayTest(thisCase, lineNum, false)
msg := readTimedWALMessage(t, proposalMsg)
proposal := msg.Msg.(msgInfo).Msg.(*ProposalMessage)
// Set LastSig
toPV(cs.privValidator).LastSignBytes = types.SignBytes(cs.state.ChainID, proposal.Proposal)
toPV(cs.privValidator).LastSignature = proposal.Proposal.Signature
runReplayTest(t, cs, walFile, newBlockCh, thisCase, lineNum)
}
}
func TestWALCrashBeforeWritePrevote(t *testing.T) {
for _, thisCase := range testCases {
testReplayCrashBeforeWriteVote(t, thisCase, thisCase.prevoteLine, types.EventStringCompleteProposal())
}
}
func TestWALCrashBeforeWritePrecommit(t *testing.T) {
for _, thisCase := range testCases {
testReplayCrashBeforeWriteVote(t, thisCase, thisCase.precommitLine, types.EventStringPolka())
}
}
func testReplayCrashBeforeWriteVote(t *testing.T, thisCase *testCase, lineNum int, eventString string) {
// setup replay test where last message is a vote
cs, newBlockCh, voteMsg, walFile := setupReplayTest(thisCase, lineNum, false)
types.AddListenerForEvent(cs.evsw, "tester", eventString, func(data types.TMEventData) {
msg := readTimedWALMessage(t, voteMsg)
vote := msg.Msg.(msgInfo).Msg.(*VoteMessage)
// Set LastSig
toPV(cs.privValidator).LastSignBytes = types.SignBytes(cs.state.ChainID, vote.Vote)
toPV(cs.privValidator).LastSignature = vote.Vote.Signature
})
runReplayTest(t, cs, walFile, newBlockCh, thisCase, lineNum)
}
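These crash-before-write cases lean on the priv validator's LastSignBytes/LastSignature: on restart it may be asked to sign the very message it already signed, and must return the saved signature rather than refuse, while still rejecting a conflicting message as a double-sign. A toy sketch of that guard, collapsed to a single height/round/step (real validators key the check on height, round, and step):

```
package main

import (
	"bytes"
	"errors"
	"fmt"
)

// signer keeps the last signed bytes and signature, like a priv validator.
type signer struct {
	lastSignBytes []byte
	lastSignature []byte
}

func (s *signer) sign(msg []byte) ([]byte, error) {
	if s.lastSignBytes != nil {
		if bytes.Equal(msg, s.lastSignBytes) {
			return s.lastSignature, nil // crash recovery: replay the old signature
		}
		return nil, errors.New("refusing to double-sign a conflicting message")
	}
	sig := append([]byte("sig:"), msg...) // stand-in for a real Ed25519 signature
	s.lastSignBytes, s.lastSignature = msg, sig
	return sig, nil
}

func main() {
	var s signer
	sig1, _ := s.sign([]byte("precommit@1/0"))
	sig2, _ := s.sign([]byte("precommit@1/0")) // recovered, not re-signed
	fmt.Println(bytes.Equal(sig1, sig2))       // true
	if _, err := s.sign([]byte("precommit@1/0-conflicting")); err != nil {
		fmt.Println(err)
	}
}
```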
//------------------------------------------------------------------------------------------
// Handshake Tests
var (
NUM_BLOCKS = 6 // number of blocks in the test_data/many_blocks.cswal
mempool = types.MockMempool{}
testPartSize int
)
//---------------------------------------
// Test handshake/replay
// 0 - all synced up
// 1 - saved block but app and state are behind
// 2 - save block and committed but state is behind
var modes = []uint{0, 1, 2}
// Sync from scratch
func TestHandshakeReplayAll(t *testing.T) {
for _, m := range modes {
testHandshakeReplay(t, 0, m)
}
}
// Sync many, not from scratch
func TestHandshakeReplaySome(t *testing.T) {
for _, m := range modes {
testHandshakeReplay(t, 1, m)
}
}
// Sync from lagging by one
func TestHandshakeReplayOne(t *testing.T) {
for _, m := range modes {
testHandshakeReplay(t, NUM_BLOCKS-1, m)
}
}
// Sync from caught up
func TestHandshakeReplayNone(t *testing.T) {
for _, m := range modes {
testHandshakeReplay(t, NUM_BLOCKS, m)
}
}
// Make some blocks. Start a fresh app and apply nBlocks blocks. Then restart the app and sync it up with the remaining blocks
func testHandshakeReplay(t *testing.T, nBlocks int, mode uint) {
config := tendermint_test.ResetConfig("proxy_test_")
// copy the many_blocks file
walBody, err := cmn.ReadFile(path.Join(data_dir, "many_blocks.cswal"))
if err != nil {
t.Fatal(err)
}
walFile := writeWAL(string(walBody))
config.Set("cs_wal_file", walFile)
privVal := types.LoadPrivValidator(config.GetString("priv_validator_file"))
testPartSize = config.GetInt("block_part_size")
wal, err := NewWAL(walFile, false)
if err != nil {
t.Fatal(err)
}
chain, commits, err := makeBlockchainFromWAL(wal)
if err != nil {
t.Fatalf(err.Error())
}
state, store := stateAndStore(config, privVal.PubKey)
store.chain = chain
store.commits = commits
// run the chain through state.ApplyBlock to build up the tendermint state
latestAppHash := buildTMStateFromChain(config, state, chain, mode)
// make a new client creator
dummyApp := dummy.NewPersistentDummyApplication(path.Join(config.GetString("db_dir"), "2"))
clientCreator2 := proxy.NewLocalClientCreator(dummyApp)
if nBlocks > 0 {
// run nBlocks against a new client to build up the app state.
// use a throwaway tendermint state
proxyApp := proxy.NewAppConns(config, clientCreator2, nil)
state, _ := stateAndStore(config, privVal.PubKey)
buildAppStateFromChain(proxyApp, state, chain, nBlocks, mode)
}
// now start the app using the handshake - it should sync
handshaker := NewHandshaker(config, state, store)
proxyApp := proxy.NewAppConns(config, clientCreator2, handshaker)
if _, err := proxyApp.Start(); err != nil {
t.Fatalf("Error starting proxy app connections: %v", err)
}
// get the latest app hash from the app
res, err := proxyApp.Query().InfoSync()
if err != nil {
t.Fatal(err)
}
// the app hash should be synced up
if !bytes.Equal(latestAppHash, res.LastBlockAppHash) {
t.Fatalf("Expected app hashes to match after handshake/replay. got %X, expected %X", res.LastBlockAppHash, latestAppHash)
}
expectedBlocksToSync := NUM_BLOCKS - nBlocks
if nBlocks == NUM_BLOCKS && mode > 0 {
expectedBlocksToSync += 1
} else if nBlocks > 0 && mode == 1 {
expectedBlocksToSync += 1
}
if handshaker.NBlocks() != expectedBlocksToSync {
t.Fatalf("Expected handshake to sync %d blocks, got %d", expectedBlocksToSync, handshaker.NBlocks())
}
}
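The expected block count at the end of the test is worth a worked example. With NUM_BLOCKS = 6: syncing from scratch always replays 6; fully caught up still replays 1 in modes 1 and 2 (the stored block whose results were lost); and a mode-1 restart from nBlocks = 1 replays 6, because the app also lost the Commit of the block it had already executed. The helper below just reproduces that arithmetic:

```
package main

import "fmt"

// expectedBlocksToSync reproduces the count check at the end of
// testHandshakeReplay above.
func expectedBlocksToSync(numBlocks, nBlocks int, mode uint) int {
	n := numBlocks - nBlocks
	if nBlocks == numBlocks && mode > 0 {
		n++ // the last block was stored but never applied: replay it
	} else if nBlocks > 0 && mode == 1 {
		n++ // the app missed Commit for its last block: replay that one too
	}
	return n
}

func main() {
	for _, tc := range []struct {
		nBlocks int
		mode    uint
	}{{0, 0}, {1, 0}, {1, 1}, {1, 2}, {6, 0}, {6, 1}, {6, 2}} {
		fmt.Printf("nBlocks=%d mode=%d -> sync %d blocks\n",
			tc.nBlocks, tc.mode, expectedBlocksToSync(6, tc.nBlocks, tc.mode))
	}
}
```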
func applyBlock(st *sm.State, blk *types.Block, proxyApp proxy.AppConns) {
	err := st.ApplyBlock(nil, proxyApp.Consensus(), blk, blk.MakePartSet(testPartSize).Header(), mempool)
	if err != nil {
		panic(err)
	}
}
func buildAppStateFromChain(proxyApp proxy.AppConns,
state *sm.State, chain []*types.Block, nBlocks int, mode uint) {
// start a new app without handshake, play nBlocks blocks
if _, err := proxyApp.Start(); err != nil {
panic(err)
}
defer proxyApp.Stop()
switch mode {
case 0:
for i := 0; i < nBlocks; i++ {
block := chain[i]
applyBlock(state, block, proxyApp)
}
case 1, 2:
for i := 0; i < nBlocks-1; i++ {
block := chain[i]
applyBlock(state, block, proxyApp)
}
if mode == 2 {
// update the dummy height and apphash
// as if we ran Commit but did not save the new tendermint state
applyBlock(state, chain[nBlocks-1], proxyApp)
}
}
}
func buildTMStateFromChain(config cfg.Config, state *sm.State, chain []*types.Block, mode uint) []byte {
// run the whole chain against this client to build up the tendermint state
clientCreator := proxy.NewLocalClientCreator(dummy.NewPersistentDummyApplication(path.Join(config.GetString("db_dir"), "1")))
proxyApp := proxy.NewAppConns(config, clientCreator, nil) // sm.NewHandshaker(config, state, store, ReplayLastBlock))
if _, err := proxyApp.Start(); err != nil {
panic(err)
}
defer proxyApp.Stop()
var latestAppHash []byte
switch mode {
case 0:
// sync right up
for _, block := range chain {
applyBlock(state, block, proxyApp)
}
latestAppHash = state.AppHash
case 1, 2:
// sync up to the penultimate as if we stored the block.
// whether we commit or not depends on the appHash
for _, block := range chain[:len(chain)-1] {
applyBlock(state, block, proxyApp)
}
// apply the final block to a state copy so we can
// get the right next appHash but keep the state back
stateCopy := state.Copy()
applyBlock(stateCopy, chain[len(chain)-1], proxyApp)
latestAppHash = stateCopy.AppHash
}
return latestAppHash
}
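Modes 1 and 2 need the app hash that the final block produces without actually advancing the tendermint state past the penultimate block, hence the state.Copy() above. A toy model of that trick, where applyBlock just concatenates strings as a stand-in for real hashing:

```
package main

import "fmt"

// toyState stands in for sm.State: "applying" a block extends the app hash.
type toyState struct{ appHash string }

func (s *toyState) applyBlock(block string) { s.appHash += "|" + block }

func (s toyState) copy() *toyState { c := s; return &c }

func main() {
	st := &toyState{appHash: "genesis"}
	chain := []string{"b1", "b2", "b3"}
	for _, b := range chain[:len(chain)-1] {
		st.applyBlock(b) // sync up to the penultimate block
	}
	cp := st.copy()
	cp.applyBlock(chain[len(chain)-1]) // final block hits only the copy
	fmt.Println("state:   ", st.appHash) // genesis|b1|b2 (one block behind)
	fmt.Println("expected:", cp.appHash) // genesis|b1|b2|b3 (hash to check against)
}
```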
//--------------------------
// utils for making blocks
func makeBlockchainFromWAL(wal *WAL) ([]*types.Block, []*types.Commit, error) {
// Search for height marker
gr, found, err := wal.group.Search("#ENDHEIGHT: ", makeHeightSearchFunc(0))
if err != nil {
return nil, nil, err
}
if !found {
return nil, nil, errors.New(cmn.Fmt("WAL does not contain height %d.", 1))
}
defer gr.Close()
log.Notice("Build a blockchain by reading from the WAL")
var blockParts *types.PartSet
var blocks []*types.Block
var commits []*types.Commit
for {
line, err := gr.ReadLine()
if err != nil {
if err == io.EOF {
break
} else {
return nil, nil, err
}
}
piece, err := readPieceFromWAL([]byte(line))
if err != nil {
return nil, nil, err
}
if piece == nil {
continue
}
switch p := piece.(type) {
case *types.PartSetHeader:
// if it's not the first one, we have a full block
if blockParts != nil {
var n int
block := wire.ReadBinary(&types.Block{}, blockParts.GetReader(), types.MaxBlockSize, &n, &err).(*types.Block)
blocks = append(blocks, block)
}
blockParts = types.NewPartSetFromHeader(*p)
case *types.Part:
_, err := blockParts.AddPart(p, false)
if err != nil {
return nil, nil, err
}
case *types.Vote:
if p.Type == types.VoteTypePrecommit {
commit := &types.Commit{
BlockID: p.BlockID,
Precommits: []*types.Vote{p},
}
commits = append(commits, commit)
}
}
}
// grab the last block too
var n int
block := wire.ReadBinary(&types.Block{}, blockParts.GetReader(), types.MaxBlockSize, &n, &err).(*types.Block)
blocks = append(blocks, block)
return blocks, commits, nil
}
func readPieceFromWAL(msgBytes []byte) (interface{}, error) {
// Skip over empty and meta lines
if len(msgBytes) == 0 || msgBytes[0] == '#' {
return nil, nil
}
var err error
var msg TimedWALMessage
wire.ReadJSON(&msg, msgBytes, &err)
if err != nil {
fmt.Println("MsgBytes:", msgBytes, string(msgBytes))
return nil, fmt.Errorf("Error reading json data: %v", err)
}
// for logging
switch m := msg.Msg.(type) {
case msgInfo:
switch msg := m.Msg.(type) {
case *ProposalMessage:
return &msg.Proposal.BlockPartsHeader, nil
case *BlockPartMessage:
return msg.Part, nil
case *VoteMessage:
return msg.Vote, nil
}
}
return nil, nil
}
// make some bogus txs
func txsFunc(blockNum int) (txs []types.Tx) {
for i := 0; i < 10; i++ {
txs = append(txs, types.Tx([]byte{byte(blockNum), byte(i)}))
}
return txs
}
// sign a commit vote
func signCommit(chainID string, privVal *types.PrivValidator, height, round int, hash []byte, header types.PartSetHeader) *types.Vote {
vote := &types.Vote{
ValidatorIndex: 0,
ValidatorAddress: privVal.Address,
Height: height,
Round: round,
Type: types.VoteTypePrecommit,
BlockID: types.BlockID{hash, header},
}
sig := privVal.Sign(types.SignBytes(chainID, vote))
vote.Signature = sig
return vote
}
// make a blockchain with one validator
func makeBlockchain(t *testing.T, chainID string, nBlocks int, privVal *types.PrivValidator, proxyApp proxy.AppConns, state *sm.State) (blockchain []*types.Block, commits []*types.Commit) {
prevHash := state.LastBlockID.Hash
lastCommit := new(types.Commit)
prevParts := types.PartSetHeader{}
valHash := state.Validators.Hash()
prevBlockID := types.BlockID{prevHash, prevParts}
for i := 1; i < nBlocks+1; i++ {
block, parts := types.MakeBlock(i, chainID, txsFunc(i), lastCommit,
prevBlockID, valHash, state.AppHash, testPartSize)
fmt.Println(i)
fmt.Println(block.LastBlockID)
err := state.ApplyBlock(nil, proxyApp.Consensus(), block, block.MakePartSet(testPartSize).Header(), mempool)
if err != nil {
t.Fatal(i, err)
}
voteSet := types.NewVoteSet(chainID, i, 0, types.VoteTypePrecommit, state.Validators)
vote := signCommit(chainID, privVal, i, 0, block.Hash(), parts.Header())
_, err = voteSet.AddVote(vote)
if err != nil {
t.Fatal(err)
}
prevHash = block.Hash()
prevParts = parts.Header()
lastCommit = voteSet.MakeCommit()
prevBlockID = types.BlockID{prevHash, prevParts}
blockchain = append(blockchain, block)
commits = append(commits, lastCommit)
}
return blockchain, commits
}
// fresh state and mock store
func stateAndStore(config cfg.Config, pubKey crypto.PubKey) (*sm.State, *mockBlockStore) {
stateDB := dbm.NewMemDB()
return sm.MakeGenesisState(stateDB, &types.GenesisDoc{
ChainID: config.GetString("chain_id"),
Validators: []types.GenesisValidator{
types.GenesisValidator{pubKey, 10000, "test"},
},
AppHash: nil,
}), NewMockBlockStore(config)
}
//----------------------------------
// mock block store
type mockBlockStore struct {
config cfg.Config
chain []*types.Block
commits []*types.Commit
}
// TODO: NewBlockStore(db.NewMemDB) ...
func NewMockBlockStore(config cfg.Config) *mockBlockStore {
return &mockBlockStore{config, nil, nil}
}
func (bs *mockBlockStore) Height() int { return len(bs.chain) }
func (bs *mockBlockStore) LoadBlock(height int) *types.Block { return bs.chain[height-1] }
func (bs *mockBlockStore) LoadBlockMeta(height int) *types.BlockMeta {
block := bs.chain[height-1]
return &types.BlockMeta{
BlockID: types.BlockID{block.Hash(), block.MakePartSet(bs.config.GetInt("block_part_size")).Header()},
Header: block.Header,
}
}
func (bs *mockBlockStore) LoadBlockPart(height int, index int) *types.Part { return nil }
func (bs *mockBlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) {
}
func (bs *mockBlockStore) LoadBlockCommit(height int) *types.Commit {
return bs.commits[height-1]
}
func (bs *mockBlockStore) LoadSeenCommit(height int) *types.Commit {
return bs.commits[height-1]
}

File diff suppressed because it is too large

consensus/state_test.go

@@ -6,9 +6,8 @@ import (
"testing"
"time"
"github.com/tendermint/tendermint/config/tendermint_test"
//"github.com/tendermint/go-events"
. "github.com/tendermint/go-common"
"github.com/tendermint/tendermint/config/tendermint_test"
"github.com/tendermint/tendermint/types"
)
@@ -31,7 +30,7 @@ x * TestBadProposal - 2 vals, bad proposal (bad block state hash), should prevot
FullRoundSuite
x * TestFullRound1 - 1 val, full successful round
x * TestFullRoundNil - 1 val, full round of nil
x * TestFullRound2 - 2 vals, both required for fuill round
x * TestFullRound2 - 2 vals, both required for full round
LockSuite
x * TestLockNoPOL - 2 vals, 4 rounds. one val locked, precommits nil every round except first.
x * TestLockPOLRelock - 4 vals, one precommits, other 3 polka at next round, so we unlock and precommit the polka
@@ -66,21 +65,21 @@ func TestProposerSelection0(t *testing.T) {
<-newRoundCh
// lets commit a block and ensure proposer for the next height is correct
prop := cs1.GetRoundState().Validators.Proposer()
if !bytes.Equal(prop.Address, cs1.privValidator.Address) {
panic(Fmt("expected proposer to be validator %d. Got %X", 0, prop.Address))
prop := cs1.GetRoundState().Validators.GetProposer()
if !bytes.Equal(prop.Address, cs1.privValidator.GetAddress()) {
t.Fatalf("expected proposer to be validator %d. Got %X", 0, prop.Address)
}
// wait for complete proposal
<-proposalCh
rs := cs1.GetRoundState()
signAddVoteToFromMany(types.VoteTypePrecommit, cs1, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), nil, vss[1:]...)
signAddVotes(cs1, types.VoteTypePrecommit, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vss[1:]...)
// wait for new round so next validator is set
<-newRoundCh
prop = cs1.GetRoundState().Validators.Proposer()
prop = cs1.GetRoundState().Validators.GetProposer()
if !bytes.Equal(prop.Address, vss[1].Address) {
panic(Fmt("expected proposer to be validator %d. Got %X", 1, prop.Address))
}
@@ -101,13 +100,13 @@ func TestProposerSelection2(t *testing.T) {
// everyone just votes nil. we get a new proposer each round
for i := 0; i < len(vss); i++ {
prop := cs1.GetRoundState().Validators.Proposer()
prop := cs1.GetRoundState().Validators.GetProposer()
if !bytes.Equal(prop.Address, vss[(i+2)%len(vss)].Address) {
panic(Fmt("expected proposer to be validator %d. Got %X", (i+2)%len(vss), prop.Address))
}
rs := cs1.GetRoundState()
signAddVoteToFromMany(types.VoteTypePrecommit, cs1, nil, rs.ProposalBlockParts.Header(), nil, vss[1:]...)
signAddVotes(cs1, types.VoteTypePrecommit, nil, rs.ProposalBlockParts.Header(), vss[1:]...)
<-newRoundCh // wait for the new round event each round
incrementRound(vss[1:]...)
@@ -180,12 +179,14 @@ func TestEnterProposeYesPrivValidator(t *testing.T) {
func TestBadProposal(t *testing.T) {
cs1, vss := randConsensusState(2)
height, round := cs1.Height, cs1.Round
cs2 := vss[1]
vs2 := vss[1]
partSize := config.GetInt("block_part_size")
proposalCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringCompleteProposal(), 1)
voteCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringVote(), 1)
propBlock, _ := cs1.createProposalBlock() //changeProposer(t, cs1, cs2)
propBlock, _ := cs1.createProposalBlock() //changeProposer(t, cs1, vs2)
// make the second validator the proposer by incrementing round
round = round + 1
@@ -198,10 +199,10 @@ func TestBadProposal(t *testing.T) {
}
stateHash[0] = byte((stateHash[0] + 1) % 255)
propBlock.AppHash = stateHash
propBlockParts := propBlock.MakePartSet()
proposal := types.NewProposal(cs2.Height, round, propBlockParts.Header(), -1)
if err := cs2.SignProposal(config.GetString("chain_id"), proposal); err != nil {
panic("failed to sign bad proposal: " + err.Error())
propBlockParts := propBlock.MakePartSet(partSize)
proposal := types.NewProposal(vs2.Height, round, propBlockParts.Header(), -1, types.BlockID{})
if err := vs2.SignProposal(config.GetString("chain_id"), proposal); err != nil {
t.Fatal("failed to sign bad proposal", err)
}
// set the proposal block
@@ -218,14 +219,15 @@ func TestBadProposal(t *testing.T) {
validatePrevote(t, cs1, round, vss[0], nil)
// add bad prevote from cs2 and wait for it
signAddVoteToFrom(types.VoteTypePrevote, cs1, cs2, propBlock.Hash(), propBlock.MakePartSet().Header(), voteCh)
// add bad prevote from vs2 and wait for it
signAddVotes(cs1, types.VoteTypePrevote, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2)
<-voteCh
// wait for precommit
<-voteCh
validatePrecommit(t, cs1, round, 0, vss[0], nil, nil)
signAddVoteToFrom(types.VoteTypePrecommit, cs1, cs2, propBlock.Hash(), propBlock.MakePartSet().Header(), voteCh)
signAddVotes(cs1, types.VoteTypePrecommit, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2)
}
//----------------------------------------------------------------------------------------------------
@@ -236,7 +238,7 @@ func TestFullRound1(t *testing.T) {
cs, vss := randConsensusState(1)
height, round := cs.Height, cs.Round
voteCh := subscribeToEvent(cs.evsw, "tester", types.EventStringVote(), 1)
voteCh := subscribeToEvent(cs.evsw, "tester", types.EventStringVote(), 0)
propCh := subscribeToEvent(cs.evsw, "tester", types.EventStringCompleteProposal(), 1)
newRoundCh := subscribeToEvent(cs.evsw, "tester", types.EventStringNewRound(), 1)
@@ -249,6 +251,8 @@ func TestFullRound1(t *testing.T) {
propBlockHash := re.(types.EventDataRoundState).RoundState.(*RoundState).ProposalBlock.Hash()
<-voteCh // wait for prevote
// NOTE: voteChan cap of 0 ensures we can complete this
// before consensus can move to the next height (and cause a race condition)
validatePrevote(t, cs, round, vss[0], propBlockHash)
<-voteCh // wait for precommit
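The cap-0 subscription flagged in the NOTE is doing real synchronization work: an unbuffered channel makes every send a rendezvous, so the test observes each vote before consensus can race ahead to the next height. A standalone demonstration of the difference:

```
package main

import (
	"fmt"
	"time"
)

// With capacity 0, the sender blocks until the receiver takes the value;
// a buffered channel would let the sender run ahead of the test.
func main() {
	voteCh := make(chan string) // cap 0, like the subscription above
	go func() {
		for _, v := range []string{"prevote", "precommit"} {
			voteCh <- v // blocks until the test receives
		}
	}()
	for i := 0; i < 2; i++ {
		fmt.Println("observed:", <-voteCh)
		time.Sleep(10 * time.Millisecond) // the sender stays parked meanwhile
	}
}
```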
@@ -280,7 +284,7 @@ func TestFullRoundNil(t *testing.T) {
// where the first validator has to wait for votes from the second
func TestFullRound2(t *testing.T) {
cs1, vss := randConsensusState(2)
cs2 := vss[1]
vs2 := vss[1]
height, round := cs1.Height, cs1.Round
voteCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringVote(), 1)
@@ -295,8 +299,9 @@ func TestFullRound2(t *testing.T) {
rs := cs1.GetRoundState()
propBlockHash, propPartsHeader := rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header()
// prevote arrives from cs2:
signAddVoteToFrom(types.VoteTypePrevote, cs1, cs2, propBlockHash, propPartsHeader, voteCh)
// prevote arrives from vs2:
signAddVotes(cs1, types.VoteTypePrevote, propBlockHash, propPartsHeader, vs2)
<-voteCh
<-voteCh //precommit
@@ -305,8 +310,9 @@ func TestFullRound2(t *testing.T) {
// we should be stuck in limbo waiting for more precommits
// precommit arrives from cs2:
signAddVoteToFrom(types.VoteTypePrecommit, cs1, cs2, propBlockHash, propPartsHeader, voteCh)
// precommit arrives from vs2:
signAddVotes(cs1, types.VoteTypePrecommit, propBlockHash, propPartsHeader, vs2)
<-voteCh
// wait to finish commit, propose in next height
<-newBlockCh
@@ -319,9 +325,11 @@ func TestFullRound2(t *testing.T) {
// two vals take turns proposing. val1 locks on first one, precommits nil on everything else
func TestLockNoPOL(t *testing.T) {
cs1, vss := randConsensusState(2)
cs2 := vss[1]
vs2 := vss[1]
height := cs1.Height
partSize := config.GetInt("block_part_size")
timeoutProposeCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutPropose(), 1)
timeoutWaitCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutWait(), 1)
voteCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringVote(), 1)
@@ -343,8 +351,9 @@ func TestLockNoPOL(t *testing.T) {
<-voteCh // prevote
// we should now be stuck in limbo forever, waiting for more prevotes
// prevote arrives from cs2:
signAddVoteToFrom(types.VoteTypePrevote, cs1, cs2, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), voteCh)
// prevote arrives from vs2:
signAddVotes(cs1, types.VoteTypePrevote, cs1.ProposalBlock.Hash(), cs1.ProposalBlockParts.Header(), vs2)
<-voteCh // prevote
<-voteCh // precommit
@@ -357,7 +366,8 @@ func TestLockNoPOL(t *testing.T) {
hash := make([]byte, len(theBlockHash))
copy(hash, theBlockHash)
hash[0] = byte((hash[0] + 1) % 255)
signAddVoteToFrom(types.VoteTypePrecommit, cs1, cs2, hash, rs.ProposalBlock.MakePartSet().Header(), voteCh)
signAddVotes(cs1, types.VoteTypePrecommit, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2)
<-voteCh // precommit
// (note we're entering precommit for a second time this round)
// but with invalid args. then we enterPrecommitWait, and the timeout to new round
@@ -371,7 +381,7 @@ func TestLockNoPOL(t *testing.T) {
Round2 (cs1, B) // B B2
*/
incrementRound(cs2)
incrementRound(vs2)
// now we're on a new round and not the proposer, so wait for timeout
re = <-timeoutProposeCh
@@ -388,7 +398,8 @@ func TestLockNoPOL(t *testing.T) {
validatePrevote(t, cs1, 1, vss[0], rs.LockedBlock.Hash())
// add a conflicting prevote from the other validator
signAddVoteToFrom(types.VoteTypePrevote, cs1, cs2, hash, rs.ProposalBlock.MakePartSet().Header(), voteCh)
signAddVotes(cs1, types.VoteTypePrevote, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2)
<-voteCh
// now we're going to enter prevote again, but with invalid args
// and then prevote wait, which should timeout. then wait for precommit
@@ -400,9 +411,10 @@ func TestLockNoPOL(t *testing.T) {
// we should precommit nil and be locked on the proposal
validatePrecommit(t, cs1, 1, 0, vss[0], nil, theBlockHash)
// add conflicting precommit from cs2
// add conflicting precommit from vs2
// NOTE: in practice we should never get to a point where there are precommits for different blocks at the same round
signAddVoteToFrom(types.VoteTypePrecommit, cs1, cs2, hash, rs.ProposalBlock.MakePartSet().Header(), voteCh)
signAddVotes(cs1, types.VoteTypePrecommit, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2)
<-voteCh
// (note we're entering precommit for a second time this round, but with invalid args
// then we enterPrecommitWait and timeout into NewRound
@@ -411,10 +423,10 @@ func TestLockNoPOL(t *testing.T) {
<-newRoundCh
log.Notice("#### ONTO ROUND 2")
/*
Round3 (cs2, _) // B, B2
Round3 (vs2, _) // B, B2
*/
incrementRound(cs2)
incrementRound(vs2)
re = <-proposalCh
rs = re.(types.EventDataRoundState).RoundState.(*RoundState)
@@ -428,33 +440,36 @@ func TestLockNoPOL(t *testing.T) {
validatePrevote(t, cs1, 2, vss[0], rs.LockedBlock.Hash())
signAddVoteToFrom(types.VoteTypePrevote, cs1, cs2, hash, rs.ProposalBlock.MakePartSet().Header(), voteCh)
signAddVotes(cs1, types.VoteTypePrevote, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2)
<-voteCh
<-timeoutWaitCh // prevote wait
<-voteCh // precommit
validatePrecommit(t, cs1, 2, 0, vss[0], nil, theBlockHash) // precommit nil but be locked on proposal
signAddVoteToFrom(types.VoteTypePrecommit, cs1, cs2, hash, rs.ProposalBlock.MakePartSet().Header(), voteCh) // NOTE: conflicting precommits at same height
validatePrecommit(t, cs1, 2, 0, vss[0], nil, theBlockHash) // precommit nil but be locked on proposal
signAddVotes(cs1, types.VoteTypePrecommit, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2) // NOTE: conflicting precommits at same height
<-voteCh
<-timeoutWaitCh
// before we time out into new round, set next proposal block
prop, propBlock := decideProposal(cs1, cs2, cs2.Height, cs2.Round+1)
prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1)
if prop == nil || propBlock == nil {
panic("Failed to create proposal block with cs2")
t.Fatal("Failed to create proposal block with vs2")
}
incrementRound(cs2)
incrementRound(vs2)
<-newRoundCh
log.Notice("#### ONTO ROUND 3")
/*
Round4 (cs2, C) // B C // B C
Round4 (vs2, C) // B C // B C
*/
// now we're on a new round and not the proposer
// so set the proposal block
cs1.SetProposalAndBlock(prop, propBlock, propBlock.MakePartSet(), "")
cs1.SetProposalAndBlock(prop, propBlock, propBlock.MakePartSet(partSize), "")
<-proposalCh
<-voteCh // prevote
@@ -462,19 +477,24 @@ func TestLockNoPOL(t *testing.T) {
// prevote for locked block (not proposal)
validatePrevote(t, cs1, 0, vss[0], cs1.LockedBlock.Hash())
signAddVoteToFrom(types.VoteTypePrevote, cs1, cs2, propBlock.Hash(), propBlock.MakePartSet().Header(), voteCh)
signAddVotes(cs1, types.VoteTypePrevote, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2)
<-voteCh
<-timeoutWaitCh
<-voteCh
validatePrecommit(t, cs1, 2, 0, vss[0], nil, theBlockHash) // precommit nil but locked on proposal
signAddVoteToFrom(types.VoteTypePrecommit, cs1, cs2, propBlock.Hash(), propBlock.MakePartSet().Header(), voteCh) // NOTE: conflicting precommits at same height
validatePrecommit(t, cs1, 2, 0, vss[0], nil, theBlockHash) // precommit nil but locked on proposal
signAddVotes(cs1, types.VoteTypePrecommit, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) // NOTE: conflicting precommits at same height
<-voteCh
}
// 4 vals, one precommits, other 3 polka at next round, so we unlock and precommit the polka
func TestLockPOLRelock(t *testing.T) {
cs1, vss := randConsensusState(4)
cs2, cs3, cs4 := vss[1], vss[2], vss[3]
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
partSize := config.GetInt("block_part_size")
timeoutProposeCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutPropose(), 1)
timeoutWaitCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutWait(), 1)
@@ -483,14 +503,14 @@ func TestLockPOLRelock(t *testing.T) {
newRoundCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringNewRound(), 1)
newBlockCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringNewBlockHeader(), 1)
log.Debug("cs2 last round", "lr", cs2.PrivValidator.LastRound)
log.Debug("vs2 last round", "lr", vs2.PrivValidator.LastRound)
// everything done from perspective of cs1
/*
Round1 (cs1, B) // B B B B// B nil B nil
eg. cs2 and cs4 didn't see the 2/3 prevotes
eg. vs2 and vs4 didn't see the 2/3 prevotes
*/
// start round and wait for propose and prevote
@@ -500,26 +520,27 @@ func TestLockPOLRelock(t *testing.T) {
re := <-proposalCh
rs := re.(types.EventDataRoundState).RoundState.(*RoundState)
theBlockHash := rs.ProposalBlock.Hash()
theBlockPartsHeader := rs.ProposalBlockParts.Header()
<-voteCh // prevote
signAddVoteToFromMany(types.VoteTypePrevote, cs1, theBlockHash, theBlockPartsHeader, voteCh, cs2, cs3, cs4)
signAddVotes(cs1, types.VoteTypePrevote, cs1.ProposalBlock.Hash(), cs1.ProposalBlockParts.Header(), vs2, vs3, vs4)
_, _, _ = <-voteCh, <-voteCh, <-voteCh // prevotes
<-voteCh // our precommit
// the proposed block should now be locked and our precommit added
validatePrecommit(t, cs1, 0, 0, vss[0], theBlockHash, theBlockHash)
// add precommits from the rest
signAddVoteToFromMany(types.VoteTypePrecommit, cs1, nil, types.PartSetHeader{}, voteCh, cs2, cs4)
signAddVoteToFrom(types.VoteTypePrecommit, cs1, cs3, theBlockHash, theBlockPartsHeader, voteCh)
signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs4)
signAddVotes(cs1, types.VoteTypePrecommit, cs1.ProposalBlock.Hash(), cs1.ProposalBlockParts.Header(), vs3)
_, _, _ = <-voteCh, <-voteCh, <-voteCh // precommits
// before we timeout to the new round set the new proposal
prop, propBlock := decideProposal(cs1, cs2, cs2.Height, cs2.Round+1)
propBlockParts := propBlock.MakePartSet()
prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1)
propBlockParts := propBlock.MakePartSet(partSize)
propBlockHash := propBlock.Hash()
incrementRound(cs2, cs3, cs4)
incrementRound(vs2, vs3, vs4)
// timeout to new round
<-timeoutWaitCh
@@ -531,7 +552,7 @@ func TestLockPOLRelock(t *testing.T) {
log.Notice("### ONTO ROUND 1")
/*
Round2 (cs2, C) // B C C C // C C C _)
Round2 (vs2, C) // B C C C // C C C _)
cs1 changes lock!
*/
@@ -549,7 +570,8 @@ func TestLockPOLRelock(t *testing.T) {
validatePrevote(t, cs1, 0, vss[0], theBlockHash)
// now lets add prevotes from everyone else for the new block
signAddVoteToFromMany(types.VoteTypePrevote, cs1, propBlockHash, propBlockParts.Header(), voteCh, cs2, cs3, cs4)
signAddVotes(cs1, types.VoteTypePrevote, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4)
_, _, _ = <-voteCh, <-voteCh, <-voteCh // prevotes
// now either we go to PrevoteWait or Precommit
select {
@@ -563,7 +585,8 @@ func TestLockPOLRelock(t *testing.T) {
// we should have unlocked and locked on the new block
validatePrecommit(t, cs1, 1, 1, vss[0], propBlockHash, propBlockHash)
signAddVoteToFromMany(types.VoteTypePrecommit, cs1, propBlockHash, propBlockParts.Header(), voteCh, cs2, cs3)
signAddVotes(cs1, types.VoteTypePrecommit, propBlockHash, propBlockParts.Header(), vs2, vs3)
_, _ = <-voteCh, <-voteCh
be := <-newBlockCh
b := be.(types.EventDataNewBlockHeader)
@@ -581,14 +604,16 @@ func TestLockPOLRelock(t *testing.T) {
// 4 vals, one precommits, other 3 polka at next round, so we unlock and precommit the polka
func TestLockPOLUnlock(t *testing.T) {
cs1, vss := randConsensusState(4)
cs2, cs3, cs4 := vss[1], vss[2], vss[3]
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
partSize := config.GetInt("block_part_size")
proposalCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringCompleteProposal(), 1)
timeoutProposeCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutPropose(), 1)
timeoutWaitCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutWait(), 1)
newRoundCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringNewRound(), 1)
unlockCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringUnlock(), 1)
voteCh := subscribeToVoter(cs1, cs1.privValidator.Address)
voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress())
// everything done from perspective of cs1
@@ -607,7 +632,7 @@ func TestLockPOLUnlock(t *testing.T) {
<-voteCh // prevote
signAddVoteToFromMany(types.VoteTypePrevote, cs1, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), nil, cs2, cs3, cs4)
signAddVotes(cs1, types.VoteTypePrevote, cs1.ProposalBlock.Hash(), cs1.ProposalBlockParts.Header(), vs2, vs3, vs4)
<-voteCh //precommit
@@ -617,14 +642,14 @@ func TestLockPOLUnlock(t *testing.T) {
rs = cs1.GetRoundState()
// add precommits from the rest
signAddVoteToFromMany(types.VoteTypePrecommit, cs1, nil, types.PartSetHeader{}, nil, cs2, cs4)
signAddVoteToFrom(types.VoteTypePrecommit, cs1, cs3, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), nil)
signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs4)
signAddVotes(cs1, types.VoteTypePrecommit, cs1.ProposalBlock.Hash(), cs1.ProposalBlockParts.Header(), vs3)
// before we time out into new round, set next proposal block
prop, propBlock := decideProposal(cs1, cs2, cs2.Height, cs2.Round+1)
propBlockParts := propBlock.MakePartSet()
prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1)
propBlockParts := propBlock.MakePartSet(partSize)
incrementRound(cs2, cs3, cs4)
incrementRound(vs2, vs3, vs4)
// timeout to new round
re = <-timeoutWaitCh
@@ -637,7 +662,7 @@ func TestLockPOLUnlock(t *testing.T) {
<-newRoundCh
log.Notice("#### ONTO ROUND 1")
/*
Round2 (cs2, C) // B nil nil nil // nil nil nil _
Round2 (vs2, C) // B nil nil nil // nil nil nil _
cs1 unlocks!
*/
@@ -654,7 +679,7 @@ func TestLockPOLUnlock(t *testing.T) {
<-voteCh
validatePrevote(t, cs1, 0, vss[0], lockedBlockHash)
// now lets add prevotes from everyone else for nil (a polka!)
signAddVoteToFromMany(types.VoteTypePrevote, cs1, nil, types.PartSetHeader{}, nil, cs2, cs3, cs4)
signAddVotes(cs1, types.VoteTypePrevote, nil, types.PartSetHeader{}, vs2, vs3, vs4)
// the polka makes us unlock and precommit nil
<-unlockCh
@@ -664,7 +689,7 @@ func TestLockPOLUnlock(t *testing.T) {
// NOTE: since we don't relock on nil, the lock round is 0
validatePrecommit(t, cs1, 1, 0, vss[0], nil, nil)
signAddVoteToFromMany(types.VoteTypePrecommit, cs1, nil, types.PartSetHeader{}, nil, cs2, cs3)
signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs3)
<-newRoundCh
}
@@ -674,13 +699,15 @@ func TestLockPOLUnlock(t *testing.T) {
// then we see the polka from round 1 but shouldn't unlock
func TestLockPOLSafety1(t *testing.T) {
cs1, vss := randConsensusState(4)
cs2, cs3, cs4 := vss[1], vss[2], vss[3]
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
partSize := config.GetInt("block_part_size")
proposalCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringCompleteProposal(), 1)
timeoutProposeCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutPropose(), 1)
timeoutWaitCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutWait(), 1)
newRoundCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringNewRound(), 1)
voteCh := subscribeToVoter(cs1, cs1.privValidator.Address)
voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress())
// start round and wait for propose and prevote
startTestRound(cs1, cs1.Height, 0)
@@ -694,7 +721,7 @@ func TestLockPOLSafety1(t *testing.T) {
validatePrevote(t, cs1, 0, vss[0], propBlock.Hash())
// the others sign a polka but we don't see it
prevotes := signVoteMany(types.VoteTypePrevote, propBlock.Hash(), propBlock.MakePartSet().Header(), cs2, cs3, cs4)
prevotes := signVotes(types.VoteTypePrevote, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2, vs3, vs4)
// before we time out into new round, set next proposer
// and next proposal block
@@ -708,13 +735,13 @@ func TestLockPOLSafety1(t *testing.T) {
log.Warn("old prop", "hash", fmt.Sprintf("%X", propBlock.Hash()))
// we do see them precommit nil
signAddVoteToFromMany(types.VoteTypePrecommit, cs1, nil, types.PartSetHeader{}, nil, cs2, cs3, cs4)
signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs3, vs4)
prop, propBlock := decideProposal(cs1, cs2, cs2.Height, cs2.Round+1)
prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1)
propBlockHash := propBlock.Hash()
propBlockParts := propBlock.MakePartSet()
propBlockParts := propBlock.MakePartSet(partSize)
incrementRound(cs2, cs3, cs4)
incrementRound(vs2, vs3, vs4)
// XXX: this isn't guaranteed to get there before the timeoutPropose ...
cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer")
@@ -745,18 +772,18 @@ func TestLockPOLSafety1(t *testing.T) {
validatePrevote(t, cs1, 1, vss[0], propBlockHash)
// now we see the others prevote for it, so we should lock on it
signAddVoteToFromMany(types.VoteTypePrevote, cs1, propBlockHash, propBlockParts.Header(), nil, cs2, cs3, cs4)
signAddVotes(cs1, types.VoteTypePrevote, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4)
<-voteCh // precommit
// we should have precommitted
validatePrecommit(t, cs1, 1, 1, vss[0], propBlockHash, propBlockHash)
signAddVoteToFromMany(types.VoteTypePrecommit, cs1, nil, types.PartSetHeader{}, nil, cs2, cs3)
signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs3)
<-timeoutWaitCh
incrementRound(cs2, cs3, cs4)
incrementRound(vs2, vs3, vs4)
<-newRoundCh
@@ -777,7 +804,7 @@ func TestLockPOLSafety1(t *testing.T) {
newStepCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringNewRoundStep(), 1)
// add prevotes from the earlier round
addVoteToFromMany(cs1, prevotes, cs2, cs3, cs4)
addVotes(cs1, prevotes...)
log.Warn("Done adding prevotes!")
@@ -793,30 +820,33 @@ func TestLockPOLSafety1(t *testing.T) {
// dont see P0, lock on P1 at R1, dont unlock using P0 at R2
func TestLockPOLSafety2(t *testing.T) {
cs1, vss := randConsensusState(4)
cs2, cs3, cs4 := vss[1], vss[2], vss[3]
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
partSize := config.GetInt("block_part_size")
proposalCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringCompleteProposal(), 1)
timeoutProposeCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutPropose(), 1)
timeoutWaitCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutWait(), 1)
newRoundCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringNewRound(), 1)
unlockCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringUnlock(), 1)
voteCh := subscribeToVoter(cs1, cs1.privValidator.Address)
voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress())
// the block for R0: gets polkad but we miss it
// (even though we signed it, shhh)
_, propBlock0 := decideProposal(cs1, vss[0], cs1.Height, cs1.Round)
propBlockHash0 := propBlock0.Hash()
propBlockParts0 := propBlock0.MakePartSet()
propBlockParts0 := propBlock0.MakePartSet(partSize)
// the others sign a polka but we don't see it
prevotes := signVoteMany(types.VoteTypePrevote, propBlockHash0, propBlockParts0.Header(), cs2, cs3, cs4)
prevotes := signVotes(types.VoteTypePrevote, propBlockHash0, propBlockParts0.Header(), vs2, vs3, vs4)
// the block for round 1
prop1, propBlock1 := decideProposal(cs1, cs2, cs2.Height, cs2.Round+1)
prop1, propBlock1 := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1)
propBlockHash1 := propBlock1.Hash()
propBlockParts1 := propBlock1.MakePartSet()
propBlockParts1 := propBlock1.MakePartSet(partSize)
propBlockID1 := types.BlockID{propBlockHash1, propBlockParts1.Header()}
incrementRound(cs2, cs3, cs4)
incrementRound(vs2, vs3, vs4)
cs1.updateRoundStep(0, RoundStepPrecommitWait)
@@ -831,28 +861,30 @@ func TestLockPOLSafety2(t *testing.T) {
<-voteCh // prevote
signAddVoteToFromMany(types.VoteTypePrevote, cs1, propBlockHash1, propBlockParts1.Header(), nil, cs2, cs3, cs4)
signAddVotes(cs1, types.VoteTypePrevote, propBlockHash1, propBlockParts1.Header(), vs2, vs3, vs4)
<-voteCh // precommit
// the proposed block should now be locked and our precommit added
validatePrecommit(t, cs1, 1, 1, vss[0], propBlockHash1, propBlockHash1)
// add precommits from the rest
signAddVoteToFromMany(types.VoteTypePrecommit, cs1, nil, types.PartSetHeader{}, nil, cs2, cs4)
signAddVoteToFrom(types.VoteTypePrecommit, cs1, cs3, propBlockHash1, propBlockParts1.Header(), nil)
signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs4)
signAddVotes(cs1, types.VoteTypePrecommit, propBlockHash1, propBlockParts1.Header(), vs3)
incrementRound(cs2, cs3, cs4)
incrementRound(vs2, vs3, vs4)
// timeout of precommit wait to new round
<-timeoutWaitCh
// in round 2 we see the polkad block from round 0
newProp := types.NewProposal(height, 2, propBlockParts0.Header(), 0)
if err := cs3.SignProposal(config.GetString("chain_id"), newProp); err != nil {
panic(err)
newProp := types.NewProposal(height, 2, propBlockParts0.Header(), 0, propBlockID1)
if err := vs3.SignProposal(config.GetString("chain_id"), newProp); err != nil {
t.Fatal(err)
}
cs1.SetProposalAndBlock(newProp, propBlock0, propBlockParts0, "some peer")
addVoteToFromMany(cs1, prevotes, cs2, cs3, cs4) // add the pol votes
// Add the pol votes
addVotes(cs1, prevotes...)
<-newRoundCh
log.Notice("### ONTO Round 2")
@@ -883,13 +915,13 @@ func TestLockPOLSafety2(t *testing.T) {
/*
func TestSlashingPrevotes(t *testing.T) {
cs1, vss := randConsensusState(2)
cs2 := vss[1]
vs2 := vss[1]
proposalCh := subscribeToEvent(cs1.evsw,"tester",types.EventStringCompleteProposal() , 1)
timeoutWaitCh := subscribeToEvent(cs1.evsw,"tester",types.EventStringTimeoutWait() , 1)
newRoundCh := subscribeToEvent(cs1.evsw,"tester",types.EventStringNewRound() , 1)
voteCh := subscribeToVoter(cs1, cs1.privValidator.Address)
voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress())
// start round and wait for propose and prevote
startTestRound(cs1, cs1.Height, 0)
@@ -903,7 +935,7 @@ func TestSlashingPrevotes(t *testing.T) {
// add one for a different block should cause us to go into prevote wait
hash := rs.ProposalBlock.Hash()
hash[0] = byte(hash[0]+1) % 255
signAddVoteToFrom(types.VoteTypePrevote, cs1, cs2, hash, rs.ProposalBlockParts.Header(), nil)
signAddVotes(cs1, types.VoteTypePrevote, hash, rs.ProposalBlockParts.Header(), vs2)
<-timeoutWaitCh
@@ -911,20 +943,20 @@ func TestSlashingPrevotes(t *testing.T) {
// away and ignore more prevotes (and thus fail to slash!)
// add the conflicting vote
signAddVoteToFrom(types.VoteTypePrevote, cs1, cs2, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(),nil)
signAddVotes(cs1, types.VoteTypePrevote, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vs2)
// XXX: Check for existence of Dupeout info
}
func TestSlashingPrecommits(t *testing.T) {
cs1, vss := randConsensusState(2)
cs2 := vss[1]
vs2 := vss[1]
proposalCh := subscribeToEvent(cs1.evsw,"tester",types.EventStringCompleteProposal() , 1)
timeoutWaitCh := subscribeToEvent(cs1.evsw,"tester",types.EventStringTimeoutWait() , 1)
newRoundCh := subscribeToEvent(cs1.evsw,"tester",types.EventStringNewRound() , 1)
voteCh := subscribeToVoter(cs1, cs1.privValidator.Address)
voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress())
// start round and wait for propose and prevote
startTestRound(cs1, cs1.Height, 0)
@@ -932,8 +964,8 @@ func TestSlashingPrecommits(t *testing.T) {
re := <-proposalCh
<-voteCh // prevote
// add prevote from cs2
signAddVoteToFrom(types.VoteTypePrevote, cs1, cs2, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), nil)
// add prevote from vs2
signAddVotes(cs1, types.VoteTypePrevote, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vs2)
<-voteCh // precommit
@@ -941,13 +973,13 @@ func TestSlashingPrecommits(t *testing.T) {
// add one for a different block should cause us to go into prevote wait
hash := rs.ProposalBlock.Hash()
hash[0] = byte(hash[0]+1) % 255
signAddVoteToFrom(types.VoteTypePrecommit, cs1, cs2, hash, rs.ProposalBlockParts.Header(),nil)
signAddVotes(cs1, types.VoteTypePrecommit, hash, rs.ProposalBlockParts.Header(), vs2)
// NOTE: we have to send the vote for different block first so we don't just go into precommit round right
// away and ignore more prevotes (and thus fail to slash!)
// add precommit from cs2
signAddVoteToFrom(types.VoteTypePrecommit, cs1, cs2, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(),nil)
// add precommit from vs2
signAddVotes(cs1, types.VoteTypePrecommit, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vs2)
// XXX: Check for existence of Dupeout info
}
@@ -963,13 +995,15 @@ func TestSlashingPrecommits(t *testing.T) {
// we receive a final precommit after going into next round, but others might have gone to commit already!
func TestHalt1(t *testing.T) {
cs1, vss := randConsensusState(4)
cs2, cs3, cs4 := vss[1], vss[2], vss[3]
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
partSize := config.GetInt("block_part_size")
proposalCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringCompleteProposal(), 1)
timeoutWaitCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutWait(), 1)
newRoundCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringNewRound(), 1)
newBlockCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringNewBlock(), 1)
voteCh := subscribeToVoter(cs1, cs1.privValidator.Address)
voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress())
// start round and wait for propose and prevote
startTestRound(cs1, cs1.Height, 0)
@@ -977,23 +1011,23 @@ func TestHalt1(t *testing.T) {
re := <-proposalCh
rs := re.(types.EventDataRoundState).RoundState.(*RoundState)
propBlock := rs.ProposalBlock
propBlockParts := propBlock.MakePartSet()
propBlockParts := propBlock.MakePartSet(partSize)
<-voteCh // prevote
signAddVoteToFromMany(types.VoteTypePrevote, cs1, propBlock.Hash(), propBlockParts.Header(), nil, cs3, cs4)
signAddVotes(cs1, types.VoteTypePrevote, propBlock.Hash(), propBlockParts.Header(), vs3, vs4)
<-voteCh // precommit
// the proposed block should now be locked and our precommit added
validatePrecommit(t, cs1, 0, 0, vss[0], propBlock.Hash(), propBlock.Hash())
// add precommits from the rest
signAddVoteToFrom(types.VoteTypePrecommit, cs1, cs2, nil, types.PartSetHeader{}, nil) // didnt receive proposal
signAddVoteToFrom(types.VoteTypePrecommit, cs1, cs3, propBlock.Hash(), propBlockParts.Header(), nil)
// we receive this later, but cs3 might receive it earlier and with ours will go to commit!
precommit4 := signVote(cs4, types.VoteTypePrecommit, propBlock.Hash(), propBlockParts.Header())
signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2) // didnt receive proposal
signAddVotes(cs1, types.VoteTypePrecommit, propBlock.Hash(), propBlockParts.Header(), vs3)
// we receive this later, but vs3 might receive it earlier and with ours will go to commit!
precommit4 := signVote(vs4, types.VoteTypePrecommit, propBlock.Hash(), propBlockParts.Header())
incrementRound(cs2, cs3, cs4)
incrementRound(vs2, vs3, vs4)
// timeout to new round
<-timeoutWaitCh
@@ -1011,7 +1045,7 @@ func TestHalt1(t *testing.T) {
validatePrevote(t, cs1, 0, vss[0], rs.LockedBlock.Hash())
// now we receive the precommit from the previous round
addVoteToFrom(cs1, cs4, precommit4)
addVotes(cs1, precommit4)
// receiving that precommit should take us straight to commit
<-newBlockCh


@@ -0,0 +1,36 @@
# Generating test data
To generate the data, run `build.sh`. See that script for more details.
Make sure to adjust the stepChanges in the testCases if the number of messages changes.
This sometimes happens for the `small_block2.cswal`, where the number of block parts changes between 4 and 5.
If you need to change the signatures, you can use a script like the one below. The privBytes come from `config/tendermint_test/...`:
```
package main
import (
"encoding/hex"
"fmt"
"github.com/tendermint/go-crypto"
)
func main() {
signBytes, err := hex.DecodeString("7B22636861696E5F6964223A2274656E6465726D696E745F74657374222C22766F7465223A7B22626C6F636B5F68617368223A2242453544373939433846353044354645383533364334333932464443384537423342313830373638222C22626C6F636B5F70617274735F686561646572223A506172745365747B543A31204236323237323535464632307D2C22686569676874223A312C22726F756E64223A302C2274797065223A327D7D")
if err != nil {
panic(err)
}
privBytes, err := hex.DecodeString("27F82582AEFAE7AB151CFB01C48BB6C1A0DA78F9BDDA979A9F70A84D074EB07D3B3069C422E19688B45CBFAE7BB009FC0FA1B1EA86593519318B7214853803C8")
if err != nil {
panic(err)
}
privKey := crypto.PrivKeyEd25519{}
copy(privKey[:], privBytes)
signature := privKey.Sign(signBytes)
fmt.Printf("Signature Bytes: %X\n", signature.Bytes())
}
```


@@ -0,0 +1,103 @@
#! /bin/bash
# XXX: removes tendermint dir
cd $GOPATH/src/github.com/tendermint/tendermint
# specify a dir to copy
# TODO: eventually we should replace with `tendermint init --test`
DIR=$HOME/.tendermint_test/consensus_state_test
rm -rf $HOME/.tendermint
cp -r $DIR $HOME/.tendermint
function reset(){
rm -rf $HOME/.tendermint/data
tendermint unsafe_reset_priv_validator
}
reset
# empty block
function empty_block(){
tendermint node --proxy_app=dummy &> /dev/null &
sleep 5
killall tendermint
# /q would print up to and including the match, then quit.
# /Q doesn't include the match.
# http://unix.stackexchange.com/questions/11305/grep-show-all-the-file-up-to-the-match
sed '/ENDHEIGHT: 1/Q' ~/.tendermint/data/cs.wal/wal > consensus/test_data/empty_block.cswal
reset
}
# many blocks
function many_blocks(){
bash scripts/txs/random.sh 1000 36657 &> /dev/null &
PID=$!
tendermint node --proxy_app=dummy &> /dev/null &
sleep 7
killall tendermint
kill -9 $PID
sed '/ENDHEIGHT: 6/Q' ~/.tendermint/data/cs.wal/wal > consensus/test_data/many_blocks.cswal
reset
}
# small block 1
function small_block1(){
bash scripts/txs/random.sh 1000 36657 &> /dev/null &
PID=$!
tendermint node --proxy_app=dummy &> /dev/null &
sleep 10
killall tendermint
kill -9 $PID
sed '/ENDHEIGHT: 1/Q' ~/.tendermint/data/cs.wal/wal > consensus/test_data/small_block1.cswal
reset
}
# small block 2 (part size = 512)
function small_block2(){
echo "" >> ~/.tendermint/config.toml
echo "block_part_size = 512" >> ~/.tendermint/config.toml
bash scripts/txs/random.sh 1000 36657 &> /dev/null &
PID=$!
tendermint node --proxy_app=dummy &> /dev/null &
sleep 5
killall tendermint
kill -9 $PID
sed '/ENDHEIGHT: 1/Q' ~/.tendermint/data/cs.wal/wal > consensus/test_data/small_block2.cswal
reset
}
case "$1" in
"small_block1")
small_block1
;;
"small_block2")
small_block2
;;
"empty_block")
empty_block
;;
"many_blocks")
many_blocks
;;
*)
small_block1
small_block2
empty_block
many_blocks
esac


@@ -0,0 +1,10 @@
#ENDHEIGHT: 0
{"time":"2016-12-18T05:05:33.502Z","msg":[3,{"duration":974084551,"height":1,"round":0,"step":1}]}
{"time":"2016-12-18T05:05:33.505Z","msg":[1,{"height":1,"round":0,"step":"RoundStepPropose"}]}
{"time":"2016-12-18T05:05:33.505Z","msg":[2,{"msg":[17,{"Proposal":{"height":1,"round":0,"block_parts_header":{"total":1,"hash":"71D2DA2336A9F84C22A28FF6C67F35F3478FC0AF"},"pol_round":-1,"pol_block_id":{"hash":"","parts":{"total":0,"hash":""}},"signature":[1,"62C0F2BCCB491399EEDAF8E85837ADDD4E25BAB7A84BFC4F0E88594531FBC6D4755DEC7E6427F04AD7EB8BB89502762AB4380C7BBA93A4C297E6180EC78E3504"]}}],"peer_key":""}]}
{"time":"2016-12-18T05:05:33.506Z","msg":[2,{"msg":[19,{"Height":1,"Round":0,"Part":{"index":0,"bytes":"0101010F74656E6465726D696E745F74657374010114914148D83E0DC00000000000000114354594CBFC1A7BCA1AD0050ED6AA010023EADA390001000100000000","proof":{"aunts":[]}}}],"peer_key":""}]}
{"time":"2016-12-18T05:05:33.508Z","msg":[1,{"height":1,"round":0,"step":"RoundStepPrevote"}]}
{"time":"2016-12-18T05:05:33.508Z","msg":[2,{"msg":[20,{"Vote":{"validator_address":"D028C9981F7A87F3093672BF0D5B0E2A1B3ED456","validator_index":0,"height":1,"round":0,"type":1,"block_id":{"hash":"3E83DF89A01C5F104912E095F32451C202F34717","parts":{"total":1,"hash":"71D2DA2336A9F84C22A28FF6C67F35F3478FC0AF"}},"signature":[1,"B64D0BB64B2E9AAFDD4EBEA679644F77AE774D69E3E2E1B042AB15FE4F84B1427AC6C8A25AFF58EA22011AE567FEA49D2EE7354382E915AD85BF40C58FA6130C"]}}],"peer_key":""}]}
{"time":"2016-12-18T05:05:33.509Z","msg":[1,{"height":1,"round":0,"step":"RoundStepPrecommit"}]}
{"time":"2016-12-18T05:05:33.509Z","msg":[2,{"msg":[20,{"Vote":{"validator_address":"D028C9981F7A87F3093672BF0D5B0E2A1B3ED456","validator_index":0,"height":1,"round":0,"type":2,"block_id":{"hash":"3E83DF89A01C5F104912E095F32451C202F34717","parts":{"total":1,"hash":"71D2DA2336A9F84C22A28FF6C67F35F3478FC0AF"}},"signature":[1,"D83E968392D1BF09821E0D05079DAB5491CABD89BE128BD1CF573ED87148BA84667A56C0A069EFC90760F25EDAC62BC324DBB12EA63F44E6CB2D3500FE5E640F"]}}],"peer_key":""}]}
{"time":"2016-12-18T05:05:33.509Z","msg":[1,{"height":1,"round":0,"step":"RoundStepCommit"}]}

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long


@@ -0,0 +1,14 @@
#ENDHEIGHT: 0
{"time":"2016-12-18T05:05:43.641Z","msg":[3,{"duration":969409681,"height":1,"round":0,"step":1}]}
{"time":"2016-12-18T05:05:43.643Z","msg":[1,{"height":1,"round":0,"step":"RoundStepPropose"}]}
{"time":"2016-12-18T05:05:43.643Z","msg":[2,{"msg":[17,{"Proposal":{"height":1,"round":0,"block_parts_header":{"total":5,"hash":"C916905C3C444501DDDAA1BF52E959B7531E762E"},"pol_round":-1,"pol_block_id":{"hash":"","parts":{"total":0,"hash":""}},"signature":[1,"F1A8E9928889C68FD393F3983B5362AECA4A95AA13FE3C78569B2515EC046893CB718071CAF54F3F1507DCD851B37CD5557EA17BB5471D2DC6FB5AC5FBB72E02"]}}],"peer_key":""}]}
{"time":"2016-12-18T05:05:43.643Z","msg":[2,{"msg":[19,{"Height":1,"Round":0,"Part":{"index":0,"bytes":"0101010F74656E6465726D696E745F7465737401011491414B3483A8400190000000000114926EA77D30A4D19866159DE7E58AA9461F90F9D10114354594CBFC1A7BCA1AD0050ED6AA010023EADA3900010190010D6162636431323D646362613132010D6162636431333D646362613133010D6162636431343D646362613134010D6162636431353D646362613135010D6162636431363D646362613136010D6162636431373D646362613137010D6162636431383D646362613138010D6162636431393D646362613139010D6162636432303D646362613230010D6162636432313D646362613231010D6162636432323D646362613232010D6162636432333D646362613233010D6162636432343D646362613234010D6162636432353D646362613235010D6162636432363D646362613236010D6162636432373D646362613237010D6162636432383D646362613238010D6162636432393D646362613239010D6162636433303D646362613330010D6162636433313D646362613331010D6162636433323D646362613332010D6162636433333D646362613333010D6162636433343D646362613334010D6162636433353D646362613335010D6162636433363D646362613336010D6162636433373D646362613337010D6162636433383D646362613338010D6162636433393D646362613339010D6162636434303D","proof":{"aunts":["C9FBD66B63A976638196323F5B93494BDDFC9EED","47FD83BB7607E679EE5CF0783372D13C5A264056","FEEC97078A26B7F6057821C0660855170CC6F1D7"]}}}],"peer_key":""}]}
{"time":"2016-12-18T05:05:43.643Z","msg":[2,{"msg":[19,{"Height":1,"Round":0,"Part":{"index":1,"bytes":"646362613430010D6162636434313D646362613431010D6162636434323D646362613432010D6162636434333D646362613433010D6162636434343D646362613434010D6162636434353D646362613435010D6162636434363D646362613436010D6162636434373D646362613437010D6162636434383D646362613438010D6162636434393D646362613439010D6162636435303D646362613530010D6162636435313D646362613531010D6162636435323D646362613532010D6162636435333D646362613533010D6162636435343D646362613534010D6162636435353D646362613535010D6162636435363D646362613536010D6162636435373D646362613537010D6162636435383D646362613538010D6162636435393D646362613539010D6162636436303D646362613630010D6162636436313D646362613631010D6162636436323D646362613632010D6162636436333D646362613633010D6162636436343D646362613634010D6162636436353D646362613635010D6162636436363D646362613636010D6162636436373D646362613637010D6162636436383D646362613638010D6162636436393D646362613639010D6162636437303D646362613730010D6162636437313D646362613731010D6162636437323D646362613732010D6162636437333D646362613733010D6162636437343D6463","proof":{"aunts":["D7FB03B935B77C322064F8277823CDB5C7018597","47FD83BB7607E679EE5CF0783372D13C5A264056","FEEC97078A26B7F6057821C0660855170CC6F1D7"]}}}],"peer_key":""}]}
{"time":"2016-12-18T05:05:43.644Z","msg":[2,{"msg":[19,{"Height":1,"Round":0,"Part":{"index":2,"bytes":"62613734010D6162636437353D646362613735010D6162636437363D646362613736010D6162636437373D646362613737010D6162636437383D646362613738010D6162636437393D646362613739010D6162636438303D646362613830010D6162636438313D646362613831010D6162636438323D646362613832010D6162636438333D646362613833010D6162636438343D646362613834010D6162636438353D646362613835010D6162636438363D646362613836010D6162636438373D646362613837010D6162636438383D646362613838010D6162636438393D646362613839010D6162636439303D646362613930010D6162636439313D646362613931010D6162636439323D646362613932010D6162636439333D646362613933010D6162636439343D646362613934010D6162636439353D646362613935010D6162636439363D646362613936010D6162636439373D646362613937010D6162636439383D646362613938010D6162636439393D646362613939010F616263643130303D64636261313030010F616263643130313D64636261313031010F616263643130323D64636261313032010F616263643130333D64636261313033010F616263643130343D64636261313034010F616263643130353D64636261313035010F616263643130363D64636261313036010F616263643130373D64636261","proof":{"aunts":["A607D9BF5107E6C9FD19B6928D9CC7714B0730E4","FEEC97078A26B7F6057821C0660855170CC6F1D7"]}}}],"peer_key":""}]}
{"time":"2016-12-18T05:05:43.644Z","msg":[2,{"msg":[19,{"Height":1,"Round":0,"Part":{"index":3,"bytes":"313037010F616263643130383D64636261313038010F616263643130393D64636261313039010F616263643131303D64636261313130010F616263643131313D64636261313131010F616263643131323D64636261313132010F616263643131333D64636261313133010F616263643131343D64636261313134010F616263643131353D64636261313135010F616263643131363D64636261313136010F616263643131373D64636261313137010F616263643131383D64636261313138010F616263643131393D64636261313139010F616263643132303D64636261313230010F616263643132313D64636261313231010F616263643132323D64636261313232010F616263643132333D64636261313233010F616263643132343D64636261313234010F616263643132353D64636261313235010F616263643132363D64636261313236010F616263643132373D64636261313237010F616263643132383D64636261313238010F616263643132393D64636261313239010F616263643133303D64636261313330010F616263643133313D64636261313331010F616263643133323D64636261313332010F616263643133333D64636261313333010F616263643133343D64636261313334010F616263643133353D64636261313335010F616263643133363D64636261313336010F616263643133373D646362613133","proof":{"aunts":["0FD794B3506B9E92CDE3703F7189D42167E77095","86D455F542DA79F5A764B9DABDEABF01F4BAB2AB"]}}}],"peer_key":""}]}
{"time":"2016-12-18T05:05:43.644Z","msg":[2,{"msg":[19,{"Height":1,"Round":0,"Part":{"index":4,"bytes":"37010F616263643133383D64636261313338010F616263643133393D64636261313339010F616263643134303D64636261313430010F616263643134313D64636261313431010F616263643134323D64636261313432010F616263643134333D64636261313433010F616263643134343D64636261313434010F616263643134353D64636261313435010F616263643134363D64636261313436010F616263643134373D64636261313437010F616263643134383D64636261313438010F616263643134393D64636261313439010F616263643135303D64636261313530010F616263643135313D64636261313531010F616263643135323D64636261313532010F616263643135333D64636261313533010F616263643135343D64636261313534010F616263643135353D646362613135350100000000","proof":{"aunts":["50CBDC078A660EAE3442BA355BE10EE0D04408D1","86D455F542DA79F5A764B9DABDEABF01F4BAB2AB"]}}}],"peer_key":""}]}
{"time":"2016-12-18T05:05:43.645Z","msg":[1,{"height":1,"round":0,"step":"RoundStepPrevote"}]}
{"time":"2016-12-18T05:05:43.645Z","msg":[2,{"msg":[20,{"Vote":{"validator_address":"D028C9981F7A87F3093672BF0D5B0E2A1B3ED456","validator_index":0,"height":1,"round":0,"type":1,"block_id":{"hash":"6ADACDC2871C59A67337DAFD5045A982ED070C51","parts":{"total":5,"hash":"C916905C3C444501DDDAA1BF52E959B7531E762E"}},"signature":[1,"E815E0A63B7EEE7894DE2D72372A7C393434AC8ACCC46B60C628910F73351806D55A59994F08B454BFD71EDAA0CA95733CA47E37FFDAF9AAA2431A8160176E01"]}}],"peer_key":""}]}
{"time":"2016-12-18T05:05:43.647Z","msg":[1,{"height":1,"round":0,"step":"RoundStepPrecommit"}]}
{"time":"2016-12-18T05:05:43.647Z","msg":[2,{"msg":[20,{"Vote":{"validator_address":"D028C9981F7A87F3093672BF0D5B0E2A1B3ED456","validator_index":0,"height":1,"round":0,"type":2,"block_id":{"hash":"6ADACDC2871C59A67337DAFD5045A982ED070C51","parts":{"total":5,"hash":"C916905C3C444501DDDAA1BF52E959B7531E762E"}},"signature":[1,"9AAC3F3A118EE039EB460E9E5308D490D671C7490309BD5D62B5F392205C7E420DFDAF90F08294FF36BE8A9AA5CC203C1F2088B42D2BB8EE40A45F2BB5C54D0A"]}}],"peer_key":""}]}
{"time":"2016-12-18T05:05:43.648Z","msg":[1,{"height":1,"round":0,"step":"RoundStepCommit"}]}

consensus/ticker.go (new file, 126 lines)

@@ -0,0 +1,126 @@
package consensus
import (
"time"
. "github.com/tendermint/go-common"
)
var (
tickTockBufferSize = 10
)
// TimeoutTicker is a timer that schedules timeouts
// conditional on the height/round/step in the timeoutInfo.
// The timeoutInfo.Duration may be non-positive.
type TimeoutTicker interface {
Start() (bool, error)
Stop() bool
Chan() <-chan timeoutInfo // on which to receive a timeout
ScheduleTimeout(ti timeoutInfo) // reset the timer
}
// timeoutTicker wraps time.Timer,
// scheduling timeouts only for greater height/round/step
// than what it's already seen.
// Timeouts are scheduled along the tickChan,
// and fired on the tockChan.
type timeoutTicker struct {
BaseService
timer *time.Timer
tickChan chan timeoutInfo
tockChan chan timeoutInfo
}
func NewTimeoutTicker() TimeoutTicker {
tt := &timeoutTicker{
timer: time.NewTimer(0),
tickChan: make(chan timeoutInfo, tickTockBufferSize),
tockChan: make(chan timeoutInfo, tickTockBufferSize),
}
tt.stopTimer() // don't want to fire until the first scheduled timeout
tt.BaseService = *NewBaseService(log, "TimeoutTicker", tt)
return tt
}
func (t *timeoutTicker) OnStart() error {
go t.timeoutRoutine()
return nil
}
func (t *timeoutTicker) OnStop() {
t.BaseService.OnStop()
t.stopTimer()
}
func (t *timeoutTicker) Chan() <-chan timeoutInfo {
return t.tockChan
}
// The timeoutRoutine is always available to read from tickChan (it won't block).
// The scheduling may fail if the timeoutRoutine has already scheduled a timeout for a later height/round/step.
func (t *timeoutTicker) ScheduleTimeout(ti timeoutInfo) {
t.tickChan <- ti
}
//-------------------------------------------------------------
// stop the timer and drain if necessary
func (t *timeoutTicker) stopTimer() {
// Stop() returns false if it was already fired or was stopped
if !t.timer.Stop() {
select {
case <-t.timer.C:
default:
log.Debug("Timer already stopped")
}
}
}
// send on tickChan to start a new timer.
// timers are interrupted and replaced by new ticks from later steps
// timeouts of 0 on the tickChan will be immediately relayed to the tockChan
func (t *timeoutTicker) timeoutRoutine() {
log.Debug("Starting timeout routine")
var ti timeoutInfo
for {
select {
case newti := <-t.tickChan:
log.Debug("Received tick", "old_ti", ti, "new_ti", newti)
// ignore tickers for old height/round/step
if newti.Height < ti.Height {
continue
} else if newti.Height == ti.Height {
if newti.Round < ti.Round {
continue
} else if newti.Round == ti.Round {
if ti.Step > 0 && newti.Step <= ti.Step {
continue
}
}
}
// stop the last timer
t.stopTimer()
// update timeoutInfo and reset timer
// NOTE time.Timer allows duration to be non-positive
ti = newti
t.timer.Reset(ti.Duration)
log.Debug("Scheduled timeout", "dur", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step)
case <-t.timer.C:
log.Info("Timed out", "dur", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step)
// the goroutine here guarantees that timeoutRoutine doesn't block.
// Determinism comes from playback in the receiveRoutine.
// We can eliminate it by merging the timeoutRoutine into receiveRoutine
// and managing the timeouts ourselves with a millisecond ticker
go func(toi timeoutInfo) { t.tockChan <- toi }(ti)
case <-t.Quit:
return
}
}
}


@@ -1,10 +1,9 @@
package consensus
import (
"bufio"
"os"
"time"
auto "github.com/tendermint/go-autofile"
. "github.com/tendermint/go-common"
"github.com/tendermint/go-wire"
"github.com/tendermint/tendermint/types"
@@ -13,15 +12,15 @@ import (
//--------------------------------------------------------
// types and functions for saving consensus messages
type ConsensusLogMessage struct {
Time time.Time `json:"time"`
Msg ConsensusLogMessageInterface `json:"msg"`
type TimedWALMessage struct {
Time time.Time `json:"time"`
Msg WALMessage `json:"msg"`
}
type ConsensusLogMessageInterface interface{}
type WALMessage interface{}
var _ = wire.RegisterInterface(
struct{ ConsensusLogMessageInterface }{},
struct{ WALMessage }{},
wire.ConcreteType{types.EventDataRoundState{}, 0x01},
wire.ConcreteType{msgInfo{}, 0x02},
wire.ConcreteType{timeoutInfo{}, 0x03},
@@ -35,103 +34,72 @@ var _ = wire.RegisterInterface(
// TODO: currently the wal is overwritten during replay catchup
// give it a mode so it's either reading or appending - must read to end to start appending again
type WAL struct {
fp *os.File
exists bool // if the file already existed (restarted process)
done chan struct{}
BaseService
group *auto.Group
light bool // ignore block parts
}
func NewWAL(file string, light bool) (*WAL, error) {
var walExists bool
if _, err := os.Stat(file); err == nil {
walExists = true
}
fp, err := os.OpenFile(file, os.O_RDWR|os.O_APPEND|os.O_CREATE, 0600)
func NewWAL(walFile string, light bool) (*WAL, error) {
group, err := auto.OpenGroup(walFile)
if err != nil {
return nil, err
}
return &WAL{
fp: fp,
exists: walExists,
done: make(chan struct{}),
light: light,
}, nil
wal := &WAL{
group: group,
light: light,
}
wal.BaseService = *NewBaseService(log, "WAL", wal)
_, err = wal.Start()
return wal, err
}
func (wal *WAL) OnStart() error {
size, err := wal.group.Head.Size()
if err != nil {
return err
} else if size == 0 {
wal.writeEndHeight(0)
}
_, err = wal.group.Start()
return err
}
func (wal *WAL) OnStop() {
wal.BaseService.OnStop()
wal.group.Stop()
}
// called in newStep and for each pass in receiveRoutine
func (wal *WAL) Save(clm ConsensusLogMessageInterface) {
if wal != nil {
if wal.light {
// in light mode we only write new steps, timeouts, and our own votes (no proposals, block parts)
if mi, ok := clm.(msgInfo); ok {
_ = mi
if mi.PeerKey != "" {
return
}
}
}
var n int
var err error
wire.WriteJSON(ConsensusLogMessage{time.Now(), clm}, wal.fp, &n, &err)
wire.WriteTo([]byte("\n"), wal.fp, &n, &err) // one message per line
if err != nil {
PanicQ(Fmt("Error writing msg to consensus wal. Error: %v \n\nMessage: %v", err, clm))
}
}
}
// Must not be called concurrently with a write.
func (wal *WAL) Close() {
if wal != nil {
wal.fp.Close()
}
wal.done <- struct{}{}
}
func (wal *WAL) Wait() {
<-wal.done
}
func (wal *WAL) SeekFromEnd(found func([]byte) bool) (nLines int, err error) {
var current int64
// start at the end
current, err = wal.fp.Seek(0, 2)
if err != nil {
func (wal *WAL) Save(wmsg WALMessage) {
if wal == nil {
return
}
// backup until we find the right line
// current is how far we are from the beginning
for {
current -= 1
if current < 0 {
wal.fp.Seek(0, 0) // back to beginning
return
}
// backup one and read a new byte
if _, err = wal.fp.Seek(current, 0); err != nil {
return
}
b := make([]byte, 1)
if _, err = wal.fp.Read(b); err != nil {
return
}
if b[0] == '\n' || len(b) == 0 {
nLines += 1
// read a full line
reader := bufio.NewReader(wal.fp)
lineBytes, _ := reader.ReadBytes('\n')
if len(lineBytes) == 0 {
continue
}
if found(lineBytes) {
wal.fp.Seek(0, 1) // (?)
wal.fp.Seek(current, 0)
if wal.light {
// in light mode we only write new steps, timeouts, and our own votes (no proposals, block parts)
if mi, ok := wmsg.(msgInfo); ok {
if mi.PeerKey != "" {
return
}
}
}
// Write the wal message
var wmsgBytes = wire.JSONBytes(TimedWALMessage{time.Now(), wmsg})
err := wal.group.WriteLine(string(wmsgBytes))
if err != nil {
PanicQ(Fmt("Error writing msg to consensus wal. Error: %v \n\nMessage: %v", err, wmsg))
}
// TODO: only flush when necessary
if err := wal.group.Flush(); err != nil {
PanicQ(Fmt("Error flushing consensus wal buf to file. Error: %v \n", err))
}
}
func (wal *WAL) writeEndHeight(height int) {
wal.group.WriteLine(Fmt("#ENDHEIGHT: %v", height))
// TODO: only flush when necessary
if err := wal.group.Flush(); err != nil {
PanicQ(Fmt("Error flushing consensus wal buf to file. Error: %v \n", err))
}
}


@@ -1,78 +0,0 @@
package consensus
import (
"io/ioutil"
"os"
"path"
"strings"
"testing"
. "github.com/tendermint/go-common"
)
var testTxt = `{"time":"2016-01-16T04:42:00.390Z","msg":[1,{"height":28219,"round":0,"step":"RoundStepPrevote"}]}
{"time":"2016-01-16T04:42:00.390Z","msg":[2,{"msg":[20,{"ValidatorIndex":0,"Vote":{"height":28219,"round":0,"type":1,"block_hash":"67F9689F15BEC30BF311FB4C0C80C5E661AA44E0","block_parts_header":{"total":1,"hash":"DFFD4409A1E273ED61AC27CAF975F446020D5676"},"signature":"4CC6845A128E723A299B470CCBB2A158612AA51321447F6492F3DA57D135C27FCF4124B3B19446A248252BDA45B152819C76AAA5FD35E1C07091885CE6955E05"}}],"peer_key":""}]}
{"time":"2016-01-16T04:42:00.392Z","msg":[1,{"height":28219,"round":0,"step":"RoundStepPrecommit"}]}
{"time":"2016-01-16T04:42:00.392Z","msg":[2,{"msg":[20,{"ValidatorIndex":0,"Vote":{"height":28219,"round":0,"type":2,"block_hash":"67F9689F15BEC30BF311FB4C0C80C5E661AA44E0","block_parts_header":{"total":1,"hash":"DFFD4409A1E273ED61AC27CAF975F446020D5676"},"signature":"1B9924E010F47E0817695DFE462C531196E5A12632434DE12180BBA3EFDAD6B3960FDB9357AFF085EB61729A7D4A6AD8408555D7569C87D9028F280192FD4E05"}}],"peer_key":""}]}
{"time":"2016-01-16T04:42:00.393Z","msg":[1,{"height":28219,"round":0,"step":"RoundStepCommit"}]}
{"time":"2016-01-16T04:42:00.395Z","msg":[1,{"height":28220,"round":0,"step":"RoundStepNewHeight"}]}`
func TestSeek(t *testing.T) {
f, err := ioutil.TempFile(os.TempDir(), "seek_test_")
if err != nil {
panic(err)
}
stat, _ := f.Stat()
name := stat.Name()
_, err = f.WriteString(testTxt)
if err != nil {
panic(err)
}
f.Close()
wal, err := NewWAL(path.Join(os.TempDir(), name), config.GetBool("cswal_light"))
if err != nil {
panic(err)
}
keyWord := "Precommit"
n, err := wal.SeekFromEnd(func(b []byte) bool {
if strings.Contains(string(b), keyWord) {
return true
}
return false
})
if err != nil {
panic(err)
}
// confirm n
spl := strings.Split(testTxt, "\n")
var i int
var s string
for i, s = range spl {
if strings.Contains(s, keyWord) {
break
}
}
// n is lines from the end.
spl = spl[i:]
if n != len(spl) {
panic(Fmt("Wrong nLines. Got %d, expected %d", n, len(spl)))
}
b, err := ioutil.ReadAll(wal.fp)
if err != nil {
panic(err)
}
// first char is a \n
spl2 := strings.Split(strings.Trim(string(b), "\n"), "\n")
for i, s := range spl {
if s != spl2[i] {
panic(Fmt("Mismatch. Got %s, expected %s", spl2[i], s))
}
}
}

docs/architecture/ABCI.md (new file, 16 lines)

@@ -0,0 +1,16 @@
# ABCI
ABCI is the interface between the consensus/blockchain engine, known as tendermint, and the application-specific business logic, known as an ABCI app.
The tendermint core should run unchanged for all apps. Each app can customize the supported transactions, the queries, even the validator sets and how staking and slashing are handled. This customization is achieved by implementing an ABCI app that sends the proper information to the tendermint engine, which performs as directed.
To understand this decision better, think of the design of the tendermint engine.
* A blockchain is simply consensus on a unique global ordering of events.
* This consensus can efficiently be implemented using BFT and PoS
* This code can be generalized to easily support a large number of blockchains
* The blockchain-specific code, the interpretation of the individual events, can be implemented by a 3rd-party app without touching the consensus engine core
* Use an efficient, language-agnostic layer to implement this (ABCI)
Bucky, please make this doc real.


@@ -0,0 +1,16 @@
# Architecture Decision Records
This is a location to record all high-level architecture decisions in the tendermint project. Not the implementation details, but the reasoning that happened. This should be referred to for guidance on the "right way" to extend the application. And if we notice that the original decisions were lacking, we should have another open discussion, record the new decisions here, and then modify the code to match.
This is like our guide and mentor when Jae and Bucky are offline.... The concept comes from a [blog post](https://product.reverb.com/documenting-architecture-decisions-the-reverb-way-a3563bb24bd0#.78xhdix6t) that resonated among the team when Anton shared it.
Each section of the code can have its own markdown file in this directory; please add a link to it in this readme.
## Sections
* [ABCI](./ABCI.md)
* [go-merkle / merkleeyes](./merkle.md)
* [Frey's thoughts on the data store](./merkle-frey.md)
* basecoin
* tendermint core (multiple sections)
* ???


@@ -0,0 +1,240 @@
# Merkle data stores - Frey's proposal
## TL;DR
To allow the efficient creation of an ABCI app, tendermint wishes to provide a reference implementation of a key-value store that provides merkle proofs of the data. These proofs then quickly allow the ABCI app to provide an app hash to the consensus engine, as well as a full proof to any client.
This is equivalent to building a database, and I would propose designing it from the API first, then looking at how to implement it (or make an adapter from the API to existing implementations). Once we agree on the functionality and the interface, we can implement the API bindings, and then work on building adapters to existing merkle-ized data stores, or modifying the stores to support this interface.
We need to consider the API (both in-process and over the network), language bindings, maintaining handles to old state (and garbage collecting), persistence, security, providing merkle proofs, and general key-value store operations. To stay consistent with the blockchain's "single global order of operations", this data store should only allow one connection at a time to have write access.
## Overview
* **State**
* There are two concepts of state, "committed state" and "working state"
* The working state is only accessible from the ABCI app, allows writing, but does not need to support proofs.
* When we commit the "working state", it becomes a new "committed state" and has an immutable root hash, provides proofs, and can be exposed to external clients.
* **Transactions**
* The database always allows creating a read-only transaction at the last "committed state", this transaction can serve read queries and proofs.
* The database maintains all data to serve these read transactions until they are closed by the client (or time out). This allows the client(s) to determine how much old info is needed
* The database can only support *at most* one writable transaction at a time. This makes it easy to enforce serializability, and attempting to start a second writable transaction may trigger a panic.
* **Functionality**
* It must support efficient key-value operations (get/set/delete)
* It must support returning merkle proofs for any "committed state"
* It should support range queries on subsets of the key space if possible (ie. if the db doesn't hash keys)
* It should also support listening to changes to a desired key via pub-sub or similar method, so I can quickly notify you on a change to your balance without constant polling.
* It may support other db-specific query types as an extension to this interface, as long as all specified actions maintain their meaning.
* **Interface**
* This interface should be domain-specific - ie. designed just for this use case
* It should present a simple go interface for embedding the data store in-process
* It should create a gRPC/protobuf API for calling from any client
* It should provide and maintain client adapters from our in-process interface to gRPC client calls for at least golang and Java (maybe more languages?)
* It should provide and maintain server adapters from our gRPC calls to the in-process interface for golang at least (unless there is another server we wish to support)
* **Persistence**
* It must support atomic persistence upon committing a new block. That is, upon crash recovery, the state is guaranteed to represent the state at the end of a complete block (along with a note of which height it was).
* It must delay deletion of old data as long as there are open read-only transactions referring to it, thus we must maintain some sort of WAL to keep track of pending cleanup.
* When a transaction is closed, or when we recover from a crash, it should clean up all no longer needed data to avoid memory/storage leaks.
* **Security and Auth**
* If we allow connections over gRPC, we must consider these issues and allow both encryption (SSL), and some basic auth rules to prevent undesired access to the DB
* This is client-specific and does not need to be supported in the in-process, embedded version.
## Details
Here we go more in-depth in each of the sections, explaining the reasoning and more details on the desired behavior. This document is only the high-level architecture and should support multiple implementations. When building out a specific implementation, a similar document should be provided for that repo, showing how it implements these concepts, and details about memory usage, storage, efficiency, etc.
### State
The current ABCI interface avoids this question a bit and that has brought confusion. If I use `merkleeyes` to store data, which state is returned from `Query`? The current "working" state, which I would like to refer to in my ABCI application? Or the last committed state, which I would like to return to a client's query? Or an old state, which I may select based on height?
Right now, `merkleeyes` implements `Query` like a normal ABCI app and only returns committed state, which has led to problems and confusion. Thus, we need to be explicit about which state we want to view. Each viewer can then specify which state it wants to view. This allows the app to query the working state in DeliverTx, but the committed state in Query.
We can easily provide two global references for "last committed" and "current working" states. However, if we want to also allow querying of older commits... then we need some way to keep track of which ones are still in use, so we can garbage collect the unneeded ones. There is a non-trivial overhead in holding references to all past states, but also a hard-coded solution (hold onto the last 5 commits) may not support all clients. We should let the client define this somehow.
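To make the two-references idea concrete, here is a minimal, self-contained toy sketch (hypothetical names, not the merkleeyes API) of a store that serves queries from the committed state while writes go to the working state:
```
package main

import "fmt"

// toyStore holds the two global references discussed above: a mutable
// working state for DeliverTx-style writes, and an immutable committed
// state that serves client queries.
type toyStore struct {
	committed map[string]string
	working   map[string]string
}

func newToyStore() *toyStore {
	return &toyStore{
		committed: map[string]string{},
		working:   map[string]string{},
	}
}

// Set writes to the working state only.
func (s *toyStore) Set(k, v string) { s.working[k] = v }

// Query reads the last committed state, like an external client would.
func (s *toyStore) Query(k string) string { return s.committed[k] }

// Commit promotes a copy of the working state to be the new committed state.
func (s *toyStore) Commit() {
	next := make(map[string]string, len(s.working))
	for k, v := range s.working {
		next[k] = v
	}
	s.committed = next
}

func main() {
	s := newToyStore()
	s.Set("alice", "100")
	fmt.Println(s.Query("alice") == "") // true: not visible until committed
	s.Commit()
	fmt.Println(s.Query("alice")) // 100
}
```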
### Transactions
Transactions (in the typical database sense) are a clean and established solution to this issue. We can look at the [isolation levels](https://en.wikipedia.org/wiki/Isolation_(database_systems)#Serializable) which attempt to provide us things like "repeatable reads". That means if we open a transaction, and query some data 100 times while other processes are writing to the db, we get the same result each time. This transaction has a reference to its own local state from the time the transaction started. (We are referring to the highest isolation levels here, which correlate well with the blockchain use case.)
If we implement a read-only transaction as a reference to state at the time of creation of that transaction, we can then hold these references to various snapshots, one per block that we are interested in, and allow the client to multiplex queries and proofs from these various blocks.
If we continue using these concepts (which have informed 30+ years of server-side design), we can add a few nice features to our write transactions. The first of these is `Rollback` and `Commit`. That means all the changes we make in this transaction have no effect on the database until they are committed. And until they are committed, we can always abort if we detect an anomaly, returning to the last committed state with a rollback.
There is also a nice extension to this available on some database servers, basically, "nested" transactions or "savepoints". This means that within one transaction, you can open a subtransaction/savepoint and continue work. Later you have the option to commit or rollback all work since the savepoint/subtransaction. And then continue with the main transaction.
If you don't understand why this is useful, look at how basecoin needs to [hold cached state for AppTx](https://github.com/tendermint/basecoin/blob/master/state/execution.go#L126-L149), meaning that it rolls back all modifications if the AppTx returns an error. This was implemented as a wrapper in basecoin, but it is a reasonable thing to support in the DB interface itself (especially since the implementation becomes quite non-trivial as soon as you support range queries).
To give a bit more reference to this concept in practice, read about [Savepoints in Postgresql](https://www.postgresql.org/docs/current/static/tutorial-transactions.html) ([reference](https://www.postgresql.org/docs/current/static/sql-savepoint.html)) or [Nesting transactions in SQL Server](http://dba-presents.com/index.php/databases/sql-server/43-nesting-transactions-and-save-transaction-command) (TL;DR: scroll to the bottom, section "Real nesting transactions with SAVE TRANSACTION")
### Functionality
Merkle trees work with key-value pairs, so we should most importantly focus on the basic Key-Value operations. That is `Get`, `Set`, and `Remove`. We also need to return a merkle proof for any key, along with a root hash of the tree for committing state to the blockchain. This is just the basic merkle-tree stuff.
If it is possible with the implementation, it is nice to provide access to Range Queries. That is, return all values where the key is between X and Y. If you construct your keys wisely, it is possible to store list (1:N) relations this way. E.g., if blog posts are stored under the key blog:`poster_id`:`sequence`, then I could search for all blog posts by a given `poster_id`, or even return just posts 10-19 from the given poster.
The construction of a tree that supports range queries was one of the [design decisions of go-merkle](https://github.com/tendermint/go-merkle/blob/master/README.md). It is also kind of possible with [ethereum's patricia trie](https://github.com/ethereum/wiki/wiki/Patricia-Tree) as long as the key is less than 32 bytes.
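As a small, runnable illustration of the key construction above (the helper name is hypothetical): zero-padding the sequence number keeps lexicographic order equal to numeric order, which is exactly what a range scan needs.
```
package main

import (
	"fmt"
	"sort"
)

// blogKey builds a composite key for the blog example above. The
// zero-padded sequence makes lexicographic order match numeric order.
func blogKey(posterID string, seq int) string {
	return fmt.Sprintf("blog:%s:%08d", posterID, seq)
}

func main() {
	keys := []string{
		blogKey("frey", 12),
		blogKey("frey", 2),
		blogKey("jae", 1),
	}
	sort.Strings(keys) // the order a sorted key-value store would use
	fmt.Println(keys)
	// [blog:frey:00000002 blog:frey:00000012 blog:jae:00000001]
	// A range query from blogKey("frey", 10) to blogKey("frey", 20)
	// would return exactly frey's posts 10-19.
}
```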
In addition to range queries, there is one more nice feature that we could add to our data store - listening to events. Depending on your context, this is "reactive programming", "event emitters", "notifications", etc... But the basic concept is that a client can listen for all changes to a given key (or set of keys), and receive a notification when this happens. This is very important to avoid [repeated polling and wasted queries](http://resthooks.org/) when a client simply wants to [detect changes](https://www.rethinkdb.com/blog/realtime-web/).
If the database provides access to some "listener" functionality, the app can choose to expose this to the external client via websockets, web hooks, http2 push events, android push notifications, etc, etc etc.... But if we want to support modern client functionality, let's add support for this reactive paradigm in our DB interface.
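As a sketch of what a consumer of this reactive feature might look like, here is a hypothetical implementation of the `Listener` interface proposed in the next section (assumes `fmt` is imported; the type and channel wiring are illustrative only):
```
// balanceNotifier is a hypothetical Listener implementation that forwards
// changes to a channel; a websocket handler could fan these out to clients.
type balanceNotifier struct {
	updates chan<- string
}

func (n balanceNotifier) OnSet(key, value, oldValue []byte) {
	n.updates <- fmt.Sprintf("key %X: %s -> %s", key, oldValue, value)
}

func (n balanceNotifier) OnRemove(key, oldValue []byte) {
	n.updates <- fmt.Sprintf("key %X removed (was %s)", key, oldValue)
}

// Registration would then be: db.AddListener([]byte("account/alice"), n)
```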
**TODO** support for more advanced backends, eg. Bolt....
### Go Interface
I will start with a simple go interface to illustrate the in-process interface. Once there is agreement on how this looks, we can work out the gRPC bindings to support calling out of process. These interfaces are not finalized code, but I think they demonstrate the concepts better than text and provide a strawman to get feedback.
```
// DB represents the committed state of a merkle-ized key-value store
type DB interface {
// Snapshot returns a reference to last committed state to use for
// providing proofs, you must close it at the end to garbage collect
// the historical state we hold on to to make these proofs
Snapshot() Prover
// Start a transaction - only way to change state
// This will return an error if there is an open Transaction
Begin() (Transaction, error)
// These callbacks are triggered when the Transaction is Committed
// to the DB. They can be used to eg. notify clients via websockets when
// their account balance changes.
AddListener(key []byte, listener Listener)
RemoveListener(listener Listener)
}
// DBReader represents a read-only connection to a snapshot of the db
type DBReader interface {
// Queries on my local view
Has(key []byte) (bool, error)
Get(key []byte) (Model, error)
GetRange(start, end []byte, ascending bool, limit int) ([]Model, error)
Closer
}
// Prover is an interface that lets one query for Proofs, holding the
// data at a specific location in memory
type Prover interface {
DBReader
// Hash is the AppHash (RootHash) for this block
Hash() (hash []byte)
// Prove returns the data along with a merkle Proof
// Model and Proof are nil if not found
Prove(key []byte) (Model, Proof, error)
}
// Transaction is a set of state changes to the DB to be applied atomically.
// There can only be one open transaction at a time, which may only have
// maximum one subtransaction at a time.
// In short, at any time, there is exactly one object that can write to the
// DB, and we can use Subtransactions to group operations and roll them back
// together (kind of like `types.KVCache` from basecoin)
type Transaction interface {
DBReader
// Change the state - will raise error immediately if this Transaction
// is not holding the exclusive write lock
Set(model Model) (err error)
Remove(key []byte) (removed bool, err error)
// Subtransaction starts a new subtransaction, rollback will not affect the
// parent. Only on Commit are the changes applied to this transaction.
// While the subtransaction exists, no write allowed on the parent.
// (You must Commit or Rollback the child to continue)
Subtransaction() Transaction
// Commit this transaction (or subtransaction), the parent reference is
// now updated.
// This only updates the persistent store if the top-level transaction commits
// (You may have any number of nested sub transactions)
Commit() error
// Rollback ends the transaction and throws away all transaction-local state,
// allowing the tree to prune those elements.
// The parent transaction now recovers the write lock.
Rollback()
}
// Listener registers callbacks on changes to the data store
type Listener interface {
OnSet(key, value, oldValue []byte)
OnRemove(key, oldValue []byte)
}
// Proof represents a merkle proof for a key
type Proof interface {
RootHash() []byte
Verify(key, value, root []byte) bool
}
type Model interface {
Key() []byte
Value() []byte
}
// Closer releases the reference to this state, allowing us to garbage collect
// Make sure to call it before discarding.
type Closer interface {
Close()
}
```
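To give a feel for how these pieces compose, here is a hypothetical per-block flow against the interface above (illustrative only; it assumes the declarations above are in scope and `fmt` is imported):
```
// processBlock sketches one block's lifecycle: open the single write
// transaction, apply changes, commit, then snapshot for proofs.
func processBlock(db DB) error {
	tx, err := db.Begin() // errors if another write transaction is open
	if err != nil {
		return err
	}
	// ... apply the block's transactions via tx.Set / tx.Remove ...
	if err := tx.Commit(); err != nil {
		tx.Rollback() // abandon transaction-local state on failure
		return err
	}

	// Serve proofs for the freshly committed state.
	snap := db.Snapshot()
	defer snap.Close() // release the reference so old state can be pruned
	fmt.Printf("app hash: %X\n", snap.Hash())
	return nil
}
```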
### Remote Interface
The use-case of allowing out-of-process calls is very powerful, and not just for providing a powerful merkle-ready data store to non-go applications.
If we allow the ABCI app to maintain the only writable connections, we can guarantee that all transactions are only processed through the tendermint consensus engine. We could then allow multiple "web server" machines "read-only" access and scale out the database reads, assuming the consensus engine, ABCI logic, and public key cryptography are more of a bottleneck than the database. We could even place the consensus engine, ABCI app, and data store on one machine, connected with unix sockets for security, and expose a tcp/ssl interface for reading the data, to scale out query processing over multiple machines.
Returning our focus to the ABCI app (the most important use case): an app may well want to maintain 100 or 1000 snapshots of different heights to allow people to easily query many proofs at a given height without race conditions (very important for IBC, ask Jae). Thus, we should not require a separate TCP connection for each height, as this gets quite awkward with so many connections. Also, if we want to use gRPC, we should consider the connections potentially transient (although they are more efficient with keep-alive).
Thus, the wire encoding of a transaction or a snapshot should simply return a unique id. All methods on a `Prover` or `Transaction` over the wire can send this id along with the arguments for the method call. And we just need a hash map on the server to map this id to a state.
The only negative of not requiring a persistent tcp connection for each snapshot is that there is no auto-detection if the client crashes without explicitly closing the connections. Thus, I would suggest adding a `Ping` thread in the gRPC interface which keeps the Snapshot alive. If no ping is received within a server-defined time, it may automatically close those transactions. And if we consider a client with 500 snapshots that needs to ping each one every 10 seconds, that is a lot of overhead, so we should design the ping to accept a list of IDs for the client and update them all. Or associate all snapshots with a clientID and then just send the clientID in the ping. (Please add other ideas on how to detect client crashes without persistent connections).
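A minimal sketch of the server-side handle table implied above (hypothetical names; assumes the `Prover` interface from earlier and that `sync` and `time` are imported):
```
// handleTable maps wire-level ids to live snapshots. Every remote call
// carries an id, which the server resolves back to in-memory state.
type handleTable struct {
	mtx      sync.Mutex
	nextID   uint64
	snaps    map[uint64]Prover
	lastPing map[uint64]time.Time
}

func (h *handleTable) open(p Prover) uint64 {
	h.mtx.Lock()
	defer h.mtx.Unlock()
	h.nextID++
	h.snaps[h.nextID] = p
	h.lastPing[h.nextID] = time.Now()
	return h.nextID
}

// ping refreshes many snapshots in one call, as suggested above; a
// background reaper would Close() snapshots whose pings have gone stale.
func (h *handleTable) ping(ids []uint64) {
	h.mtx.Lock()
	defer h.mtx.Unlock()
	now := time.Now()
	for _, id := range ids {
		if _, ok := h.snaps[id]; ok {
			h.lastPing[id] = now
		}
	}
}
```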
To encourage adoption, we should provide a nice client that uses this gRPC interface (like we do with ABCi). For go, the client may have the exact same interface as the in-process version, just that the error call may return network errors, not just illegal operations. We should also add a client with a clean API for Java, since that seems to be popular among app developers in the current tendermint community. Other bindings as we see the need in the server space.
### Persistence
Any data store worth its name should not lose all data on a crash. Even [redis provides some persistence](https://redis.io/topics/persistence) these days. Ideally, if the system crashes and restarts, it should have the data at the last block N that was committed. If the system crashes during the commit of block N+1, then the recovered state should be either block N or the completely committed block N+1, but no partial state between the two. Basically, the commit must be an atomic operation (even if updating 100's of records).
To avoid a lot of headaches ourselves, we can use an existing data store, such as leveldb, which provides `WriteBatch` to group all operations.
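For instance, with goleveldb (already a dependency in glide.lock), all of a block's writes can be staged in a batch and applied in one atomic call. A minimal, runnable sketch (keys and values are made up):
```
package main

import (
	"log"

	"github.com/syndtr/goleveldb/leveldb"
)

func main() {
	db, err := leveldb.OpenFile("demo_db", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Stage every write for the block in one batch.
	batch := new(leveldb.Batch)
	batch.Put([]byte("account/alice"), []byte("100"))
	batch.Put([]byte("account/bob"), []byte("42"))
	batch.Delete([]byte("account/stale"))

	// The batch lands atomically: after a crash we see all of it or none.
	if err := db.Write(batch, nil); err != nil {
		log.Fatal(err)
	}
}
```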
The other issue is cleaning up old state. We cannot delete any information from our persistent store, as long as any snapshot holds a reference to it (or else we get some panics when the data we query is not there). So, we need to store the outstanding deletions that we can perform when the snapshot is `Close`d. In addition, we must consider the case that the data store crashes with open snapshots. Thus, the info on outstanding deletions must also be persisted somewhere. Something like a "delete-behind log" (the opposite of a "write ahead log").
This is not a concern of the generic interface, but each implementation should take care to handle this well to avoid accumulation of unused references in the data store and eventual data bloat.
#### Backing stores
It is way outside the scope of this project to build our own database that is capable of efficiently storing the data, providing multiple read-only snapshots at once, and saving it atomically. The best approach seems to be to select an existing database (ideally a simple one) that provides this functionality and build upon it, much like the current `go-merkle` implementation builds upon `leveldb`. After some research, here are the winners and losers:
**Winners**
* Leveldb - [provides consistent snapshots](https://ayende.com/blog/161705/reviewing-leveldb-part-xiii-smile-and-here-is-your-snapshot), and [provides tooling for building ACID compliance](http://codeofrob.com/entries/writing-a-transaction-manager-on-top-of-leveldb.html)
* Note there are at least two solid implementations available in go - [goleveldb](https://github.com/syndtr/goleveldb) - a pure go implementation, and [levigo](https://github.com/jmhodges/levigo) - a go wrapper around leveldb.
* Goleveldb is much easier to compile and cross-compile (not requiring cgo), while levigo (or cleveldb) seems to provide a significant performance boost (but I had trouble even running benchmarks)
* PostgreSQL - fully supports these ACID semantics if you call `SET TRANSACTION ISOLATION LEVEL SERIALIZABLE` at the beginning of a transaction (tested)
* This may be total overkill unless we also want to make use of other features, like storing data in multiple columns with secondary indexes.
* Trillian can show an example of [how to store a merkle tree in sql](https://github.com/google/trillian/blob/master/storage/mysql/tree_storage.go)
**Losers**
* Bolt - open [read-only snapshots can block writing](https://github.com/boltdb/bolt/issues/378)
* Mongo - [barely even supports atomic operations](https://docs.mongodb.com/manual/core/write-operations-atomicity/), much less multiple snapshots
**To investigate**
* [Trillian](https://github.com/google/trillian) - has a [persistent merkle tree interface](https://github.com/google/trillian/blob/master/storage/tree_storage.go) along with [backend storage with mysql](https://github.com/google/trillian/blob/master/storage/mysql/tree_storage.go), good inspiration for our design if not directly using it
* [Moss](https://github.com/couchbase/moss) - another key-value store in go, seems similar to leveldb, maybe compare with performance tests?
### Security
When allowing access out-of-process, we should provide different mechanisms to secure it. The first is the choice of binding to a local unix socket or a tcp port. The second is the optional use of ssl to encrypt the connection (very important over tcp). The third is authentication to control access to the database.
We may also want to consider the case of two server connections with different permissions, eg. a local unix socket that allows write access with no more credentials, and a public TCP connection with ssl and authentication that only provides read-only access.
The use of ssl is quite easy in go: we just need to generate and sign a certificate. It is nice to be able to disable it for dev machines, but it is very important for production.
For authentication, let me sketch out a minimal solution. The server could just have a simple config file with key/bcrypt(password) pairs along with read/write permission level, and read that upon startup. The client must provide a username and password in the HTTP headers when making the original HTTPS gRPC connection.
This is super minimal to provide some protection. Things like LDAP, OAuth and single-sign on seem overkill and even potential security holes. Maybe there is another solution somewhere in the middle.
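For the password side of that minimal solution, go's `golang.org/x/crypto/bcrypt` package covers both halves; a small, runnable example (the config-file layout and permission wording are hypothetical):
```
package main

import (
	"fmt"

	"golang.org/x/crypto/bcrypt"
)

func main() {
	// At setup time: hash the password once and store only the hash
	// (e.g. in the server's key/bcrypt(password) config file).
	hash, err := bcrypt.GenerateFromPassword([]byte("s3cret"), bcrypt.DefaultCost)
	if err != nil {
		panic(err)
	}
	fmt.Printf("store: %s\n", hash)

	// On each connection: compare the presented password to the stored hash.
	if err := bcrypt.CompareHashAndPassword(hash, []byte("s3cret")); err != nil {
		fmt.Println("access denied")
		return
	}
	fmt.Println("access granted, per the configured read/write permission level")
}
```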


@@ -0,0 +1,17 @@
# Merkle data stores
To allow the efficient creation of an ABCI app, tendermint wishes to provide a reference implementation of a key-value store that provides merkle proofs of the data. These proofs then quickly allow the ABCI app to provide an app hash to the consensus engine, as well as a full proof to any client.
This engine is currently implemented in `go-merkle` with `merkleeyes` providing a language-agnostic binding via ABCI. It uses `go-db` bindings internally to persist data to leveldb.
What are some of the requirements of this store:
* It must support efficient key-value operations (get/set/delete)
* It must support persistence.
* We must only persist complete blocks, so when we come up after a crash we are at the state of block N or N+1, but not in-between these two states.
* It must allow us to read/write from one uncommitted state (working state), while serving other queries from the last committed state, along with a way to determine which one to serve for each client.
* It must allow us to hold references to old state, to allow providing proofs from 20 blocks ago. We can define some limits as to the maximum time to hold this data.
* We provide in-process bindings in Go
* We provide language-agnostic bindings when running the data store as its own process.

glide.lock (generated, 145 lines)

@@ -1,128 +1,181 @@
hash: d87a1fe0061d41c1e6ec78d405d54ae321e75f4bff22b38d19d3255bbd17f21e
updated: 2016-06-11T18:38:47.019992204-07:00
hash: d9724aa287c40d1b3856b6565f09235d809c8b2f7c6537c04f597137c0d6cd26
updated: 2017-04-21T13:09:25.708801802-04:00
imports:
- name: github.com/btcsuite/btcd
version: ff4ada0b0e1ebffa3f9c15cadc96ab0d08a11034
version: 4b348c1d33373d672edd83fc576892d0e46686d2
subpackages:
- btcec
- name: github.com/btcsuite/fastsha256
version: 302ad4db268b46f9ebda3078f6f7397f96047735
- name: github.com/BurntSushi/toml
version: f0aeabca5a127c4078abb8c8d64298b147264b55
version: b26d9c308763d68093482582cea63d69be07a0f0
- name: github.com/davecgh/go-spew
version: 6d212800a42e8ab5c146b8ace3490ee17e5225f9
subpackages:
- spew
- name: github.com/ebuchman/fail-test
version: 95f809107225be108efcf10a3509e4ea6ceef3c4
- name: github.com/go-stack/stack
version: 100eb0c0a9c5b306ca2fb4f165df21d80ada4b82
- name: github.com/gogo/protobuf
version: 318371cbef6bab80e8d1c69b470fffa79eebfb54
version: 100ba4e885062801d56799d78530b73b178a78f3
subpackages:
- proto
- name: github.com/golang/protobuf
version: 8616e8ee5e20a1704615e6c8d7afcdac06087a67
version: 2bba0603135d7d7f5cb73b2125beeda19c09f4ef
subpackages:
- proto
- ptypes/any
- name: github.com/golang/snappy
version: d9eb7a3d35ec988b8585d4a0068e462c27d28380
version: 553a641470496b2327abcac10b36396bd98e45c9
- name: github.com/gorilla/websocket
version: a68708917c6a4f06314ab4e52493cc61359c9d42
version: 3ab3a8b8831546bd18fd182c20687ca853b2bb13
- name: github.com/inconshreveable/mousetrap
version: 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
- name: github.com/jmhodges/levigo
version: c42d9e0ca023e2198120196f842701bb4c55d7b9
- name: github.com/mattn/go-colorable
version: 9056b7a9f2d1f2d96498d6d146acd1f9d5ed3d59
version: ded68f7a9561c023e790de24279db7ebf473ea80
- name: github.com/mattn/go-isatty
version: 56b76bdf51f7708750eac80fa38b952bb9f32639
version: fc9e8d8ef48496124e79ae0df75490096eccf6fe
- name: github.com/pkg/errors
version: 645ef00459ed84a119197bfb8d8205042c6df63d
- name: github.com/pmezard/go-difflib
version: d8ed2627bdf02c080bf22230dbb337003b7aba2d
subpackages:
- difflib
- name: github.com/spf13/cobra
version: 10f6b9d7e1631a54ad07c5c0fb71c28a1abfd3c2
- name: github.com/spf13/pflag
version: 367864438f1b1a3c7db4da06a2f55b144e6784e0
version: 2300d0f8576fe575f71aaa5b9bbe4e1b0dc2eb51
- name: github.com/stretchr/testify
version: 69483b4bd14f5845b5a1e55bca19e954e827f1d0
subpackages:
- assert
- require
- name: github.com/syndtr/goleveldb
version: fa5b5c78794bc5c18f330361059f871ae8c2b9d6
version: 8c81ea47d4c41a385645e133e15510fc6a2a74b4
subpackages:
- leveldb
- leveldb/errors
- leveldb/opt
- leveldb/cache
- leveldb/comparer
- leveldb/errors
- leveldb/filter
- leveldb/iterator
- leveldb/journal
- leveldb/memdb
- leveldb/opt
- leveldb/storage
- leveldb/table
- leveldb/util
- name: github.com/tendermint/abci
version: 56e13d87f4e3ec1ea756957d6b23caa6ebcf0998
subpackages:
- client
- example/counter
- example/dummy
- server
- types
- name: github.com/tendermint/ed25519
version: 1f52c6f8b8a5c7908aff4497c186af344b428925
subpackages:
- extra25519
- edwards25519
- name: github.com/tendermint/flowcontrol
version: 84d9671090430e8ec80e35b339907e0579b999eb
- extra25519
- name: github.com/tendermint/go-autofile
version: 48b17de82914e1ec2f134ce823ba426337d2c518
- name: github.com/tendermint/go-clist
version: 3baa390bbaf7634251c42ad69a8682e7e3990552
- name: github.com/tendermint/go-common
version: dee6622bf7f811d3ba8638a3f5ffaf8d679aa9d9
version: f9e3db037330c8a8d61d3966de8473eaf01154fa
subpackages:
- test
- name: github.com/tendermint/go-config
version: e64b424499acd0eb9856b88e10c0dff41628c0d6
version: 620dcbbd7d587cf3599dedbf329b64311b0c307a
- name: github.com/tendermint/go-crypto
version: 41cfb7b677f4e16cdfd22b6ce0946c89919fbc7b
version: 0ca2c6fdb0706001ca4c4b9b80c9f428e8cf39da
- name: github.com/tendermint/go-data
version: e7fcc6d081ec8518912fcdc103188275f83a3ee5
- name: github.com/tendermint/go-db
version: 31fdd21c7eaeed53e0ea7ca597fb1e960e2988a5
version: 9643f60bc2578693844aacf380a7c32e4c029fee
- name: github.com/tendermint/go-events
version: 48fa21511b259278b871a37b6951da2d5bef698d
version: f8ffbfb2be3483e9e7927495590a727f51c0c11f
- name: github.com/tendermint/go-flowrate
version: a20c98e61957faa93b4014fbd902f20ab9317a6a
subpackages:
- flowrate
- name: github.com/tendermint/go-logger
version: cefb3a45c0bf3c493a04e9bcd9b1540528be59f2
- name: github.com/tendermint/go-merkle
version: 05042c6ab9cad51d12e4cecf717ae68e3b1409a8
version: 714d4d04557fd068a7c2a1748241ce8428015a96
- name: github.com/tendermint/go-p2p
version: 929cf433b9c8e987af5f7f3ca3ce717e1e3eda53
version: e8f33a47846708269d373f9c8080613d6c4f66b2
subpackages:
- upnp
- name: github.com/tendermint/go-rpc
version: dea910cd3e71bbfaf1973fd7ba295f0ee515a25f
version: 2c8df0ee6b60d8ac33662df13a4e358c679e02bf
subpackages:
- client
- server
- types
- name: github.com/tendermint/go-wire
version: 3b0adbc86ed8425eaed98516165b6788d9f4de7a
version: c1c9a57ab8038448ddea1714c0698f8051e5748c
- name: github.com/tendermint/log15
version: 9545b249b3aacafa97f79e0838b02b274adc6f5f
version: ae0f3d6450da9eac7074b439c8e1c3cabf0d5ce6
subpackages:
- term
- name: github.com/tendermint/tmsp
version: ba11348508939e9d273cdc1cc476c5c611e14e66
- name: github.com/tendermint/merkleeyes
version: 9fb76efa5aebe773a598f97e68e75fe53d520e70
subpackages:
- app
- client
- example/dummy
- example/nil
- types
- testutil
- name: golang.org/x/crypto
version: 77f4136a99ffb5ecdbdd0226bd5cb146cf56bc0e
version: 96846453c37f0876340a66a47f3f75b1f3a6cd2d
subpackages:
- ripemd160
- curve25519
- nacl/box
- nacl/secretbox
- openpgp/armor
- curve25519
- salsa20/salsa
- poly1305
- openpgp/errors
- poly1305
- ripemd160
- salsa20/salsa
- name: golang.org/x/net
version: 3f122ce3dbbe488b7e6a8bdb26f41edec852a40b
version: c8c74377599bd978aee1cf3b9b63a8634051cec2
subpackages:
- context
- http2
- trace
- http2/hpack
- lex/httplex
- idna
- internal/timeseries
- lex/httplex
- trace
- name: golang.org/x/sys
version: 7f918dd405547ecb864d14a8ecbbfe205b5f930f
version: ea9bcade75cb975a0b9738936568ab388b845617
subpackages:
- unix
- name: golang.org/x/text
version: 19e3104b43db45fca0303f489a9536087b184802
subpackages:
- secure/bidirule
- transform
- unicode/bidi
- unicode/norm
- name: google.golang.org/genproto
version: 411e09b969b1170a9f0c467558eb4c4c110d9c77
subpackages:
- googleapis/rpc/status
- name: google.golang.org/grpc
version: daeb9cc0f2607997cce611a1458e71b981ce5986
version: 6914ab1e338c92da4218a23d27fcd03d0ad78d46
subpackages:
- codes
- credentials
- grpclog
- internal
- keepalive
- metadata
- naming
- transport
- peer
devImports: []
- stats
- status
- tap
- transport
testImports: []


@@ -1,36 +1,56 @@
package: github.com/tendermint/tendermint
import:
- package: github.com/tendermint/go-autofile
version: develop
- package: github.com/tendermint/go-clist
version: develop
- package: github.com/tendermint/go-common
version: develop
- package: github.com/tendermint/go-config
version: develop
- package: github.com/tendermint/go-crypto
version: develop
- package: github.com/tendermint/go-data
version: develop
- package: github.com/tendermint/go-db
version: develop
- package: github.com/tendermint/go-events
version: develop
- package: github.com/tendermint/go-logger
version: develop
- package: github.com/tendermint/go-merkle
version: develop
- package: github.com/tendermint/go-p2p
version: develop
- package: github.com/tendermint/go-rpc
version: develop
- package: github.com/tendermint/go-wire
version: develop
- package: github.com/tendermint/abci
version: develop
- package: github.com/tendermint/go-flowrate
- package: github.com/tendermint/log15
- package: github.com/tendermint/ed25519
- package: github.com/tendermint/merkleeyes
version: develop
subpackages:
- app
- package: github.com/gogo/protobuf
version: ^0.3
subpackages:
- proto
- package: github.com/gorilla/websocket
version: ^1.1.0
- package: github.com/spf13/cobra
- package: github.com/spf13/pflag
- package: github.com/tendermint/ed25519
- package: github.com/tendermint/flowcontrol
- package: github.com/tendermint/go-clist
- package: github.com/tendermint/go-common
- package: github.com/tendermint/go-config
- package: github.com/tendermint/go-crypto
- package: github.com/tendermint/go-db
- package: github.com/tendermint/go-events
- package: github.com/tendermint/go-logger
- package: github.com/tendermint/go-merkle
- package: github.com/tendermint/go-p2p
subpackages:
- upnp
- package: github.com/tendermint/go-rpc
subpackages:
- client
- server
- types
- package: github.com/tendermint/go-wire
- package: github.com/tendermint/log15
- package: github.com/tendermint/tmsp
subpackages:
- client
- example/dummy
- example/nil
- types
- package: github.com/pkg/errors
version: ^0.8.0
- package: golang.org/x/crypto
subpackages:
- ripemd160
testImport:
- package: github.com/stretchr/testify
version: ^1.1.4
subpackages:
- assert
- require


@@ -7,12 +7,13 @@ import (
"sync/atomic"
"time"
abci "github.com/tendermint/abci/types"
auto "github.com/tendermint/go-autofile"
"github.com/tendermint/go-clist"
. "github.com/tendermint/go-common"
cfg "github.com/tendermint/go-config"
"github.com/tendermint/tendermint/proxy"
"github.com/tendermint/tendermint/types"
tmsp "github.com/tendermint/tmsp/types"
)
/*
@@ -39,7 +40,7 @@ Garbage collection of old elements from mempool.txs is handled via
the DetachPrev() call, which makes old elements unreachable by the
peer broadcastTxRoutine(), so they are automatically garbage collected.
TODO: Better handle tmsp client errors. (make it automatically handle connection errors)
TODO: Better handle abci client errors. (make it automatically handle connection errors)
*/
@@ -49,7 +50,7 @@ type Mempool struct {
config cfg.Config
proxyMtx sync.Mutex
proxyAppConn proxy.AppConn
proxyAppConn proxy.AppConnMempool
txs *clist.CList // concurrent linked-list of good txs
counter int64 // simple incrementing counter
height int // the last block Update()'d to
@@ -59,11 +60,13 @@ type Mempool struct {
// Keep a cache of already-seen txs.
// This reduces the pressure on the proxyApp.
cacheMap map[string]struct{}
cacheList *list.List // to remove oldest tx when cache gets too big
cache *txCache
// A log of mempool txs
wal *auto.AutoFile
}
func NewMempool(config cfg.Config, proxyAppConn proxy.AppConn) *Mempool {
func NewMempool(config cfg.Config, proxyAppConn proxy.AppConnMempool) *Mempool {
mempool := &Mempool{
config: config,
proxyAppConn: proxyAppConn,
@@ -74,13 +77,30 @@ func NewMempool(config cfg.Config, proxyAppConn proxy.AppConn) *Mempool {
recheckCursor: nil,
recheckEnd: nil,
cacheMap: make(map[string]struct{}, cacheSize),
cacheList: list.New(),
cache: newTxCache(cacheSize),
}
mempool.initWAL()
proxyAppConn.SetResponseCallback(mempool.resCb)
return mempool
}
func (mem *Mempool) initWAL() {
walDir := mem.config.GetString("mempool_wal_dir")
if walDir != "" {
err := EnsureDir(walDir, 0700)
if err != nil {
log.Error("Error ensuring Mempool wal dir", "error", err)
PanicSanity(err)
}
af, err := auto.OpenAutoFile(walDir + "/wal")
if err != nil {
log.Error("Error opening Mempool wal file", "error", err)
PanicSanity(err)
}
mem.wal = af
}
}
// consensus must be able to hold lock to safely update
func (mem *Mempool) Lock() {
mem.proxyMtx.Lock()
@@ -100,8 +120,7 @@ func (mem *Mempool) Flush() {
mem.proxyMtx.Lock()
defer mem.proxyMtx.Unlock()
mem.cacheMap = make(map[string]struct{}, cacheSize)
mem.cacheList.Init()
mem.cache.Reset()
for e := mem.txs.Front(); e != nil; e = e.Next() {
mem.txs.Remove(e)
@@ -120,17 +139,17 @@ func (mem *Mempool) TxsFrontWait() *clist.CElement {
// cb: A callback from the CheckTx command.
// It gets called from another goroutine.
// CONTRACT: Either cb will get called, or err returned.
func (mem *Mempool) CheckTx(tx types.Tx, cb func(*tmsp.Response)) (err error) {
func (mem *Mempool) CheckTx(tx types.Tx, cb func(*abci.Response)) (err error) {
mem.proxyMtx.Lock()
defer mem.proxyMtx.Unlock()
// CACHE
if _, exists := mem.cacheMap[string(tx)]; exists {
if mem.cache.Exists(tx) {
if cb != nil {
cb(&tmsp.Response{
Value: &tmsp.Response_CheckTx{
&tmsp.ResponseCheckTx{
Code: tmsp.CodeType_BadNonce, // TODO or duplicate tx
cb(&abci.Response{
Value: &abci.Response_CheckTx{
&abci.ResponseCheckTx{
Code: abci.CodeType_BadNonce, // TODO or duplicate tx
Log: "Duplicate transaction (ignored)",
},
},
@@ -138,18 +157,17 @@ func (mem *Mempool) CheckTx(tx types.Tx, cb func(*tmsp.Response)) (err error) {
}
return nil
}
if mem.cacheList.Len() >= cacheSize {
popped := mem.cacheList.Front()
poppedTx := popped.Value.(types.Tx)
// NOTE: the tx may have already been removed from the map
// but deleting a non-existent element is fine
delete(mem.cacheMap, string(poppedTx))
mem.cacheList.Remove(popped)
}
mem.cacheMap[string(tx)] = struct{}{}
mem.cacheList.PushBack(tx)
mem.cache.Push(tx)
// END CACHE
// WAL
if mem.wal != nil {
// TODO: Notify administrators when WAL fails
mem.wal.Write([]byte(tx))
mem.wal.Write([]byte("\n"))
}
// END WAL
// NOTE: proxyAppConn may error if tx buffer is full
if err = mem.proxyAppConn.Error(); err != nil {
return err
@@ -162,15 +180,8 @@ func (mem *Mempool) CheckTx(tx types.Tx, cb func(*tmsp.Response)) (err error) {
return nil
}
func (mem *Mempool) removeTxFromCacheMap(tx []byte) {
mem.proxyMtx.Lock()
// NOTE tx not removed from cacheList
delete(mem.cacheMap, string(tx))
mem.proxyMtx.Unlock()
}
// TMSP callback function
func (mem *Mempool) resCb(req *tmsp.Request, res *tmsp.Response) {
// ABCI callback function
func (mem *Mempool) resCb(req *abci.Request, res *abci.Response) {
if mem.recheckCursor == nil {
mem.resCbNormal(req, res)
} else {
@@ -178,10 +189,10 @@ func (mem *Mempool) resCb(req *tmsp.Request, res *tmsp.Response) {
}
}
func (mem *Mempool) resCbNormal(req *tmsp.Request, res *tmsp.Response) {
func (mem *Mempool) resCbNormal(req *abci.Request, res *abci.Response) {
switch r := res.Value.(type) {
case *tmsp.Response_CheckTx:
if r.CheckTx.Code == tmsp.CodeType_OK {
case *abci.Response_CheckTx:
if r.CheckTx.Code == abci.CodeType_OK {
mem.counter++
memTx := &mempoolTx{
counter: mem.counter,
@@ -194,9 +205,7 @@ func (mem *Mempool) resCbNormal(req *tmsp.Request, res *tmsp.Response) {
log.Info("Bad Transaction", "res", r)
// remove from cache (it might be good later)
// note this is an async callback,
// so we need to grab the lock in removeTxFromCacheMap
mem.removeTxFromCacheMap(req.GetCheckTx().Tx)
mem.cache.Remove(req.GetCheckTx().Tx)
// TODO: handle other retcodes
}
@@ -205,15 +214,15 @@ func (mem *Mempool) resCbNormal(req *tmsp.Request, res *tmsp.Response) {
}
}
func (mem *Mempool) resCbRecheck(req *tmsp.Request, res *tmsp.Response) {
func (mem *Mempool) resCbRecheck(req *abci.Request, res *abci.Response) {
switch r := res.Value.(type) {
case *tmsp.Response_CheckTx:
case *abci.Response_CheckTx:
memTx := mem.recheckCursor.Value.(*mempoolTx)
if !bytes.Equal(req.GetCheckTx().Tx, memTx.tx) {
PanicSanity(Fmt("Unexpected tx response from proxy during recheck\n"+
"Expected %X, got %X", r.CheckTx.Data, memTx.tx))
}
if r.CheckTx.Code == tmsp.CodeType_OK {
if r.CheckTx.Code == abci.CodeType_OK {
// Good, nothing to do.
} else {
// Tx became invalidated due to newly committed block.
@@ -221,7 +230,7 @@ func (mem *Mempool) resCbRecheck(req *tmsp.Request, res *tmsp.Response) {
mem.recheckCursor.DetachPrev()
// remove from cache (it might be good later)
mem.removeTxFromCacheMap(req.GetCheckTx().Tx)
mem.cache.Remove(req.GetCheckTx().Tx)
}
if mem.recheckCursor == mem.recheckEnd {
mem.recheckCursor = nil
@@ -239,8 +248,8 @@ func (mem *Mempool) resCbRecheck(req *tmsp.Request, res *tmsp.Response) {
}
// Get the valid transactions remaining
// If maxTxs is 0, there is no cap.
func (mem *Mempool) Reap(maxTxs int) []types.Tx {
// If maxTxs is -1, there is no cap on returned transactions.
func (mem *Mempool) Reap(maxTxs int) types.Txs {
mem.proxyMtx.Lock()
defer mem.proxyMtx.Unlock()
@@ -254,7 +263,7 @@ func (mem *Mempool) Reap(maxTxs int) []types.Tx {
}
// maxTxs: -1 means uncapped, 0 means none
func (mem *Mempool) collectTxs(maxTxs int) []types.Tx {
func (mem *Mempool) collectTxs(maxTxs int) types.Txs {
if maxTxs == 0 {
return []types.Tx{}
} else if maxTxs < 0 {
@@ -272,9 +281,8 @@ func (mem *Mempool) collectTxs(maxTxs int) []types.Tx {
// Mempool will discard these txs.
// NOTE: this should be called *after* block is committed by consensus.
// NOTE: unsafe; Lock/Unlock must be managed by caller
func (mem *Mempool) Update(height int, txs []types.Tx) {
// mem.proxyMtx.Lock()
// defer mem.proxyMtx.Unlock()
func (mem *Mempool) Update(height int, txs types.Txs) {
// TODO: check err ?
mem.proxyAppConn.FlushSync() // To flush async resCb calls e.g. from CheckTx
// First, create a lookup map of txns in new txs.
@@ -348,3 +356,62 @@ type mempoolTx struct {
func (memTx *mempoolTx) Height() int {
return int(atomic.LoadInt64(&memTx.height))
}
//--------------------------------------------------------------------------------
type txCache struct {
mtx sync.Mutex
size int
map_ map[string]struct{}
list *list.List // to remove oldest tx when cache gets too big
}
func newTxCache(cacheSize int) *txCache {
return &txCache{
size: cacheSize,
map_: make(map[string]struct{}, cacheSize),
list: list.New(),
}
}
func (cache *txCache) Reset() {
cache.mtx.Lock()
cache.map_ = make(map[string]struct{}, cache.size)
cache.list.Init()
cache.mtx.Unlock()
}
func (cache *txCache) Exists(tx types.Tx) bool {
cache.mtx.Lock()
_, exists := cache.map_[string(tx)]
cache.mtx.Unlock()
return exists
}
// Returns false if tx is in cache.
func (cache *txCache) Push(tx types.Tx) bool {
cache.mtx.Lock()
defer cache.mtx.Unlock()
if _, exists := cache.map_[string(tx)]; exists {
return false
}
if cache.list.Len() >= cache.size {
popped := cache.list.Front()
poppedTx := popped.Value.(types.Tx)
// NOTE: the tx may have already been removed from the map
// but deleting a non-existent element is fine
delete(cache.map_, string(poppedTx))
cache.list.Remove(popped)
}
cache.map_[string(tx)] = struct{}{}
cache.list.PushBack(tx)
return true
}
func (cache *txCache) Remove(tx types.Tx) {
cache.mtx.Lock()
delete(cache.map_, string(tx))
cache.mtx.Unlock()
}
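Stepping back from the diff for a moment: the new `CheckTx` signature takes an `*abci.Response` callback, with the contract that either the callback fires or an error is returned. A hypothetical caller might look like the following (illustrative only, not part of this diff; `broadcastTx` is a made-up name, and the getter/constant names follow the generated ABCI types used above):

```go
// Hypothetical caller of the new CheckTx signature (not part of this diff).
package example

import (
	"log"

	abci "github.com/tendermint/abci/types"
	mempl "github.com/tendermint/tendermint/mempool"
	"github.com/tendermint/tendermint/types"
)

// broadcastTx submits a tx and inspects the CheckTx result in the async
// callback. CONTRACT (from above): either cb gets called, or err is returned.
func broadcastTx(mem *mempl.Mempool, tx types.Tx) error {
	return mem.CheckTx(tx, func(res *abci.Response) {
		if r := res.GetCheckTx(); r.Code != abci.CodeType_OK {
			// e.g. CodeType_BadNonce when the tx was already in the cache
			log.Printf("tx rejected: code=%v log=%s", r.Code, r.Log)
		}
	})
}
```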


@@ -2,13 +2,12 @@ package mempool
import (
"encoding/binary"
"sync"
"testing"
"github.com/tendermint/tendermint/config/tendermint_test"
"github.com/tendermint/tendermint/proxy"
"github.com/tendermint/tendermint/types"
tmspcli "github.com/tendermint/tmsp/client"
"github.com/tendermint/tmsp/example/counter"
"github.com/tendermint/abci/example/counter"
)
func TestSerialReap(t *testing.T) {
@@ -16,13 +15,13 @@ func TestSerialReap(t *testing.T) {
app := counter.NewCounterApplication(true)
app.SetOption("serial", "on")
mtx := new(sync.Mutex)
appConnMem := tmspcli.NewLocalClient(mtx, app)
appConnCon := tmspcli.NewLocalClient(mtx, app)
cc := proxy.NewLocalClientCreator(app)
appConnMem, _ := cc.NewABCIClient()
appConnCon, _ := cc.NewABCIClient()
mempool := NewMempool(config, appConnMem)
appendTxsRange := func(start, end int) {
// Append some txs.
deliverTxsRange := func(start, end int) {
// Deliver some txs.
for i := start; i < end; i++ {
// This will succeed
@@ -62,17 +61,17 @@ func TestSerialReap(t *testing.T) {
}
commitRange := func(start, end int) {
// Append some txs.
// Deliver some txs.
for i := start; i < end; i++ {
txBytes := make([]byte, 8)
binary.BigEndian.PutUint64(txBytes, uint64(i))
res := appConnCon.AppendTx(txBytes)
res := appConnCon.DeliverTxSync(txBytes)
if !res.IsOK() {
t.Errorf("Error committing tx. Code:%v result:%X log:%v",
res.Code, res.Data, res.Log)
}
}
res := appConnCon.Commit()
res := appConnCon.CommitSync()
if len(res.Data) != 8 {
t.Errorf("Error committing. Hash:%X log:%v", res.Data, res.Log)
}
@@ -80,8 +79,8 @@ func TestSerialReap(t *testing.T) {
//----------------------------------------
// Append some txs.
appendTxsRange(0, 100)
// Deliver some txs.
deliverTxsRange(0, 100)
// Reap the txs.
reapCheck(100)
@@ -89,9 +88,9 @@ func TestSerialReap(t *testing.T) {
// Reap again. We should get the same amount
reapCheck(100)
// Append 0 to 999, we should reap 900 new txs
// Deliver 0 to 999, we should reap 900 new txs
// because 100 were already counted.
appendTxsRange(0, 1000)
deliverTxsRange(0, 1000)
// Reap the txs.
reapCheck(1000)
@@ -106,8 +105,8 @@ func TestSerialReap(t *testing.T) {
// We should have 500 left.
reapCheck(500)
// Append 100 invalid txs and 100 valid txs
appendTxsRange(900, 1100)
// Deliver 100 invalid txs and 100 valid txs
deliverTxsRange(900, 1100)
// We should have 600 now.
reapCheck(600)


@@ -6,14 +6,12 @@ import (
"reflect"
"time"
abci "github.com/tendermint/abci/types"
"github.com/tendermint/go-clist"
. "github.com/tendermint/go-common"
cfg "github.com/tendermint/go-config"
"github.com/tendermint/go-events"
"github.com/tendermint/go-p2p"
"github.com/tendermint/go-wire"
"github.com/tendermint/tendermint/types"
tmsp "github.com/tendermint/tmsp/types"
)
const (
@@ -28,7 +26,7 @@ type MempoolReactor struct {
p2p.BaseReactor
config cfg.Config
Mempool *Mempool
evsw *events.EventSwitch
evsw types.EventSwitch
}
func NewMempoolReactor(config cfg.Config, mempool *Mempool) *MempoolReactor {
@@ -67,7 +65,7 @@ func (memR *MempoolReactor) Receive(chID byte, src *p2p.Peer, msgBytes []byte) {
log.Warn("Error decoding message", "error", err)
return
}
log.Info("Receive", "src", src, "chId", chID, "msg", msg)
log.Debug("Receive", "src", src, "chId", chID, "msg", msg)
switch msg := msg.(type) {
case *TxMessage:
@@ -81,12 +79,12 @@ func (memR *MempoolReactor) Receive(chID byte, src *p2p.Peer, msgBytes []byte) {
}
// broadcasting happens from goroutines, one per peer
default:
log.Warn(Fmt("Unknown message type %v", reflect.TypeOf(msg)))
log.Warn(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
}
}
// Just an alias for CheckTx since broadcasting happens in peer routines
func (memR *MempoolReactor) BroadcastTx(tx types.Tx, cb func(*tmsp.Response)) error {
func (memR *MempoolReactor) BroadcastTx(tx types.Tx, cb func(*abci.Response)) error {
return memR.Mempool.CheckTx(tx, cb)
}
@@ -110,7 +108,7 @@ func (memR *MempoolReactor) broadcastTxRoutine(peer Peer) {
var next *clist.CElement
for {
if !memR.IsRunning() {
if !memR.IsRunning() || !peer.IsRunning() {
return // Quit!
}
if next == nil {
@@ -143,7 +141,7 @@ func (memR *MempoolReactor) broadcastTxRoutine(peer Peer) {
}
// implements events.Eventable
func (memR *MempoolReactor) SetEventSwitch(evsw *events.EventSwitch) {
func (memR *MempoolReactor) SetEventSwitch(evsw types.EventSwitch) {
memR.evsw = evsw
}


@@ -2,84 +2,111 @@ package node
import (
"bytes"
"io/ioutil"
"errors"
"net"
"net/http"
"strings"
"sync"
"time"
. "github.com/tendermint/go-common"
abci "github.com/tendermint/abci/types"
cmn "github.com/tendermint/go-common"
cfg "github.com/tendermint/go-config"
"github.com/tendermint/go-crypto"
crypto "github.com/tendermint/go-crypto"
dbm "github.com/tendermint/go-db"
"github.com/tendermint/go-events"
"github.com/tendermint/go-p2p"
"github.com/tendermint/go-rpc"
"github.com/tendermint/go-rpc/server"
"github.com/tendermint/go-wire"
p2p "github.com/tendermint/go-p2p"
rpc "github.com/tendermint/go-rpc"
rpcserver "github.com/tendermint/go-rpc/server"
wire "github.com/tendermint/go-wire"
bc "github.com/tendermint/tendermint/blockchain"
"github.com/tendermint/tendermint/consensus"
mempl "github.com/tendermint/tendermint/mempool"
"github.com/tendermint/tendermint/proxy"
rpccore "github.com/tendermint/tendermint/rpc/core"
grpccore "github.com/tendermint/tendermint/rpc/grpc"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/state/txindex"
"github.com/tendermint/tendermint/state/txindex/kv"
"github.com/tendermint/tendermint/state/txindex/null"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tendermint/version"
tmspcli "github.com/tendermint/tmsp/client"
"github.com/tendermint/tmsp/example/dummy"
"github.com/tendermint/tmsp/example/nil"
_ "net/http/pprof"
)
import _ "net/http/pprof"
type Node struct {
config cfg.Config
sw *p2p.Switch
evsw *events.EventSwitch
blockStore *bc.BlockStore
bcReactor *bc.BlockchainReactor
mempoolReactor *mempl.MempoolReactor
consensusState *consensus.ConsensusState
consensusReactor *consensus.ConsensusReactor
privValidator *types.PrivValidator
genesisDoc *types.GenesisDoc
privKey crypto.PrivKeyEd25519
cmn.BaseService
// config
config cfg.Config // user config
genesisDoc *types.GenesisDoc // initial validator set
privValidator *types.PrivValidator // local node's validator key
// network
privKey crypto.PrivKeyEd25519 // local node's p2p key
sw *p2p.Switch // p2p connections
addrBook *p2p.AddrBook // known peers
// services
evsw types.EventSwitch // pub/sub for services
blockStore *bc.BlockStore // store the blockchain to disk
bcReactor *bc.BlockchainReactor // for fast-syncing
mempoolReactor *mempl.MempoolReactor // for gossiping transactions
consensusState *consensus.ConsensusState // latest consensus state
consensusReactor *consensus.ConsensusReactor // for participating in the consensus
proxyApp proxy.AppConns // connection to the application
rpcListeners []net.Listener // rpc servers
txIndexer txindex.TxIndexer
}
func NewNode(config cfg.Config, privValidator *types.PrivValidator, getProxyApp func(proxyAddr, transport string, appHash []byte) proxy.AppConn) *Node {
func NewNodeDefault(config cfg.Config) *Node {
// Get PrivValidator
privValidatorFile := config.GetString("priv_validator_file")
privValidator := types.LoadOrGenPrivValidator(privValidatorFile)
return NewNode(config, privValidator, proxy.DefaultClientCreator(config))
}
EnsureDir(config.GetString("db_dir"), 0700) // in case we use memdb, cswal still gets written here
func NewNode(config cfg.Config, privValidator *types.PrivValidator, clientCreator proxy.ClientCreator) *Node {
// Get BlockStore
blockStoreDB := dbm.NewDB("blockstore", config.GetString("db_backend"), config.GetString("db_dir"))
blockStore := bc.NewBlockStore(blockStoreDB)
// Get State db
stateDB := dbm.NewDB("state", config.GetString("db_backend"), config.GetString("db_dir"))
// Get State
state := getState(config, stateDB)
// Create two proxyAppConn connections,
// one for the consensus and one for the mempool.
proxyAddr := config.GetString("proxy_app")
transport := config.GetString("tmsp")
proxyAppConnMempool := getProxyApp(proxyAddr, transport, state.AppHash)
proxyAppConnConsensus := getProxyApp(proxyAddr, transport, state.AppHash)
stateDB := dbm.NewDB("state", config.GetString("db_backend"), config.GetString("db_dir"))
state := sm.GetState(config, stateDB)
// add the chainid and number of validators to the global config
config.Set("chain_id", state.ChainID)
config.Set("num_vals", state.Validators.Size())
// Create the proxyApp, which manages connections (consensus, mempool, query)
// and sync tendermint and the app by replaying any necessary blocks
proxyApp := proxy.NewAppConns(config, clientCreator, consensus.NewHandshaker(config, state, blockStore))
if _, err := proxyApp.Start(); err != nil {
cmn.Exit(cmn.Fmt("Error starting proxy app connections: %v", err))
}
// reload the state (it may have been updated by the handshake)
state = sm.LoadState(stateDB)
// Transaction indexing
var txIndexer txindex.TxIndexer
switch config.GetString("tx_index") {
case "kv":
store := dbm.NewDB("tx_index", config.GetString("db_backend"), config.GetString("db_dir"))
txIndexer = kv.NewTxIndex(store)
default:
txIndexer = &null.TxIndex{}
}
state.TxIndexer = txIndexer
// Generate node PrivKey
privKey := crypto.GenPrivKeyEd25519()
// Make event switch
eventSwitch := events.NewEventSwitch()
eventSwitch := types.NewEventSwitch()
_, err := eventSwitch.Start()
if err != nil {
Exit(Fmt("Failed to start switch: %v", err))
cmn.Exit(cmn.Fmt("Failed to start switch: %v", err))
}
// Decide whether to fast-sync or not
@@ -93,24 +120,18 @@ func NewNode(config cfg.Config, privValidator *types.PrivValidator, getProxyApp
}
// Make BlockchainReactor
bcReactor := bc.NewBlockchainReactor(state.Copy(), proxyAppConnConsensus, blockStore, fastSync)
bcReactor := bc.NewBlockchainReactor(config, state.Copy(), proxyApp.Consensus(), blockStore, fastSync)
// Make MempoolReactor
mempool := mempl.NewMempool(config, proxyAppConnMempool)
mempool := mempl.NewMempool(config, proxyApp.Mempool())
mempoolReactor := mempl.NewMempoolReactor(config, mempool)
// Make ConsensusReactor
consensusState := consensus.NewConsensusState(config, state.Copy(), proxyAppConnConsensus, blockStore, mempool)
consensusReactor := consensus.NewConsensusReactor(consensusState, blockStore, fastSync)
consensusState := consensus.NewConsensusState(config, state.Copy(), proxyApp.Consensus(), blockStore, mempool)
if privValidator != nil {
consensusReactor.SetPrivValidator(privValidator)
}
// deterministic accountability
err = consensusState.OpenWAL(config.GetString("cswal"))
if err != nil {
log.Error("Failed to open cswal", "error", err.Error())
consensusState.SetPrivValidator(privValidator)
}
consensusReactor := consensus.NewConsensusReactor(consensusState, fastSync)
// Make p2p network switch
sw := p2p.NewSwitch(config.GetConfig("p2p"))
@@ -118,6 +139,41 @@ func NewNode(config cfg.Config, privValidator *types.PrivValidator, getProxyApp
sw.AddReactor("BLOCKCHAIN", bcReactor)
sw.AddReactor("CONSENSUS", consensusReactor)
// Optionally, start the pex reactor
var addrBook *p2p.AddrBook
if config.GetBool("pex_reactor") {
addrBook = p2p.NewAddrBook(config.GetString("addrbook_file"), config.GetBool("addrbook_strict"))
pexReactor := p2p.NewPEXReactor(addrBook)
sw.AddReactor("PEX", pexReactor)
}
// Filter peers by addr or pubkey with an ABCI query.
// If the query return code is OK, add peer.
// XXX: Query format subject to change
if config.GetBool("filter_peers") {
// NOTE: addr is ip:port
sw.SetAddrFilter(func(addr net.Addr) error {
resQuery, err := proxyApp.Query().QuerySync(abci.RequestQuery{Path: cmn.Fmt("/p2p/filter/addr/%s", addr.String())})
if err != nil {
return err
}
if resQuery.Code.IsOK() {
return nil
}
return errors.New(resQuery.Code.String())
})
sw.SetPubKeyFilter(func(pubkey crypto.PubKeyEd25519) error {
resQuery, err := proxyApp.Query().QuerySync(abci.RequestQuery{Path: cmn.Fmt("/p2p/filter/pubkey/%X", pubkey.Bytes())})
if err != nil {
return err
}
if resQuery.Code.IsOK() {
return nil
}
return errors.New(resQuery.Code.String())
})
}
// add the event switch to all services
// they should all satisfy events.Eventable
SetEventSwitch(eventSwitch, bcReactor, mempoolReactor, consensusReactor)
@@ -125,42 +181,94 @@ func NewNode(config cfg.Config, privValidator *types.PrivValidator, getProxyApp
// run the profile server
profileHost := config.GetString("prof_laddr")
if profileHost != "" {
go func() {
log.Warn("Profile server", "error", http.ListenAndServe(profileHost, nil))
}()
}
return &Node{
config: config,
sw: sw,
node := &Node{
config: config,
genesisDoc: state.GenesisDoc,
privValidator: privValidator,
privKey: privKey,
sw: sw,
addrBook: addrBook,
evsw: eventSwitch,
blockStore: blockStore,
bcReactor: bcReactor,
mempoolReactor: mempoolReactor,
consensusState: consensusState,
consensusReactor: consensusReactor,
privValidator: privValidator,
genesisDoc: state.GenesisDoc,
privKey: privKey,
proxyApp: proxyApp,
txIndexer: txIndexer,
}
node.BaseService = *cmn.NewBaseService(log, "Node", node)
return node
}
// Call Start() after adding the listeners.
func (n *Node) Start() error {
n.sw.SetNodeInfo(makeNodeInfo(n.config, n.sw, n.privKey))
func (n *Node) OnStart() error {
// Create & add listener
protocol, address := ProtocolAndAddress(n.config.GetString("node_laddr"))
l := p2p.NewDefaultListener(protocol, address, n.config.GetBool("skip_upnp"))
n.sw.AddListener(l)
// Start the switch
n.sw.SetNodeInfo(n.makeNodeInfo())
n.sw.SetNodePrivKey(n.privKey)
_, err := n.sw.Start()
return err
if err != nil {
return err
}
// If seeds exist, add them to the address book and dial out
if n.config.GetString("seeds") != "" {
// dial out
seeds := strings.Split(n.config.GetString("seeds"), ",")
if err := n.DialSeeds(seeds); err != nil {
return err
}
}
// Run the RPC server
if n.config.GetString("rpc_laddr") != "" {
listeners, err := n.startRPC()
if err != nil {
return err
}
n.rpcListeners = listeners
}
return nil
}
func (n *Node) Stop() {
func (n *Node) OnStop() {
n.BaseService.OnStop()
log.Notice("Stopping Node")
// TODO: gracefully disconnect from peers.
n.sw.Stop()
for _, l := range n.rpcListeners {
log.Info("Closing rpc listener", "listener", l)
if err := l.Close(); err != nil {
log.Error("Error closing listener", "listener", l, "error", err)
}
}
}
func (n *Node) RunForever() {
// Sleep forever and then...
cmn.TrapSignal(func() {
n.Stop()
})
}
// Add the event switch to reactors, mempool, etc.
func SetEventSwitch(evsw *events.EventSwitch, eventables ...events.Eventable) {
func SetEventSwitch(evsw types.EventSwitch, eventables ...types.Eventable) {
for _, e := range eventables {
e.SetEventSwitch(evsw)
}
@@ -170,22 +278,27 @@ func SetEventSwitch(evsw *events.EventSwitch, eventables ...events.Eventable) {
// Add listeners before starting the Node.
// The first listener is the primary listener (in NodeInfo)
func (n *Node) AddListener(l p2p.Listener) {
log.Notice(Fmt("Added %v", l))
n.sw.AddListener(l)
}
func (n *Node) StartRPC() ([]net.Listener, error) {
// ConfigureRPC sets all variables in rpccore so they will serve
// rpc calls from this node
func (n *Node) ConfigureRPC() {
rpccore.SetConfig(n.config)
rpccore.SetEventSwitch(n.evsw)
rpccore.SetBlockStore(n.blockStore)
rpccore.SetConsensusState(n.consensusState)
rpccore.SetConsensusReactor(n.consensusReactor)
rpccore.SetMempoolReactor(n.mempoolReactor)
rpccore.SetMempool(n.mempoolReactor.Mempool)
rpccore.SetSwitch(n.sw)
rpccore.SetPrivValidator(n.privValidator)
rpccore.SetPubKey(n.privValidator.PubKey)
rpccore.SetGenesisDoc(n.genesisDoc)
rpccore.SetAddrBook(n.addrBook)
rpccore.SetProxyAppQuery(n.proxyApp.Query())
rpccore.SetTxIndexer(n.txIndexer)
}
func (n *Node) startRPC() ([]net.Listener, error) {
n.ConfigureRPC()
listenAddrs := strings.Split(n.config.GetString("rpc_laddr"), ",")
// we may expose the rpc over both a unix and tcp socket
@@ -201,6 +314,17 @@ func (n *Node) StartRPC() ([]net.Listener, error) {
}
listeners[i] = listener
}
// we expose a simplified api over grpc for convenience to app devs
grpcListenAddr := n.config.GetString("grpc_laddr")
if grpcListenAddr != "" {
listener, err := grpccore.StartGRPCServer(grpcListenAddr)
if err != nil {
return nil, err
}
listeners = append(listeners, listener)
}
return listeners, nil
}
@@ -224,7 +348,7 @@ func (n *Node) MempoolReactor() *mempl.MempoolReactor {
return n.mempoolReactor
}
func (n *Node) EventSwitch() *events.EventSwitch {
func (n *Node) EventSwitch() types.EventSwitch {
return n.evsw
}
@@ -233,218 +357,72 @@ func (n *Node) PrivValidator() *types.PrivValidator {
return n.privValidator
}
func makeNodeInfo(config cfg.Config, sw *p2p.Switch, privKey crypto.PrivKeyEd25519) *p2p.NodeInfo {
func (n *Node) GenesisDoc() *types.GenesisDoc {
return n.genesisDoc
}
func (n *Node) ProxyApp() proxy.AppConns {
return n.proxyApp
}
func (n *Node) makeNodeInfo() *p2p.NodeInfo {
txIndexerStatus := "on"
if _, ok := n.txIndexer.(*null.TxIndex); ok {
txIndexerStatus = "off"
}
nodeInfo := &p2p.NodeInfo{
PubKey: privKey.PubKey().(crypto.PubKeyEd25519),
Moniker: config.GetString("moniker"),
Network: config.GetString("chain_id"),
PubKey: n.privKey.PubKey().(crypto.PubKeyEd25519),
Moniker: n.config.GetString("moniker"),
Network: n.config.GetString("chain_id"),
Version: version.Version,
Other: []string{
Fmt("wire_version=%v", wire.Version),
Fmt("p2p_version=%v", p2p.Version),
Fmt("consensus_version=%v", consensus.Version),
Fmt("rpc_version=%v/%v", rpc.Version, rpccore.Version),
cmn.Fmt("wire_version=%v", wire.Version),
cmn.Fmt("p2p_version=%v", p2p.Version),
cmn.Fmt("consensus_version=%v", consensus.Version),
cmn.Fmt("rpc_version=%v/%v", rpc.Version, rpccore.Version),
cmn.Fmt("tx_index=%v", txIndexerStatus),
},
}
// include git hash in the nodeInfo if available
if rev, err := ReadFile(config.GetString("revision_file")); err == nil {
nodeInfo.Other = append(nodeInfo.Other, Fmt("revision=%v", string(rev)))
if rev, err := cmn.ReadFile(n.config.GetString("revision_file")); err == nil {
nodeInfo.Other = append(nodeInfo.Other, cmn.Fmt("revision=%v", string(rev)))
}
if !sw.IsListening() {
if !n.sw.IsListening() {
return nodeInfo
}
p2pListener := sw.Listeners()[0]
p2pListener := n.sw.Listeners()[0]
p2pHost := p2pListener.ExternalAddress().IP.String()
p2pPort := p2pListener.ExternalAddress().Port
rpcListenAddr := config.GetString("rpc_laddr")
rpcListenAddr := n.config.GetString("rpc_laddr")
// We assume that the rpcListener has the same ExternalAddress.
// This is probably true because both P2P and RPC listeners use UPnP,
// unless of course the RPC is only bound to localhost
nodeInfo.ListenAddr = Fmt("%v:%v", p2pHost, p2pPort)
nodeInfo.Other = append(nodeInfo.Other, Fmt("rpc_addr=%v", rpcListenAddr))
nodeInfo.ListenAddr = cmn.Fmt("%v:%v", p2pHost, p2pPort)
nodeInfo.Other = append(nodeInfo.Other, cmn.Fmt("rpc_addr=%v", rpcListenAddr))
return nodeInfo
}
// Get a connection to the proxyAppConn addr.
// Check the current hash, and panic if it doesn't match.
func GetProxyApp(addr, transport string, hash []byte) (proxyAppConn proxy.AppConn) {
// use local app (for testing)
switch addr {
case "nilapp":
app := nilapp.NewNilApplication()
mtx := new(sync.Mutex)
proxyAppConn = tmspcli.NewLocalClient(mtx, app)
case "dummy":
app := dummy.NewDummyApplication()
mtx := new(sync.Mutex)
proxyAppConn = tmspcli.NewLocalClient(mtx, app)
default:
// Run forever in a loop
remoteApp, err := proxy.NewRemoteAppConn(addr, transport)
if err != nil {
Exit(Fmt("Failed to connect to proxy for mempool: %v", err))
}
proxyAppConn = remoteApp
}
// Check the hash
res := proxyAppConn.CommitSync()
if res.IsErr() {
PanicCrisis(Fmt("Error in getting proxyAppConn hash: %v", res))
}
if !bytes.Equal(hash, res.Data) {
log.Warn(Fmt("ProxyApp hash does not match. Expected %X, got %X", hash, res.Data))
}
return proxyAppConn
}
// Load the most recent state from "state" db,
// or create a new one (and save) from genesis.
func getState(config cfg.Config, stateDB dbm.DB) *sm.State {
state := sm.LoadState(stateDB)
if state == nil {
state = sm.MakeGenesisStateFromFile(stateDB, config.GetString("genesis_file"))
state.Save()
}
return state
}
//------------------------------------------------------------------------------
// Users wishing to use an external signer for their validators
// should fork tendermint/tendermint and implement RunNode to
// load their custom priv validator and call NewNode(privVal, getProxyFunc)
func RunNode(config cfg.Config) {
// Wait until the genesis doc becomes available
genDocFile := config.GetString("genesis_file")
if !FileExists(genDocFile) {
log.Notice(Fmt("Waiting for genesis file %v...", genDocFile))
for {
time.Sleep(time.Second)
if !FileExists(genDocFile) {
continue
}
jsonBlob, err := ioutil.ReadFile(genDocFile)
if err != nil {
Exit(Fmt("Couldn't read GenesisDoc file: %v", err))
}
genDoc := types.GenesisDocFromJSON(jsonBlob)
if genDoc.ChainID == "" {
PanicSanity(Fmt("Genesis doc %v must include non-empty chain_id", genDocFile))
}
config.Set("chain_id", genDoc.ChainID)
}
}
// Get PrivValidator
privValidatorFile := config.GetString("priv_validator_file")
privValidator := types.LoadOrGenPrivValidator(privValidatorFile)
// Create & start node
n := NewNode(config, privValidator, GetProxyApp)
l := p2p.NewDefaultListener("tcp", config.GetString("node_laddr"), config.GetBool("skip_upnp"))
n.AddListener(l)
err := n.Start()
if err != nil {
Exit(Fmt("Failed to start node: %v", err))
}
log.Notice("Started node", "nodeInfo", n.sw.NodeInfo())
// If seedNode is provided by config, dial out.
if config.GetString("seeds") != "" {
seeds := strings.Split(config.GetString("seeds"), ",")
n.sw.DialSeeds(seeds)
}
// Run the RPC server.
if config.GetString("rpc_laddr") != "" {
_, err := n.StartRPC()
if err != nil {
PanicCrisis(err)
}
}
// Sleep forever and then...
TrapSignal(func() {
n.Stop()
})
}
func (n *Node) NodeInfo() *p2p.NodeInfo {
return n.sw.NodeInfo()
}
func (n *Node) DialSeeds(seeds []string) {
n.sw.DialSeeds(seeds)
func (n *Node) DialSeeds(seeds []string) error {
return n.sw.DialSeeds(n.addrBook, seeds)
}
//------------------------------------------------------------------------------
// replay
// convenience for replay mode
func newConsensusState(config cfg.Config) *consensus.ConsensusState {
// Get BlockStore
blockStoreDB := dbm.NewDB("blockstore", config.GetString("db_backend"), config.GetString("db_dir"))
blockStore := bc.NewBlockStore(blockStoreDB)
// Get State
stateDB := dbm.NewDB("state", config.GetString("db_backend"), config.GetString("db_dir"))
state := sm.MakeGenesisStateFromFile(stateDB, config.GetString("genesis_file"))
// Create two proxyAppConn connections,
// one for the consensus and one for the mempool.
proxyAddr := config.GetString("proxy_app")
transport := config.GetString("tmsp")
proxyAppConnMempool := GetProxyApp(proxyAddr, transport, state.AppHash)
proxyAppConnConsensus := GetProxyApp(proxyAddr, transport, state.AppHash)
// add the chainid to the global config
config.Set("chain_id", state.ChainID)
// Make event switch
eventSwitch := events.NewEventSwitch()
_, err := eventSwitch.Start()
if err != nil {
Exit(Fmt("Failed to start event switch: %v", err))
// Defaults to tcp
func ProtocolAndAddress(listenAddr string) (string, string) {
protocol, address := "tcp", listenAddr
parts := strings.SplitN(address, "://", 2)
if len(parts) == 2 {
protocol, address = parts[0], parts[1]
}
mempool := mempl.NewMempool(config, proxyAppConnMempool)
consensusState := consensus.NewConsensusState(config, state.Copy(), proxyAppConnConsensus, blockStore, mempool)
consensusState.SetEventSwitch(eventSwitch)
return consensusState
}
func RunReplayConsole(config cfg.Config) {
walFile := config.GetString("cswal")
if walFile == "" {
Exit("cswal file name not set in tendermint config")
}
consensusState := newConsensusState(config)
if err := consensusState.ReplayConsole(walFile); err != nil {
Exit(Fmt("Error during consensus replay: %v", err))
}
}
func RunReplay(config cfg.Config) {
walFile := config.GetString("cswal")
if walFile == "" {
Exit("cswal file name not set in tendermint config")
}
consensusState := newConsensusState(config)
if err := consensusState.ReplayMessages(walFile); err != nil {
Exit(Fmt("Error during consensus replay: %v", err))
}
log.Notice("Replay run successfully")
return protocol, address
}
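A quick illustration of `ProtocolAndAddress` as defined above (hypothetical driver code, not part of the diff):

```go
// Illustrative use of ProtocolAndAddress (hypothetical driver code).
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/node"
)

func main() {
	proto, addr := node.ProtocolAndAddress("unix:///tmp/app.sock")
	fmt.Println(proto, addr) // unix /tmp/app.sock
	proto, addr = node.ProtocolAndAddress("0.0.0.0:46657")
	fmt.Println(proto, addr) // tcp 0.0.0.0:46657 (protocol defaults to tcp)
}
```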


@@ -4,25 +4,21 @@ import (
"testing"
"time"
"github.com/tendermint/go-p2p"
"github.com/tendermint/tendermint/config/tendermint_test"
"github.com/tendermint/tendermint/types"
)
func TestNodeStartStop(t *testing.T) {
config := tendermint_test.ResetConfig("node_node_test")
// Get PrivValidator
privValidatorFile := config.GetString("priv_validator_file")
privValidator := types.LoadOrGenPrivValidator(privValidatorFile)
// Create & start node
n := NewNode(config, privValidator, GetProxyApp)
l := p2p.NewDefaultListener("tcp", config.GetString("node_laddr"), config.GetBool("skip_upnp"))
n.AddListener(l)
n := NewNodeDefault(config)
n.Start()
log.Notice("Started node", "nodeInfo", n.sw.NodeInfo())
// Wait a bit to initialize
// TODO remove time.Sleep(), make asynchronous.
time.Sleep(time.Second * 2)
ch := make(chan struct{}, 1)
go func() {
n.Stop()


@@ -1,9 +1,144 @@
package proxy
import (
tmspcli "github.com/tendermint/tmsp/client"
abcicli "github.com/tendermint/abci/client"
"github.com/tendermint/abci/types"
)
type AppConn interface {
tmspcli.Client
//----------------------------------------------------------------------------------------
// Enforce which abci msgs can be sent on a connection at the type level
type AppConnConsensus interface {
SetResponseCallback(abcicli.Callback)
Error() error
InitChainSync(validators []*types.Validator) (err error)
BeginBlockSync(hash []byte, header *types.Header) (err error)
DeliverTxAsync(tx []byte) *abcicli.ReqRes
EndBlockSync(height uint64) (types.ResponseEndBlock, error)
CommitSync() (res types.Result)
}
type AppConnMempool interface {
SetResponseCallback(abcicli.Callback)
Error() error
CheckTxAsync(tx []byte) *abcicli.ReqRes
FlushAsync() *abcicli.ReqRes
FlushSync() error
}
type AppConnQuery interface {
Error() error
EchoSync(string) (res types.Result)
InfoSync() (resInfo types.ResponseInfo, err error)
QuerySync(reqQuery types.RequestQuery) (resQuery types.ResponseQuery, err error)
// SetOptionSync(key string, value string) (res types.Result)
}
//-----------------------------------------------------------------------------------------
// Implements AppConnConsensus (subset of abcicli.Client)
type appConnConsensus struct {
appConn abcicli.Client
}
func NewAppConnConsensus(appConn abcicli.Client) *appConnConsensus {
return &appConnConsensus{
appConn: appConn,
}
}
func (app *appConnConsensus) SetResponseCallback(cb abcicli.Callback) {
app.appConn.SetResponseCallback(cb)
}
func (app *appConnConsensus) Error() error {
return app.appConn.Error()
}
func (app *appConnConsensus) InitChainSync(validators []*types.Validator) (err error) {
return app.appConn.InitChainSync(validators)
}
func (app *appConnConsensus) BeginBlockSync(hash []byte, header *types.Header) (err error) {
return app.appConn.BeginBlockSync(hash, header)
}
func (app *appConnConsensus) DeliverTxAsync(tx []byte) *abcicli.ReqRes {
return app.appConn.DeliverTxAsync(tx)
}
func (app *appConnConsensus) EndBlockSync(height uint64) (types.ResponseEndBlock, error) {
return app.appConn.EndBlockSync(height)
}
func (app *appConnConsensus) CommitSync() (res types.Result) {
return app.appConn.CommitSync()
}
//------------------------------------------------
// Implements AppConnMempool (subset of abcicli.Client)
type appConnMempool struct {
appConn abcicli.Client
}
func NewAppConnMempool(appConn abcicli.Client) *appConnMempool {
return &appConnMempool{
appConn: appConn,
}
}
func (app *appConnMempool) SetResponseCallback(cb abcicli.Callback) {
app.appConn.SetResponseCallback(cb)
}
func (app *appConnMempool) Error() error {
return app.appConn.Error()
}
func (app *appConnMempool) FlushAsync() *abcicli.ReqRes {
return app.appConn.FlushAsync()
}
func (app *appConnMempool) FlushSync() error {
return app.appConn.FlushSync()
}
func (app *appConnMempool) CheckTxAsync(tx []byte) *abcicli.ReqRes {
return app.appConn.CheckTxAsync(tx)
}
//------------------------------------------------
// Implements AppConnQuery (subset of abcicli.Client)
type appConnQuery struct {
appConn abcicli.Client
}
func NewAppConnQuery(appConn abcicli.Client) *appConnQuery {
return &appConnQuery{
appConn: appConn,
}
}
func (app *appConnQuery) Error() error {
return app.appConn.Error()
}
func (app *appConnQuery) EchoSync(msg string) (res types.Result) {
return app.appConn.EchoSync(msg)
}
func (app *appConnQuery) InfoSync() (types.ResponseInfo, error) {
return app.appConn.InfoSync()
}
func (app *appConnQuery) QuerySync(reqQuery types.RequestQuery) (types.ResponseQuery, error) {
return app.appConn.QuerySync(reqQuery)
}
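The payoff of enforcing the allowed ABCI messages at the type level, as this file does, is that misuse fails at compile time. A hypothetical sketch (`useMempoolConn` is a made-up name; the rest follows the interfaces above):

```go
// Hypothetical sketch: the narrowed interfaces make misuse a compile error.
package example

import (
	abcicli "github.com/tendermint/abci/client"
	"github.com/tendermint/tendermint/proxy"
)

func useMempoolConn(cli abcicli.Client, tx []byte) {
	var memConn proxy.AppConnMempool = proxy.NewAppConnMempool(cli)
	memConn.CheckTxAsync(tx) // OK: mempool connections may send CheckTx
	// memConn.CommitSync()  // would not compile: Commit is consensus-only
}
```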

proxy/app_conn_test.go

@@ -0,0 +1,124 @@
package proxy
import (
"strings"
"testing"
. "github.com/tendermint/go-common"
abcicli "github.com/tendermint/abci/client"
"github.com/tendermint/abci/example/dummy"
"github.com/tendermint/abci/server"
"github.com/tendermint/abci/types"
)
//----------------------------------------
type AppConnTest interface {
EchoAsync(string) *abcicli.ReqRes
FlushSync() error
InfoSync() (types.ResponseInfo, error)
}
type appConnTest struct {
appConn abcicli.Client
}
func NewAppConnTest(appConn abcicli.Client) AppConnTest {
return &appConnTest{appConn}
}
func (app *appConnTest) EchoAsync(msg string) *abcicli.ReqRes {
return app.appConn.EchoAsync(msg)
}
func (app *appConnTest) FlushSync() error {
return app.appConn.FlushSync()
}
func (app *appConnTest) InfoSync() (types.ResponseInfo, error) {
return app.appConn.InfoSync()
}
//----------------------------------------
var SOCKET = "socket"
func TestEcho(t *testing.T) {
sockPath := Fmt("unix:///tmp/echo_%v.sock", RandStr(6))
clientCreator := NewRemoteClientCreator(sockPath, SOCKET, true)
// Start server
s, err := server.NewSocketServer(sockPath, dummy.NewDummyApplication())
if err != nil {
Exit(err.Error())
}
defer s.Stop()
// Start client
cli, err := clientCreator.NewABCIClient()
if err != nil {
Exit(err.Error())
}
proxy := NewAppConnTest(cli)
t.Log("Connected")
for i := 0; i < 1000; i++ {
proxy.EchoAsync(Fmt("echo-%v", i))
}
proxy.FlushSync()
}
func BenchmarkEcho(b *testing.B) {
b.StopTimer() // Initialize
sockPath := Fmt("unix:///tmp/echo_%v.sock", RandStr(6))
clientCreator := NewRemoteClientCreator(sockPath, SOCKET, true)
// Start server
s, err := server.NewSocketServer(sockPath, dummy.NewDummyApplication())
if err != nil {
Exit(err.Error())
}
defer s.Stop()
// Start client
cli, err := clientCreator.NewABCIClient()
if err != nil {
Exit(err.Error())
}
proxy := NewAppConnTest(cli)
b.Log("Connected")
echoString := strings.Repeat(" ", 200)
b.StartTimer() // Start benchmarking tests
for i := 0; i < b.N; i++ {
proxy.EchoAsync(echoString)
}
proxy.FlushSync()
b.StopTimer()
// info := proxy.InfoSync()
//b.Log("N: ", b.N, info)
}
func TestInfo(t *testing.T) {
sockPath := Fmt("unix:///tmp/echo_%v.sock", RandStr(6))
clientCreator := NewRemoteClientCreator(sockPath, SOCKET, true)
// Start server
s, err := server.NewSocketServer(sockPath, dummy.NewDummyApplication())
if err != nil {
Exit(err.Error())
}
defer s.Stop()
// Start client
cli, err := clientCreator.NewABCIClient()
if err != nil {
Exit(err.Error())
}
proxy := NewAppConnTest(cli)
t.Log("Connected")
resInfo, err := proxy.InfoSync()
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
if string(resInfo.Data) != "{\"size\":0}" {
t.Error("Expected ResponseInfo with one element '{\"size\":0}' but got something else")
}
}

proxy/client.go

@@ -0,0 +1,81 @@
package proxy
import (
"fmt"
"sync"
abcicli "github.com/tendermint/abci/client"
"github.com/tendermint/abci/example/dummy"
"github.com/tendermint/abci/types"
cfg "github.com/tendermint/go-config"
)
// NewABCIClient returns a newly connected client
type ClientCreator interface {
NewABCIClient() (abcicli.Client, error)
}
//----------------------------------------------------
// local proxy uses a mutex on an in-proc app
type localClientCreator struct {
mtx *sync.Mutex
app types.Application
}
func NewLocalClientCreator(app types.Application) ClientCreator {
return &localClientCreator{
mtx: new(sync.Mutex),
app: app,
}
}
func (l *localClientCreator) NewABCIClient() (abcicli.Client, error) {
return abcicli.NewLocalClient(l.mtx, l.app), nil
}
//---------------------------------------------------------------
// remote proxy opens new connections to an external app process
type remoteClientCreator struct {
addr string
transport string
mustConnect bool
}
func NewRemoteClientCreator(addr, transport string, mustConnect bool) ClientCreator {
return &remoteClientCreator{
addr: addr,
transport: transport,
mustConnect: mustConnect,
}
}
func (r *remoteClientCreator) NewABCIClient() (abcicli.Client, error) {
// Run forever in a loop
remoteApp, err := abcicli.NewClient(r.addr, r.transport, r.mustConnect)
if err != nil {
return nil, fmt.Errorf("Failed to connect to proxy: %v", err)
}
return remoteApp, nil
}
//-----------------------------------------------------------------
// default
func DefaultClientCreator(config cfg.Config) ClientCreator {
addr := config.GetString("proxy_app")
transport := config.GetString("abci")
switch addr {
case "dummy":
return NewLocalClientCreator(dummy.NewDummyApplication())
case "persistent_dummy":
return NewLocalClientCreator(dummy.NewPersistentDummyApplication(config.GetString("db_dir")))
case "nilapp":
return NewLocalClientCreator(types.NewBaseApplication())
default:
mustConnect := false // loop retrying
return NewRemoteClientCreator(addr, transport, mustConnect)
}
}

proxy/multi_app_conn.go

@@ -0,0 +1,102 @@
package proxy
import (
cmn "github.com/tendermint/go-common"
cfg "github.com/tendermint/go-config"
)
//-----------------------------
// Tendermint's interface to the application consists of multiple connections
type AppConns interface {
cmn.Service
Mempool() AppConnMempool
Consensus() AppConnConsensus
Query() AppConnQuery
}
func NewAppConns(config cfg.Config, clientCreator ClientCreator, handshaker Handshaker) AppConns {
return NewMultiAppConn(config, clientCreator, handshaker)
}
//-----------------------------
// multiAppConn implements AppConns
type Handshaker interface {
Handshake(AppConns) error
}
// a multiAppConn is made of a few appConns (mempool, consensus, query)
// and manages their underlying abci clients, including the handshake
// which ensures the app and tendermint are synced.
// TODO: on app restart, clients must reboot together
type multiAppConn struct {
cmn.BaseService
config cfg.Config
handshaker Handshaker
mempoolConn *appConnMempool
consensusConn *appConnConsensus
queryConn *appConnQuery
clientCreator ClientCreator
}
// Make all necessary abci connections to the application
func NewMultiAppConn(config cfg.Config, clientCreator ClientCreator, handshaker Handshaker) *multiAppConn {
multiAppConn := &multiAppConn{
config: config,
handshaker: handshaker,
clientCreator: clientCreator,
}
multiAppConn.BaseService = *cmn.NewBaseService(log, "multiAppConn", multiAppConn)
return multiAppConn
}
// Returns the mempool connection
func (app *multiAppConn) Mempool() AppConnMempool {
return app.mempoolConn
}
// Returns the consensus Connection
func (app *multiAppConn) Consensus() AppConnConsensus {
return app.consensusConn
}
// Returns the query Connection
func (app *multiAppConn) Query() AppConnQuery {
return app.queryConn
}
func (app *multiAppConn) OnStart() error {
// query connection
querycli, err := app.clientCreator.NewABCIClient()
if err != nil {
return err
}
app.queryConn = NewAppConnQuery(querycli)
// mempool connection
memcli, err := app.clientCreator.NewABCIClient()
if err != nil {
return err
}
app.mempoolConn = NewAppConnMempool(memcli)
// consensus connection
concli, err := app.clientCreator.NewABCIClient()
if err != nil {
return err
}
app.consensusConn = NewAppConnConsensus(concli)
// ensure app is synced to the latest state
if app.handshaker != nil {
return app.handshaker.Handshake(app)
}
return nil
}
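Putting the pieces together, wiring up the three connections mirrors what `NewNode` does above. A condensed, hypothetical sketch (`startAppConns` is a made-up name; the called functions appear earlier in this diff):

```go
// Condensed, hypothetical wiring of AppConns (mirrors NewNode above).
package example

import (
	cmn "github.com/tendermint/go-common"
	cfg "github.com/tendermint/go-config"
	"github.com/tendermint/tendermint/proxy"
)

// startAppConns builds and starts the multiplexed app connections; the
// handshaker replays blocks as needed so tendermint and the app are synced.
func startAppConns(config cfg.Config, handshaker proxy.Handshaker) proxy.AppConns {
	cc := proxy.DefaultClientCreator(config) // local dummy/nilapp, or remote socket
	appConns := proxy.NewAppConns(config, cc, handshaker)
	if _, err := appConns.Start(); err != nil {
		cmn.Exit(cmn.Fmt("Error starting proxy app connections: %v", err))
	}
	return appConns // Mempool(), Consensus(), and Query() are now usable
}
```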


@@ -1,23 +0,0 @@
package proxy
import (
tmspcli "github.com/tendermint/tmsp/client"
)
// This is goroutine-safe, but users should beware that
// the application in general is not meant to be interfaced
// with concurrent callers.
type remoteAppConn struct {
tmspcli.Client
}
func NewRemoteAppConn(addr, transport string) (*remoteAppConn, error) {
client, err := tmspcli.NewClient(addr, transport, false)
if err != nil {
return nil, err
}
appConn := &remoteAppConn{
Client: client,
}
return appConn, nil
}


@@ -1,88 +0,0 @@
package proxy
import (
"strings"
"testing"
. "github.com/tendermint/go-common"
"github.com/tendermint/tmsp/example/dummy"
"github.com/tendermint/tmsp/server"
)
var SOCKET = "socket"
func TestEcho(t *testing.T) {
sockPath := Fmt("unix:///tmp/echo_%v.sock", RandStr(6))
// Start server
s, err := server.NewSocketServer(sockPath, dummy.NewDummyApplication())
if err != nil {
Exit(err.Error())
}
defer s.Stop()
// Start client
proxy, err := NewRemoteAppConn(sockPath, SOCKET)
if err != nil {
Exit(err.Error())
} else {
t.Log("Connected")
}
for i := 0; i < 1000; i++ {
proxy.EchoAsync(Fmt("echo-%v", i))
}
proxy.FlushSync()
}
func BenchmarkEcho(b *testing.B) {
b.StopTimer() // Initialize
sockPath := Fmt("unix:///tmp/echo_%v.sock", RandStr(6))
// Start server
s, err := server.NewSocketServer(sockPath, dummy.NewDummyApplication())
if err != nil {
Exit(err.Error())
}
defer s.Stop()
// Start client
proxy, err := NewRemoteAppConn(sockPath, SOCKET)
if err != nil {
Exit(err.Error())
} else {
b.Log("Connected")
}
echoString := strings.Repeat(" ", 200)
b.StartTimer() // Start benchmarking tests
for i := 0; i < b.N; i++ {
proxy.EchoAsync(echoString)
}
proxy.FlushSync()
b.StopTimer()
// info := proxy.InfoSync()
//b.Log("N: ", b.N, info)
}
func TestInfo(t *testing.T) {
sockPath := Fmt("unix:///tmp/echo_%v.sock", RandStr(6))
// Start server
s, err := server.NewSocketServer(sockPath, dummy.NewDummyApplication())
if err != nil {
Exit(err.Error())
}
defer s.Stop()
// Start client
proxy, err := NewRemoteAppConn(sockPath, SOCKET)
if err != nil {
Exit(err.Error())
} else {
t.Log("Connected")
}
res := proxy.InfoSync()
if res.IsErr() {
t.Errorf("Unexpected error: %v", err)
}
if string(res.Data) != "size:0" {
t.Error("Expected ResponseInfo with one element 'size:0' but got something else")
}
}

rpc/client/event_test.go

@@ -0,0 +1,65 @@
package client_test
import (
"testing"
"time"
"github.com/stretchr/testify/require"
merktest "github.com/tendermint/merkleeyes/testutil"
"github.com/tendermint/tendermint/rpc/client"
"github.com/tendermint/tendermint/types"
)
func TestHeaderEvents(t *testing.T) {
require := require.New(t)
for i, c := range GetClients() {
// start the client for this test if it wasn't already running
if !c.IsRunning() {
// if so, then we start it, listen, and stop it.
st, err := c.Start()
require.Nil(err, "%d: %+v", i, err)
require.True(st, "%d", i)
defer c.Stop()
}
evtTyp := types.EventStringNewBlockHeader()
evt, err := client.WaitForOneEvent(c, evtTyp, 1*time.Second)
require.Nil(err, "%d: %+v", i, err)
_, ok := evt.(types.EventDataNewBlockHeader)
require.True(ok, "%d: %#v", i, evt)
// TODO: more checks...
}
}
func TestTxEvents(t *testing.T) {
require := require.New(t)
for i, c := range GetClients() {
// start the client for this test if it wasn't already running
if !c.IsRunning() {
// if so, then we start it, listen, and stop it.
st, err := c.Start()
require.Nil(err, "%d: %+v", i, err)
require.True(st, "%d", i)
defer c.Stop()
}
// make the tx
_, _, tx := merktest.MakeTxKV()
evtTyp := types.EventStringTx(types.Tx(tx))
// send async
txres, err := c.BroadcastTxAsync(tx)
require.Nil(err, "%+v", err)
require.True(txres.Code.IsOK())
// and wait for confirmation
evt, err := client.WaitForOneEvent(c, evtTyp, 1*time.Second)
require.Nil(err, "%d: %+v", i, err)
// and make sure it has the proper info
txe, ok := evt.(types.EventDataTx)
require.True(ok, "%d: %#v", i, evt)
// make sure this is the proper tx
require.EqualValues(tx, txe.Tx)
require.True(txe.Code.IsOK())
}
}

88 rpc/client/helpers.go Normal file

@@ -0,0 +1,88 @@
package client
import (
"time"
"github.com/pkg/errors"
cmn "github.com/tendermint/go-common"
events "github.com/tendermint/go-events"
"github.com/tendermint/tendermint/types"
)
// Waiter is informed of the current height delta, and decides whether to quit early
type Waiter func(delta int) (abort error)
// DefaultWaitStrategy is the standard backoff algorithm,
// but you can plug in another one
func DefaultWaitStrategy(delta int) (abort error) {
if delta > 10 {
return errors.Errorf("Waiting for %d blocks... aborting", delta)
} else if delta > 0 {
// estimate of wait time....
// wait half a second for the next block (in progress)
// plus one second for every full block
delay := time.Duration(delta-1)*time.Second + 500*time.Millisecond
time.Sleep(delay)
}
return nil
}
// WaitForHeight polls status at reasonable intervals until
// the block at the given height is available.
//
// If waiter is nil, we use DefaultWaitStrategy, but you can also
// provide your own implementation.
func WaitForHeight(c StatusClient, h int, waiter Waiter) error {
if waiter == nil {
waiter = DefaultWaitStrategy
}
delta := 1
for delta > 0 {
s, err := c.Status()
if err != nil {
return err
}
delta = h - s.LatestBlockHeight
// wait for the time, or abort early
if err := waiter(delta); err != nil {
return err
}
}
return nil
}
// WaitForOneEvent subscribes to a websocket event for the given
// event type and returns upon receiving it once, or
// when the timeout duration has expired.
//
// This handles subscribing and unsubscribing under the hood
func WaitForOneEvent(evsw types.EventSwitch,
evtTyp string, timeout time.Duration) (types.TMEventData, error) {
listener := cmn.RandStr(12)
evts, quit := make(chan events.EventData, 10), make(chan bool, 1)
// start timeout count-down
go func() {
time.Sleep(timeout)
quit <- true
}()
// register for the next event of this type
evsw.AddListenerForEvent(listener, evtTyp, func(data events.EventData) {
evts <- data
})
// make sure to unregister after the test is over
defer evsw.RemoveListenerForEvent(evtTyp, listener)
// defer evsw.RemoveListener(listener) // this also works
select {
case <-quit:
return nil, errors.New("timed out waiting for event")
case evt := <-evts:
tmevt, ok := evt.(types.TMEventData)
if ok {
return tmevt, nil
}
return nil, errors.Errorf("Got unexpected event type: %#v", evt)
}
}
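A minimal sketch of how the helpers above compose. Per DefaultWaitStrategy, a client delta blocks behind sleeps (delta-1)s + 500ms per round, e.g. 2.5s when three blocks behind. The RPC address, target height, and the impatient waiter are illustrative assumptions, not part of the diff:

package main

import (
	"time"

	"github.com/pkg/errors"
	"github.com/tendermint/tendermint/rpc/client"
	"github.com/tendermint/tendermint/types"
)

func main() {
	// assumed address; use your node's rpc_laddr
	c := client.NewHTTP("tcp://localhost:46657", "/websocket")
	c.Start() // brings up the websocket machinery (WSEvents)
	defer c.Stop()

	// a custom Waiter that aborts sooner than DefaultWaitStrategy
	impatient := func(delta int) error {
		if delta > 3 {
			return errors.Errorf("too far behind: %d blocks", delta)
		}
		return client.DefaultWaitStrategy(delta)
	}
	if err := client.WaitForHeight(c, 20, impatient); err != nil {
		panic(err)
	}

	// block (up to 1s) for the next new-block-header event
	evtTyp := types.EventStringNewBlockHeader()
	if _, err := client.WaitForOneEvent(c, evtTyp, time.Second); err != nil {
		panic(err)
	}
}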


@@ -0,0 +1,76 @@
package client_test
import (
"errors"
"strings"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/rpc/client"
"github.com/tendermint/tendermint/rpc/client/mock"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
)
func TestWaitForHeight(t *testing.T) {
assert, require := assert.New(t), require.New(t)
// test with error result - immediate failure
m := &mock.StatusMock{
Call: mock.Call{
Error: errors.New("bye"),
},
}
r := mock.NewStatusRecorder(m)
// connection failure always leads to error
err := client.WaitForHeight(r, 8, nil)
require.NotNil(err)
require.Equal("bye", err.Error())
// we called status once to check
require.Equal(1, len(r.Calls))
// now set current block height to 10
m.Call = mock.Call{
Response: &ctypes.ResultStatus{LatestBlockHeight: 10},
}
// we will not wait for more than 10 blocks
err = client.WaitForHeight(r, 40, nil)
require.NotNil(err)
require.True(strings.Contains(err.Error(), "aborting"))
// we called status once more to check
require.Equal(2, len(r.Calls))
// waiting for the past returns immediately
err = client.WaitForHeight(r, 5, nil)
require.Nil(err)
// we called status once more to check
require.Equal(3, len(r.Calls))
// since we can't update in a background goroutine (test --race)
// we use the callback to update the status height
myWaiter := func(delta int) error {
// update the height for the next call
m.Call.Response = &ctypes.ResultStatus{LatestBlockHeight: 15}
return client.DefaultWaitStrategy(delta)
}
// we wait for a few blocks
err = client.WaitForHeight(r, 12, myWaiter)
require.Nil(err)
// status was called twice more during the wait
require.Equal(5, len(r.Calls))
pre := r.Calls[3]
require.Nil(pre.Error)
prer, ok := pre.Response.(*ctypes.ResultStatus)
require.True(ok)
assert.Equal(10, prer.LatestBlockHeight)
post := r.Calls[4]
require.Nil(post.Error)
postr, ok := post.Response.(*ctypes.ResultStatus)
require.True(ok)
assert.Equal(15, postr.LatestBlockHeight)
}

366 rpc/client/httpclient.go Normal file

@@ -0,0 +1,366 @@
package client
import (
"fmt"
"github.com/pkg/errors"
events "github.com/tendermint/go-events"
"github.com/tendermint/go-rpc/client"
wire "github.com/tendermint/go-wire"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
"github.com/tendermint/tendermint/types"
)
/*
HTTP is a Client implementation that communicates
with a tendermint node over json rpc and websockets.
This is the main implementation you probably want to use in
production code. There are other implementations for calling
the tendermint node in-process (local), or for mocking
out the server for test code (mock).
*/
type HTTP struct {
remote string
rpc *rpcclient.JSONRPCClient
*WSEvents
}
// NewHTTP takes a remote endpoint in the form tcp://<host>:<port>
// and the websocket path (which always seems to be "/websocket")
func NewHTTP(remote, wsEndpoint string) *HTTP {
return &HTTP{
rpc: rpcclient.NewJSONRPCClient(remote),
remote: remote,
WSEvents: newWSEvents(remote, wsEndpoint),
}
}
func (c *HTTP) _assertIsClient() Client {
return c
}
func (c *HTTP) _assertIsNetworkClient() NetworkClient {
return c
}
func (c *HTTP) _assertIsEventSwitch() types.EventSwitch {
return c
}
func (c *HTTP) Status() (*ctypes.ResultStatus, error) {
tmResult := new(ctypes.TMResult)
_, err := c.rpc.Call("status", map[string]interface{}{}, tmResult)
if err != nil {
return nil, errors.Wrap(err, "Status")
}
// note: panics if rpc doesn't match. okay???
return (*tmResult).(*ctypes.ResultStatus), nil
}
func (c *HTTP) ABCIInfo() (*ctypes.ResultABCIInfo, error) {
tmResult := new(ctypes.TMResult)
_, err := c.rpc.Call("abci_info", map[string]interface{}{}, tmResult)
if err != nil {
return nil, errors.Wrap(err, "ABCIInfo")
}
return (*tmResult).(*ctypes.ResultABCIInfo), nil
}
func (c *HTTP) ABCIQuery(path string, data []byte, prove bool) (*ctypes.ResultABCIQuery, error) {
tmResult := new(ctypes.TMResult)
_, err := c.rpc.Call("abci_query",
map[string]interface{}{"path": path, "data": data, "prove": prove},
tmResult)
if err != nil {
return nil, errors.Wrap(err, "ABCIQuery")
}
return (*tmResult).(*ctypes.ResultABCIQuery), nil
}
func (c *HTTP) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) {
tmResult := new(ctypes.TMResult)
_, err := c.rpc.Call("broadcast_tx_commit", map[string]interface{}{"tx": tx}, tmResult)
if err != nil {
return nil, errors.Wrap(err, "broadcast_tx_commit")
}
return (*tmResult).(*ctypes.ResultBroadcastTxCommit), nil
}
func (c *HTTP) BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
return c.broadcastTX("broadcast_tx_async", tx)
}
func (c *HTTP) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
return c.broadcastTX("broadcast_tx_sync", tx)
}
func (c *HTTP) broadcastTX(route string, tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
tmResult := new(ctypes.TMResult)
_, err := c.rpc.Call(route, map[string]interface{}{"tx": tx}, tmResult)
if err != nil {
return nil, errors.Wrap(err, route)
}
return (*tmResult).(*ctypes.ResultBroadcastTx), nil
}
func (c *HTTP) NetInfo() (*ctypes.ResultNetInfo, error) {
tmResult := new(ctypes.TMResult)
_, err := c.rpc.Call("net_info", map[string]interface{}{}, tmResult)
if err != nil {
return nil, errors.Wrap(err, "NetInfo")
}
return (*tmResult).(*ctypes.ResultNetInfo), nil
}
func (c *HTTP) DumpConsensusState() (*ctypes.ResultDumpConsensusState, error) {
tmResult := new(ctypes.TMResult)
_, err := c.rpc.Call("dump_consensus_state", map[string]interface{}{}, tmResult)
if err != nil {
return nil, errors.Wrap(err, "DumpConsensusState")
}
return (*tmResult).(*ctypes.ResultDumpConsensusState), nil
}
func (c *HTTP) BlockchainInfo(minHeight, maxHeight int) (*ctypes.ResultBlockchainInfo, error) {
tmResult := new(ctypes.TMResult)
_, err := c.rpc.Call("blockchain",
map[string]interface{}{"minHeight": minHeight, "maxHeight": maxHeight},
tmResult)
if err != nil {
return nil, errors.Wrap(err, "BlockchainInfo")
}
return (*tmResult).(*ctypes.ResultBlockchainInfo), nil
}
func (c *HTTP) Genesis() (*ctypes.ResultGenesis, error) {
tmResult := new(ctypes.TMResult)
_, err := c.rpc.Call("genesis", map[string]interface{}{}, tmResult)
if err != nil {
return nil, errors.Wrap(err, "Genesis")
}
return (*tmResult).(*ctypes.ResultGenesis), nil
}
func (c *HTTP) Block(height int) (*ctypes.ResultBlock, error) {
tmResult := new(ctypes.TMResult)
_, err := c.rpc.Call("block", map[string]interface{}{"height": height}, tmResult)
if err != nil {
return nil, errors.Wrap(err, "Block")
}
return (*tmResult).(*ctypes.ResultBlock), nil
}
func (c *HTTP) Commit(height int) (*ctypes.ResultCommit, error) {
tmResult := new(ctypes.TMResult)
_, err := c.rpc.Call("commit", map[string]interface{}{"height": height}, tmResult)
if err != nil {
return nil, errors.Wrap(err, "Commit")
}
return (*tmResult).(*ctypes.ResultCommit), nil
}
func (c *HTTP) Tx(hash []byte, prove bool) (*ctypes.ResultTx, error) {
tmResult := new(ctypes.TMResult)
query := map[string]interface{}{
"hash": hash,
"prove": prove,
}
_, err := c.rpc.Call("tx", query, tmResult)
if err != nil {
return nil, errors.Wrap(err, "Tx")
}
return (*tmResult).(*ctypes.ResultTx), nil
}
func (c *HTTP) Validators() (*ctypes.ResultValidators, error) {
tmResult := new(ctypes.TMResult)
_, err := c.rpc.Call("validators", map[string]interface{}{}, tmResult)
if err != nil {
return nil, errors.Wrap(err, "Validators")
}
return (*tmResult).(*ctypes.ResultValidators), nil
}
/** websocket event stuff here... **/
type WSEvents struct {
types.EventSwitch
remote string
endpoint string
ws *rpcclient.WSClient
// used for signaling the goroutine that feeds ws -> EventSwitch
quit chan bool
done chan bool
// used to maintain counts of actively listened-to events
// so we can properly subscribe/unsubscribe
// FIXME: thread-safety???
// FIXME: reuse code from go-events???
evtCount map[string]int // count how many times each event is subscribed to
listeners map[string][]string // keep track of which events each listener is listening to
}
func newWSEvents(remote, endpoint string) *WSEvents {
return &WSEvents{
EventSwitch: types.NewEventSwitch(),
endpoint: endpoint,
remote: remote,
quit: make(chan bool, 1),
done: make(chan bool, 1),
evtCount: map[string]int{},
listeners: map[string][]string{},
}
}
func (w *WSEvents) _assertIsEventSwitch() types.EventSwitch {
return w
}
// Start is the only way I could think of to extend OnStart from
// events.eventSwitch. If only it weren't private...
// BaseService.Start -> eventSwitch.OnStart -> WSEvents.Start
func (w *WSEvents) Start() (bool, error) {
st, err := w.EventSwitch.Start()
// if we did start, then OnStart here...
if st && err == nil {
ws := rpcclient.NewWSClient(w.remote, w.endpoint)
_, err = ws.Start()
if err == nil {
w.ws = ws
go w.eventListener()
}
}
return st, errors.Wrap(err, "StartWSEvent")
}
// Stop wraps the BaseService/eventSwitch actions as Start does
func (w *WSEvents) Stop() bool {
stop := w.EventSwitch.Stop()
if stop {
// send a message to quit to stop the eventListener
w.quit <- true
<-w.done
w.ws.Stop()
w.ws = nil
}
return stop
}
/** TODO: more intelligent subscriptions! **/
func (w *WSEvents) AddListenerForEvent(listenerID, event string, cb events.EventCallback) {
// no one listening -> subscribe
if w.evtCount[event] == 0 {
w.subscribe(event)
}
// if this listener was already listening to this event, return early
for _, s := range w.listeners[listenerID] {
if event == s {
return
}
}
// otherwise, add this event to this listener
w.evtCount[event] += 1
w.listeners[listenerID] = append(w.listeners[listenerID], event)
w.EventSwitch.AddListenerForEvent(listenerID, event, cb)
}
func (w *WSEvents) RemoveListenerForEvent(event string, listenerID string) {
// if this listener is listening already, splice it out
found := false
l := w.listeners[listenerID]
for i, s := range l {
if event == s {
found = true
w.listeners[listenerID] = append(l[:i], l[i+1:]...)
break
}
}
// if the listener wasn't already listening to the event, exit early
if !found {
return
}
// now we can update the subscriptions
w.evtCount[event] -= 1
if w.evtCount[event] == 0 {
w.unsubscribe(event)
}
w.EventSwitch.RemoveListenerForEvent(event, listenerID)
}
func (w *WSEvents) RemoveListener(listenerID string) {
// remove all counts for this listener
for _, s := range w.listeners[listenerID] {
w.evtCount[s] -= 1
if w.evtCount[s] == 0 {
w.unsubscribe(s)
}
}
w.listeners[listenerID] = nil
// then let the switch do its magic
w.EventSwitch.RemoveListener(listenerID)
}
// eventListener is an infinite loop pulling all websocket events
// and pushing them to the EventSwitch.
//
// the goroutine stops only when it receives on quit
func (w *WSEvents) eventListener() {
for {
select {
case res := <-w.ws.ResultsCh:
// res is json.RawMessage
err := w.parseEvent(res)
if err != nil {
// FIXME: better logging/handling of errors??
fmt.Printf("ws result: %+v\n", err)
}
case err := <-w.ws.ErrorsCh:
// FIXME: better logging/handling of errors??
fmt.Printf("ws err: %+v\n", err)
case <-w.quit:
// send a message so we can wait for the routine to exit
// before cleaning up the w.ws stuff
w.done <- true
return
}
}
}
// parseEvent unmarshals the json message and converts it into
// some implementation of types.TMEventData, and sends it off
// on its merry way to the EventSwitch
func (w *WSEvents) parseEvent(data []byte) (err error) {
result := new(ctypes.TMResult)
wire.ReadJSONPtr(result, data, &err)
if err != nil {
return err
}
event, ok := (*result).(*ctypes.ResultEvent)
if !ok {
// ignore silently (eg. subscribe, unsubscribe and maybe other events)
return nil
}
// looks good! let's fire this baby!
w.EventSwitch.FireEvent(event.Name, event.Data)
return nil
}
// no way of exposing these failures, so we panic.
// is this right? or silently ignore???
func (w *WSEvents) subscribe(event string) {
err := w.ws.Subscribe(event)
if err != nil {
panic(err)
}
}
func (w *WSEvents) unsubscribe(event string) {
err := w.ws.Unsubscribe(event)
if err != nil {
panic(err)
}
}
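A minimal sketch of the plain (non-event) request path through this HTTP client; the address is an assumption, and each call unwraps a ctypes.TMResult into its concrete result type as the methods above do:

package main

import (
	"fmt"

	"github.com/tendermint/tendermint/rpc/client"
)

func main() {
	c := client.NewHTTP("tcp://localhost:46657", "/websocket") // assumed address

	s, err := c.Status()
	if err != nil {
		panic(err)
	}
	fmt.Println("height:", s.LatestBlockHeight)

	b, err := c.Block(s.LatestBlockHeight)
	if err != nil {
		panic(err)
	}
	fmt.Printf("app hash: %X\n", b.BlockMeta.Header.AppHash)
}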

83 rpc/client/interface.go Normal file

@@ -0,0 +1,83 @@
/*
package client provides a general purpose interface (Client) for connecting
to a tendermint node, as well as higher-level functionality.
The main implementation for production code is client.HTTP, which
connects via http to the jsonrpc interface of the tendermint node.
For connecting to a node running in the same process (eg. when
compiling the abci app in the same process), you can use the client.Local
implementation.
For mocking out server responses during testing to see behavior for
arbitrary return values, use the mock package.
In addition to the Client interface and its two implementations,
this package also provides helper functions that work on any Client
implementation; external code should accept the interface for
maximum flexibility and testability.
*/
package client
import (
ctypes "github.com/tendermint/tendermint/rpc/core/types"
"github.com/tendermint/tendermint/types"
)
// ABCIClient groups together the functionality that principally
// affects the ABCI app. In many cases this will be all we want,
// so we can accept an interface which is easier to mock
type ABCIClient interface {
// reading from abci app
ABCIInfo() (*ctypes.ResultABCIInfo, error)
ABCIQuery(path string, data []byte, prove bool) (*ctypes.ResultABCIQuery, error)
// writing to abci app
BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error)
BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error)
BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error)
}
// SignClient groups together the calls needed to get valid
// signatures and prove anything about the chain
type SignClient interface {
Block(height int) (*ctypes.ResultBlock, error)
Commit(height int) (*ctypes.ResultCommit, error)
Validators() (*ctypes.ResultValidators, error)
Tx(hash []byte, prove bool) (*ctypes.ResultTx, error)
}
// HistoryClient shows us data from genesis to now in large chunks.
type HistoryClient interface {
Genesis() (*ctypes.ResultGenesis, error)
BlockchainInfo(minHeight, maxHeight int) (*ctypes.ResultBlockchainInfo, error)
}
type StatusClient interface {
// general chain info
Status() (*ctypes.ResultStatus, error)
}
// Client wraps the most important rpc calls a client would make.
// It also embeds types.EventSwitch, so you can subscribe to
// events directly.
type Client interface {
ABCIClient
SignClient
HistoryClient
StatusClient
// this Client is reactive, you can subscribe to any TMEventData
// type, given the proper string. see tendermint/types/events.go
types.EventSwitch
}
// NetworkClient exposes general info about the network state.
// Usually not needed.
//
// Not included in the Client interface, but generally implemented
// by concrete implementations.
type NetworkClient interface {
NetInfo() (*ctypes.ResultNetInfo, error)
DumpConsensusState() (*ctypes.ResultDumpConsensusState, error)
}
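A short sketch of the pattern the package doc recommends: accept the narrowest interface, so any implementation (HTTP, Local, or a mock) can be passed in. The helper name is hypothetical:

package example

import "github.com/tendermint/tendermint/rpc/client"

// lastAppHash (hypothetical helper) needs only SignClient, so tests
// can satisfy it with a mock instead of a full node client.
func lastAppHash(c client.SignClient, height int) ([]byte, error) {
	commit, err := c.Commit(height)
	if err != nil {
		return nil, err
	}
	return commit.Header.AppHash, nil
}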

109 rpc/client/localclient.go Normal file

@@ -0,0 +1,109 @@
package client
import (
nm "github.com/tendermint/tendermint/node"
"github.com/tendermint/tendermint/rpc/core"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
"github.com/tendermint/tendermint/types"
)
/*
Local is a Client implementation that directly executes the rpc
functions on a given node, without going through HTTP or GRPC.
This implementation is useful for:
* Running tests against a node in-process without the overhead
of going through an http server
* Communication between an ABCI app and tendermint core when they
are compiled in the same process.
For real clients, you probably want to use client.HTTP. For more
powerful control during testing, you probably want the "client/mock" package.
*/
type Local struct {
node *nm.Node
types.EventSwitch
}
// NewLocal configures a client that calls the Node directly.
//
// Note that given how rpc/core works with package singletons,
// you can only have one node per process. So make sure test cases
// don't run in parallel, or try to simulate an entire network in
// one process...
func NewLocal(node *nm.Node) Local {
node.ConfigureRPC()
return Local{
node: node,
EventSwitch: node.EventSwitch(),
}
}
func (c Local) _assertIsClient() Client {
return c
}
func (c Local) _assertIsNetworkClient() NetworkClient {
return c
}
func (c Local) Status() (*ctypes.ResultStatus, error) {
return core.Status()
}
func (c Local) ABCIInfo() (*ctypes.ResultABCIInfo, error) {
return core.ABCIInfo()
}
func (c Local) ABCIQuery(path string, data []byte, prove bool) (*ctypes.ResultABCIQuery, error) {
return core.ABCIQuery(path, data, prove)
}
func (c Local) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) {
return core.BroadcastTxCommit(tx)
}
func (c Local) BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
return core.BroadcastTxAsync(tx)
}
func (c Local) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
return core.BroadcastTxSync(tx)
}
func (c Local) NetInfo() (*ctypes.ResultNetInfo, error) {
return core.NetInfo()
}
func (c Local) DumpConsensusState() (*ctypes.ResultDumpConsensusState, error) {
return core.DumpConsensusState()
}
func (c Local) DialSeeds(seeds []string) (*ctypes.ResultDialSeeds, error) {
return core.UnsafeDialSeeds(seeds)
}
func (c Local) BlockchainInfo(minHeight, maxHeight int) (*ctypes.ResultBlockchainInfo, error) {
return core.BlockchainInfo(minHeight, maxHeight)
}
func (c Local) Genesis() (*ctypes.ResultGenesis, error) {
return core.Genesis()
}
func (c Local) Block(height int) (*ctypes.ResultBlock, error) {
return core.Block(height)
}
func (c Local) Commit(height int) (*ctypes.ResultCommit, error) {
return core.Commit(height)
}
func (c Local) Validators() (*ctypes.ResultValidators, error) {
return core.Validators()
}
func (c Local) Tx(hash []byte, prove bool) (*ctypes.ResultTx, error) {
return core.Tx(hash, prove)
}
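A minimal sketch of using Local against an in-process node; the function is hypothetical, and the singleton caveat above applies (one node per process):

package example

import (
	nm "github.com/tendermint/tendermint/node"
	"github.com/tendermint/tendermint/rpc/client"
)

// networkOf (hypothetical) queries an in-process node directly,
// skipping the http server entirely.
func networkOf(node *nm.Node) (string, error) {
	c := client.NewLocal(node)
	s, err := c.Status()
	if err != nil {
		return "", err
	}
	return s.NodeInfo.Network, nil
}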

24 rpc/client/main_test.go Normal file

@@ -0,0 +1,24 @@
package client_test
import (
"os"
"testing"
meapp "github.com/tendermint/merkleeyes/app"
nm "github.com/tendermint/tendermint/node"
rpctest "github.com/tendermint/tendermint/rpc/test"
)
var node *nm.Node
func TestMain(m *testing.M) {
// start a tendermint node (and merkleeyes) in the background to test against
app := meapp.NewMerkleEyesApp("", 100)
node = rpctest.StartTendermint(app)
code := m.Run()
// and shut down properly at the end
node.Stop()
node.Wait()
os.Exit(code)
}

194 rpc/client/mock/abci.go Normal file

@@ -0,0 +1,194 @@
package mock
import (
abci "github.com/tendermint/abci/types"
"github.com/tendermint/tendermint/rpc/client"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
"github.com/tendermint/tendermint/types"
)
// ABCIApp will send all abci related requests to the given app,
// so you can test app behavior from a client without needing
// an entire tendermint node
type ABCIApp struct {
App abci.Application
}
func (a ABCIApp) _assertABCIClient() client.ABCIClient {
return a
}
func (a ABCIApp) ABCIInfo() (*ctypes.ResultABCIInfo, error) {
return &ctypes.ResultABCIInfo{a.App.Info()}, nil
}
func (a ABCIApp) ABCIQuery(path string, data []byte, prove bool) (*ctypes.ResultABCIQuery, error) {
q := a.App.Query(abci.RequestQuery{data, path, 0, prove})
return &ctypes.ResultABCIQuery{q}, nil
}
func (a ABCIApp) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) {
res := ctypes.ResultBroadcastTxCommit{}
c := a.App.CheckTx(tx)
res.CheckTx = &abci.ResponseCheckTx{c.Code, c.Data, c.Log}
if !c.IsOK() {
return &res, nil
}
d := a.App.DeliverTx(tx)
res.DeliverTx = &abci.ResponseDeliverTx{d.Code, d.Data, d.Log}
return &res, nil
}
func (a ABCIApp) BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
c := a.App.CheckTx(tx)
// and this gets written in a background thread...
if c.IsOK() {
go func() { a.App.DeliverTx(tx) }()
}
return &ctypes.ResultBroadcastTx{c.Code, c.Data, c.Log, tx.Hash()}, nil
}
func (a ABCIApp) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
c := a.App.CheckTx(tx)
// and this gets written in a background thread...
if c.IsOK() {
go func() { a.App.DeliverTx(tx) }()
}
return &ctypes.ResultBroadcastTx{c.Code, c.Data, c.Log, tx.Hash()}, nil
}
// ABCIMock returns preconfigured responses (see Call) for each
// abci related request, so you can exercise client behavior
// without an app or an entire tendermint node
type ABCIMock struct {
Info Call
Query Call
BroadcastCommit Call
Broadcast Call
}
func (m ABCIMock) _assertABCIClient() client.ABCIClient {
return m
}
func (m ABCIMock) ABCIInfo() (*ctypes.ResultABCIInfo, error) {
res, err := m.Info.GetResponse(nil)
if err != nil {
return nil, err
}
return &ctypes.ResultABCIInfo{res.(abci.ResponseInfo)}, nil
}
func (m ABCIMock) ABCIQuery(path string, data []byte, prove bool) (*ctypes.ResultABCIQuery, error) {
res, err := m.Query.GetResponse(QueryArgs{path, data, prove})
if err != nil {
return nil, err
}
return &ctypes.ResultABCIQuery{res.(abci.ResponseQuery)}, nil
}
func (m ABCIMock) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) {
res, err := m.BroadcastCommit.GetResponse(tx)
if err != nil {
return nil, err
}
return res.(*ctypes.ResultBroadcastTxCommit), nil
}
func (m ABCIMock) BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
res, err := m.Broadcast.GetResponse(tx)
if err != nil {
return nil, err
}
return res.(*ctypes.ResultBroadcastTx), nil
}
func (m ABCIMock) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
res, err := m.Broadcast.GetResponse(tx)
if err != nil {
return nil, err
}
return res.(*ctypes.ResultBroadcastTx), nil
}
// ABCIRecorder can wrap another type (ABCIApp, ABCIMock, or Client)
// and record all ABCI related calls.
type ABCIRecorder struct {
Client client.ABCIClient
Calls []Call
}
func NewABCIRecorder(client client.ABCIClient) *ABCIRecorder {
return &ABCIRecorder{
Client: client,
Calls: []Call{},
}
}
func (r *ABCIRecorder) _assertABCIClient() client.ABCIClient {
return r
}
type QueryArgs struct {
Path string
Data []byte
Prove bool
}
func (r *ABCIRecorder) addCall(call Call) {
r.Calls = append(r.Calls, call)
}
func (r *ABCIRecorder) ABCIInfo() (*ctypes.ResultABCIInfo, error) {
res, err := r.Client.ABCIInfo()
r.addCall(Call{
Name: "abci_info",
Response: res,
Error: err,
})
return res, err
}
func (r *ABCIRecorder) ABCIQuery(path string, data []byte, prove bool) (*ctypes.ResultABCIQuery, error) {
res, err := r.Client.ABCIQuery(path, data, prove)
r.addCall(Call{
Name: "abci_query",
Args: QueryArgs{path, data, prove},
Response: res,
Error: err,
})
return res, err
}
func (r *ABCIRecorder) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) {
res, err := r.Client.BroadcastTxCommit(tx)
r.addCall(Call{
Name: "broadcast_tx_commit",
Args: tx,
Response: res,
Error: err,
})
return res, err
}
func (r *ABCIRecorder) BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
res, err := r.Client.BroadcastTxAsync(tx)
r.addCall(Call{
Name: "broadcast_tx_async",
Args: tx,
Response: res,
Error: err,
})
return res, err
}
func (r *ABCIRecorder) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
res, err := r.Client.BroadcastTxSync(tx)
r.addCall(Call{
Name: "broadcast_tx_sync",
Args: tx,
Response: res,
Error: err,
})
return res, err
}


@@ -0,0 +1,169 @@
package mock_test
import (
"fmt"
"testing"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/tendermint/abci/example/dummy"
abci "github.com/tendermint/abci/types"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tendermint/rpc/client/mock"
)
func TestABCIMock(t *testing.T) {
assert, require := assert.New(t), require.New(t)
key, value := []byte("foo"), []byte("bar")
height := uint64(10)
goodTx := types.Tx{0x01, 0xff}
badTx := types.Tx{0x12, 0x21}
m := mock.ABCIMock{
Info: mock.Call{Error: errors.New("foobar")},
Query: mock.Call{Response: abci.ResponseQuery{
Key: key,
Value: value,
Height: height,
}},
// BroadcastCommit response depends on the call args
BroadcastCommit: mock.Call{
Args: goodTx,
Response: &ctypes.ResultBroadcastTxCommit{
CheckTx: &abci.ResponseCheckTx{Data: []byte("stand")},
DeliverTx: &abci.ResponseDeliverTx{Data: []byte("deliver")},
},
Error: errors.New("bad tx"),
},
Broadcast: mock.Call{Error: errors.New("must commit")},
}
// now, let's try to make some calls
_, err := m.ABCIInfo()
require.NotNil(err)
assert.Equal("foobar", err.Error())
// query always returns the response
query, err := m.ABCIQuery("/", nil, false)
require.Nil(err)
require.NotNil(query)
assert.Equal(key, query.Response.GetKey())
assert.Equal(value, query.Response.GetValue())
assert.Equal(height, query.Response.GetHeight())
// non-commit calls always return errors
_, err = m.BroadcastTxSync(goodTx)
require.NotNil(err)
assert.Equal("must commit", err.Error())
_, err = m.BroadcastTxAsync(goodTx)
require.NotNil(err)
assert.Equal("must commit", err.Error())
// commit depends on the input
_, err = m.BroadcastTxCommit(badTx)
require.NotNil(err)
assert.Equal("bad tx", err.Error())
bres, err := m.BroadcastTxCommit(goodTx)
require.Nil(err, "%+v", err)
assert.EqualValues(0, bres.CheckTx.Code)
assert.EqualValues("stand", bres.CheckTx.Data)
assert.EqualValues("deliver", bres.DeliverTx.Data)
}
func TestABCIRecorder(t *testing.T) {
assert, require := assert.New(t), require.New(t)
m := mock.ABCIMock{
Info: mock.Call{Response: abci.ResponseInfo{
Data: "data",
Version: "v0.9.9",
}},
Query: mock.Call{Error: errors.New("query")},
Broadcast: mock.Call{Error: errors.New("broadcast")},
BroadcastCommit: mock.Call{Error: errors.New("broadcast_commit")},
}
r := mock.NewABCIRecorder(m)
require.Equal(0, len(r.Calls))
r.ABCIInfo()
r.ABCIQuery("path", []byte("data"), true)
require.Equal(2, len(r.Calls))
info := r.Calls[0]
assert.Equal("abci_info", info.Name)
assert.Nil(info.Error)
assert.Nil(info.Args)
require.NotNil(info.Response)
ir, ok := info.Response.(*ctypes.ResultABCIInfo)
require.True(ok)
assert.Equal("data", ir.Response.Data)
assert.Equal("v0.9.9", ir.Response.Version)
query := r.Calls[1]
assert.Equal("abci_query", query.Name)
assert.Nil(query.Response)
require.NotNil(query.Error)
assert.Equal("query", query.Error.Error())
require.NotNil(query.Args)
qa, ok := query.Args.(mock.QueryArgs)
require.True(ok)
assert.Equal("path", qa.Path)
assert.EqualValues("data", qa.Data)
assert.True(qa.Prove)
// now add some broadcasts
txs := []types.Tx{{1}, {2}, {3}}
r.BroadcastTxCommit(txs[0])
r.BroadcastTxSync(txs[1])
r.BroadcastTxAsync(txs[2])
require.Equal(5, len(r.Calls))
bc := r.Calls[2]
assert.Equal("broadcast_tx_commit", bc.Name)
assert.Nil(bc.Response)
require.NotNil(bc.Error)
assert.EqualValues(bc.Args, txs[0])
bs := r.Calls[3]
assert.Equal("broadcast_tx_sync", bs.Name)
assert.Nil(bs.Response)
require.NotNil(bs.Error)
assert.EqualValues(bs.Args, txs[1])
ba := r.Calls[4]
assert.Equal("broadcast_tx_async", ba.Name)
assert.Nil(ba.Response)
require.NotNil(ba.Error)
assert.EqualValues(ba.Args, txs[2])
}
func TestABCIApp(t *testing.T) {
assert, require := assert.New(t), require.New(t)
app := dummy.NewDummyApplication()
m := mock.ABCIApp{app}
// get some info
info, err := m.ABCIInfo()
require.Nil(err)
assert.Equal(`{"size":0}`, info.Response.GetData())
// add a key
key, value := "foo", "bar"
tx := fmt.Sprintf("%s=%s", key, value)
res, err := m.BroadcastTxCommit(types.Tx(tx))
require.Nil(err)
assert.True(res.CheckTx.Code.IsOK())
require.NotNil(res.DeliverTx)
assert.True(res.DeliverTx.Code.IsOK())
// check the key
qres, err := m.ABCIQuery("/key", []byte(key), false)
require.Nil(err)
assert.EqualValues(value, qres.Response.Value)
}

128 rpc/client/mock/client.go Normal file

@@ -0,0 +1,128 @@
/*
package mock returns a Client implementation that
accepts various (mock) implementations of the various methods.
This implementation is useful for using in tests, when you don't
need a real server, but want a high level of control over
the server response you want to mock (eg. error handling),
or if you just want to record the calls to verify in your tests.
For real clients, you probably want the "http" package. If you
want to directly call a tendermint node in process, you can use the
"local" package.
*/
package mock
import (
"reflect"
"github.com/tendermint/tendermint/rpc/client"
"github.com/tendermint/tendermint/rpc/core"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
"github.com/tendermint/tendermint/types"
)
// Client wraps arbitrary implementations of the various interfaces.
//
// We provide a few choices to mock out each one in this package.
// Nothing hidden here, so no New function, just construct it from
// some parts, and swap them out during the tests.
type Client struct {
client.ABCIClient
client.SignClient
client.HistoryClient
client.StatusClient
// create a mock with types.NewEventSwitch()
types.EventSwitch
}
func (c Client) _assertIsClient() client.Client {
return c
}
// Call is used by recorders to save a call and response.
// It can also be used to configure mock responses.
//
type Call struct {
Name string
Args interface{}
Response interface{}
Error error
}
// GetResponse will generate the appropriate response for us, when
// using the Call struct to configure a Mock handler.
//
// When configuring a response, if only one of Response or Error is
// set then that will always be returned. If both are set, then
// we return Response if the Args match the set args, Error otherwise.
func (c Call) GetResponse(args interface{}) (interface{}, error) {
// handle the case with no response
if c.Response == nil {
if c.Error == nil {
panic("Misconfigured call, you must set either Response or Error")
}
return nil, c.Error
}
// response without error
if c.Error == nil {
return c.Response, nil
}
// have both, we must check args....
if reflect.DeepEqual(args, c.Args) {
return c.Response, nil
}
return nil, c.Error
}
func (c Client) Status() (*ctypes.ResultStatus, error) {
return core.Status()
}
func (c Client) ABCIInfo() (*ctypes.ResultABCIInfo, error) {
return core.ABCIInfo()
}
func (c Client) ABCIQuery(path string, data []byte, prove bool) (*ctypes.ResultABCIQuery, error) {
return core.ABCIQuery(path, data, prove)
}
func (c Client) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) {
return core.BroadcastTxCommit(tx)
}
func (c Client) BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
return core.BroadcastTxAsync(tx)
}
func (c Client) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
return core.BroadcastTxSync(tx)
}
func (c Client) NetInfo() (*ctypes.ResultNetInfo, error) {
return core.NetInfo()
}
func (c Client) DialSeeds(seeds []string) (*ctypes.ResultDialSeeds, error) {
return core.UnsafeDialSeeds(seeds)
}
func (c Client) BlockchainInfo(minHeight, maxHeight int) (*ctypes.ResultBlockchainInfo, error) {
return core.BlockchainInfo(minHeight, maxHeight)
}
func (c Client) Genesis() (*ctypes.ResultGenesis, error) {
return core.Genesis()
}
func (c Client) Block(height int) (*ctypes.ResultBlock, error) {
return core.Block(height)
}
func (c Client) Commit(height int) (*ctypes.ResultCommit, error) {
return core.Commit(height)
}
func (c Client) Validators() (*ctypes.ResultValidators, error) {
return core.Validators()
}
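A small sketch of the Call dispatch rule documented on GetResponse: with both Response and Error set, matching Args yield the Response and anything else yields the Error. The values are illustrative:

package main

import (
	"errors"
	"fmt"

	"github.com/tendermint/tendermint/rpc/client/mock"
)

func main() {
	call := mock.Call{
		Args:     "good",
		Response: "ok",
		Error:    errors.New("bad input"),
	}
	res, err := call.GetResponse("good")
	fmt.Println(res, err) // ok <nil>
	res, err = call.GetResponse("evil")
	fmt.Println(res, err) // <nil> bad input
}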

55 rpc/client/mock/status.go Normal file

@@ -0,0 +1,55 @@
package mock
import (
"github.com/tendermint/tendermint/rpc/client"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
)
// StatusMock returns the result specified by the Call
type StatusMock struct {
Call
}
func (m *StatusMock) _assertStatusClient() client.StatusClient {
return m
}
func (m *StatusMock) Status() (*ctypes.ResultStatus, error) {
res, err := m.GetResponse(nil)
if err != nil {
return nil, err
}
return res.(*ctypes.ResultStatus), nil
}
// StatusRecorder can wrap another type (StatusMock, full client)
// and record the status calls
type StatusRecorder struct {
Client client.StatusClient
Calls []Call
}
func NewStatusRecorder(client client.StatusClient) *StatusRecorder {
return &StatusRecorder{
Client: client,
Calls: []Call{},
}
}
func (r *StatusRecorder) _assertStatusClient() client.StatusClient {
return r
}
func (r *StatusRecorder) addCall(call Call) {
r.Calls = append(r.Calls, call)
}
func (r *StatusRecorder) Status() (*ctypes.ResultStatus, error) {
res, err := r.Client.Status()
r.addCall(Call{
Name: "status",
Response: res,
Error: err,
})
return res, err
}


@@ -0,0 +1,45 @@
package mock_test
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
"github.com/tendermint/tendermint/rpc/client/mock"
)
func TestStatus(t *testing.T) {
assert, require := assert.New(t), require.New(t)
m := &mock.StatusMock{
Call: mock.Call{
Response: &ctypes.ResultStatus{
LatestBlockHash: []byte("block"),
LatestAppHash: []byte("app"),
LatestBlockHeight: 10,
}},
}
r := mock.NewStatusRecorder(m)
require.Equal(0, len(r.Calls))
// make sure the response works properly
status, err := r.Status()
require.Nil(err, "%+v", err)
assert.EqualValues("block", status.LatestBlockHash)
assert.EqualValues(10, status.LatestBlockHeight)
// make sure recorder works properly
require.Equal(1, len(r.Calls))
rs := r.Calls[0]
assert.Equal("status", rs.Name)
assert.Nil(rs.Args)
assert.Nil(rs.Error)
require.NotNil(rs.Response)
st, ok := rs.Response.(*ctypes.ResultStatus)
require.True(ok)
assert.EqualValues("block", st.LatestBlockHash)
assert.EqualValues(10, st.LatestBlockHeight)
}

188 rpc/client/rpc_test.go Normal file

@@ -0,0 +1,188 @@
package client_test
import (
"strings"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
merkle "github.com/tendermint/go-merkle"
merktest "github.com/tendermint/merkleeyes/testutil"
"github.com/tendermint/tendermint/rpc/client"
rpctest "github.com/tendermint/tendermint/rpc/test"
"github.com/tendermint/tendermint/types"
)
func getHTTPClient() *client.HTTP {
rpcAddr := rpctest.GetConfig().GetString("rpc_laddr")
return client.NewHTTP(rpcAddr, "/websocket")
}
func getLocalClient() client.Local {
return client.NewLocal(node)
}
// GetClients returns a slice of clients for table-driven tests
func GetClients() []client.Client {
return []client.Client{
getHTTPClient(),
getLocalClient(),
}
}
// Make sure status is correct (we connect properly)
func TestStatus(t *testing.T) {
for i, c := range GetClients() {
chainID := rpctest.GetConfig().GetString("chain_id")
status, err := c.Status()
require.Nil(t, err, "%d: %+v", i, err)
assert.Equal(t, chainID, status.NodeInfo.Network)
}
}
// Make sure info is correct (we connect properly)
func TestInfo(t *testing.T) {
for i, c := range GetClients() {
// status, err := c.Status()
// require.Nil(t, err, "%+v", err)
info, err := c.ABCIInfo()
require.Nil(t, err, "%d: %+v", i, err)
// TODO: this is not correct - fix merkleeyes!
// assert.EqualValues(t, status.LatestBlockHeight, info.Response.LastBlockHeight)
assert.True(t, strings.HasPrefix(info.Response.Data, "size"))
}
}
func TestNetInfo(t *testing.T) {
for i, c := range GetClients() {
nc, ok := c.(client.NetworkClient)
require.True(t, ok, "%d", i)
netinfo, err := nc.NetInfo()
require.Nil(t, err, "%d: %+v", i, err)
assert.True(t, netinfo.Listening)
assert.Equal(t, 0, len(netinfo.Peers))
}
}
func TestDumpConsensusState(t *testing.T) {
for i, c := range GetClients() {
// FIXME: fix server so it doesn't panic on invalid input
nc, ok := c.(client.NetworkClient)
require.True(t, ok, "%d", i)
cons, err := nc.DumpConsensusState()
require.Nil(t, err, "%d: %+v", i, err)
assert.NotEmpty(t, cons.RoundState)
assert.Empty(t, cons.PeerRoundStates)
}
}
func TestGenesisAndValidators(t *testing.T) {
for i, c := range GetClients() {
chainID := rpctest.GetConfig().GetString("chain_id")
// make sure this is the right genesis file
gen, err := c.Genesis()
require.Nil(t, err, "%d: %+v", i, err)
assert.Equal(t, chainID, gen.Genesis.ChainID)
// get the genesis validator
require.Equal(t, 1, len(gen.Genesis.Validators))
gval := gen.Genesis.Validators[0]
// get the current validators
vals, err := c.Validators()
require.Nil(t, err, "%d: %+v", i, err)
require.Equal(t, 1, len(vals.Validators))
val := vals.Validators[0]
// make sure the current set is also the genesis set
assert.Equal(t, gval.Amount, val.VotingPower)
assert.Equal(t, gval.PubKey, val.PubKey)
}
}
// Make some app checks
func TestAppCalls(t *testing.T) {
assert, require := assert.New(t), require.New(t)
for i, c := range GetClients() {
// anchor heights to the current height, to avoid racing and guessing
s, err := c.Status()
require.Nil(err, "%d: %+v", i, err)
// sh is start height or status height
sh := s.LatestBlockHeight
// look for the future
_, err = c.Block(sh + 2)
assert.NotNil(err) // no block yet
// write something
k, v, tx := merktest.MakeTxKV()
bres, err := c.BroadcastTxCommit(tx)
require.Nil(err, "%d: %+v", i, err)
require.True(bres.DeliverTx.GetCode().IsOK())
txh := bres.Height
apph := txh + 1 // this is where the tx will be applied to the state
// wait before querying
client.WaitForHeight(c, apph, nil)
qres, err := c.ABCIQuery("/key", k, false)
if assert.Nil(err) && assert.True(qres.Response.Code.IsOK()) {
data := qres.Response
// assert.Equal(k, data.GetKey()) // only returned for proofs
assert.Equal(v, data.GetValue())
}
// make sure we can lookup the tx with proof
ptx, err := c.Tx(bres.Hash, true)
require.Nil(err, "%d: %+v", i, err)
assert.Equal(txh, ptx.Height)
assert.Equal(types.Tx(tx), ptx.Tx)
// and we can even check the block is added
block, err := c.Block(apph)
require.Nil(err, "%d: %+v", i, err)
appHash := block.BlockMeta.Header.AppHash
assert.True(len(appHash) > 0)
assert.EqualValues(apph, block.BlockMeta.Header.Height)
// check blockchain info, now that we know there is info
// TODO: is it documented somewhere that they are returned
// in order of descending height???
info, err := c.BlockchainInfo(apph, apph)
require.Nil(err, "%d: %+v", i, err)
assert.True(info.LastHeight >= apph)
if assert.Equal(1, len(info.BlockMetas)) {
lastMeta := info.BlockMetas[0]
assert.EqualValues(apph, lastMeta.Header.Height)
bMeta := block.BlockMeta
assert.Equal(bMeta.Header.AppHash, lastMeta.Header.AppHash)
assert.Equal(bMeta.BlockID, lastMeta.BlockID)
}
// and get the corresponding commit with the same apphash
commit, err := c.Commit(apph)
require.Nil(err, "%d: %+v", i, err)
cappHash := commit.Header.AppHash
assert.Equal(appHash, cappHash)
assert.NotNil(commit.Commit)
// compare the commits (note Commit(2) has commit from Block(3))
commit2, err := c.Commit(apph - 1)
require.Nil(err, "%d: %+v", i, err)
assert.Equal(block.Block.LastCommit, commit2.Commit)
// and we got a proof that works!
pres, err := c.ABCIQuery("/key", k, true)
if assert.Nil(err) && assert.True(pres.Response.Code.IsOK()) {
proof, err := merkle.ReadProof(pres.Response.GetProof())
if assert.Nil(err) {
key := pres.Response.GetKey()
value := pres.Response.GetValue()
assert.Equal(appHash, proof.RootHash)
valid := proof.Verify(key, value, appHash)
assert.True(valid)
}
}
}
}

29 rpc/core/abci.go Normal file

@@ -0,0 +1,29 @@
package core
import (
abci "github.com/tendermint/abci/types"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
)
//-----------------------------------------------------------------------------
func ABCIQuery(path string, data []byte, prove bool) (*ctypes.ResultABCIQuery, error) {
resQuery, err := proxyAppQuery.QuerySync(abci.RequestQuery{
Path: path,
Data: data,
Prove: prove,
})
if err != nil {
return nil, err
}
log.Info("ABCIQuery", "path", path, "data", data, "result", resQuery)
return &ctypes.ResultABCIQuery{resQuery}, nil
}
func ABCIInfo() (*ctypes.ResultABCIInfo, error) {
resInfo, err := proxyAppQuery.InfoSync()
if err != nil {
return nil, err
}
return &ctypes.ResultABCIInfo{resInfo}, nil
}


@@ -9,6 +9,7 @@ import (
//-----------------------------------------------------------------------------
// TODO: limit/permission on (max - min)
func BlockchainInfo(minHeight, maxHeight int) (*ctypes.ResultBlockchainInfo, error) {
if maxHeight == 0 {
maxHeight = blockStore.Height()
@@ -18,7 +19,7 @@ func BlockchainInfo(minHeight, maxHeight int) (*ctypes.ResultBlockchainInfo, err
if minHeight == 0 {
minHeight = MaxInt(1, maxHeight-20)
}
log.Info("BlockchainInfoHandler", "maxHeight", maxHeight, "minHeight", minHeight)
log.Debug("BlockchainInfoHandler", "maxHeight", maxHeight, "minHeight", minHeight)
blockMetas := []*types.BlockMeta{}
for height := maxHeight; height >= minHeight; height-- {
@@ -43,3 +44,28 @@ func Block(height int) (*ctypes.ResultBlock, error) {
block := blockStore.LoadBlock(height)
return &ctypes.ResultBlock{blockMeta, block}, nil
}
//-----------------------------------------------------------------------------
func Commit(height int) (*ctypes.ResultCommit, error) {
if height == 0 {
return nil, fmt.Errorf("Height must be greater than 0")
}
storeHeight := blockStore.Height()
if height > storeHeight {
return nil, fmt.Errorf("Height must be less than or equal to the current blockchain height")
}
header := blockStore.LoadBlockMeta(height).Header
// If the next block has not been committed yet,
// use a non-canonical commit
if height == storeHeight {
commit := blockStore.LoadSeenCommit(height)
return &ctypes.ResultCommit{header, commit, false}, nil
}
// Return the canonical commit (comes from the block at height+1)
commit := blockStore.LoadBlockCommit(height)
return &ctypes.ResultCommit{header, commit, true}, nil
}
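A sketch of how the canonical flag set above surfaces to a client. The field name CanonicalCommit is an assumption inferred from the positional ResultCommit literals in this diff; the helper is hypothetical:

package example

import "github.com/tendermint/tendermint/rpc/client"

// isCanonical (hypothetical) reports whether the commit for height
// came from the next block (canonical) or is the "seen" commit at
// the chain tip (non-canonical).
func isCanonical(c client.SignClient, height int) (bool, error) {
	res, err := c.Commit(height)
	if err != nil {
		return false, err
	}
	return res.CanonicalCommit, nil // assumed field name
}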


@@ -8,16 +8,7 @@ import (
)
func Validators() (*ctypes.ResultValidators, error) {
var blockHeight int
var validators []*types.Validator
state := consensusState.GetState()
blockHeight = state.LastBlockHeight
state.Validators.Iterate(func(index int, val *types.Validator) bool {
validators = append(validators, val)
return false
})
blockHeight, validators := consensusState.GetValidators()
return &ctypes.ResultValidators{blockHeight, validators}, nil
}


@@ -10,7 +10,7 @@ import (
)
func UnsafeFlushMempool() (*ctypes.ResultUnsafeFlushMempool, error) {
mempoolReactor.Mempool.Flush()
mempool.Flush()
return &ctypes.ResultUnsafeFlushMempool{}, nil
}

Some files were not shown because too many files have changed in this diff.