Compare commits

...

473 Commits

Author SHA1 Message Date
Ethan Buchman
d5a05eccba Merge pull request #2851 from tendermint/release/v0.26.2
Release/v0.26.2
2018-11-15 18:41:54 -05:00
Ethan Buchman
9973decff9 changelog, versionbump (#2850) 2018-11-15 12:16:24 -05:00
Ethan Buchman
a0412357c1 update some top-level markdown files (#2841)
* update some top-level markdown files

* Update README.md

Co-Authored-By: ebuchman <ethan@coinculture.info>
2018-11-15 12:04:47 -05:00
Zeyu Zhu
a70a53254d Optimize: using parameters in func (#2845)
Signed-off-by: Zeyu Zhu <zhuzeyu0409@gmail.com>
2018-11-15 19:57:13 +04:00
zramsay
1ce24a6282 docs: update config: ref #2800 & #2837 2018-11-15 10:23:00 +04:00
zramsay
814a88a69b more maintainable/useful install scripts 2018-11-15 10:23:00 +04:00
Hleb Albau
6353862ac0 2582 Enable CORS on RPC API (#2800) 2018-11-14 16:47:41 +04:00
Zach
3af11c43f2 cleanup ecosystem docs (#2829) 2018-11-14 14:52:01 +04:00
Zach
27fcf96556 update genesis docs, closes #2814 (#2831) 2018-11-14 14:34:10 +04:00
Zach
bb0e17dbf0 arm: add install script, fix Makefile (#2824)
* be like the SDK makefile

* arm: add install script, fix Makefile

* ...
2018-11-14 14:17:07 +04:00
Anton Kaliaev
5a6822c8ac abci: localClient improvements & bugfixes & pubsub Unsubscribe issues (#2748)
* use READ lock/unlock in ConsensusState#GetLastHeight

Refs #2721

* do not use defers when there's no need

* fix peer formatting (output its address instead of the pointer)

```
[54310]: E[11-02|11:59:39.851] Connection failed @ sendRoutine              module=p2p peer=0xb78f00 conn=MConn{74.207.236.148:26656} err="pong timeout"
```

https://github.com/tendermint/tendermint/issues/2721#issuecomment-435326581

* panic if peer has no state

https://github.com/tendermint/tendermint/issues/2721#issuecomment-435347165

It's confusing that sometimes we check if the peer has a state, but most of
the time we expect it to be there

1. add79700b5/mempool/reactor.go (L138)
2. add79700b5/rpc/core/consensus.go (L196) (edited)

I will change everything to always assume peer has a state and panic
otherwise

that should help identify issues earlier

* abci/localclient: extend lock on app callback

App callback should be protected by the lock as well (note this was already
done for InitChainAsync, why not for others???). Otherwise, when we
execute the block, a tx might come in and call the callback at the same
time we're updating it in execBlockOnProxyApp => DATA RACE

Fixes #2721

Consensus state is locked

```
goroutine 113333 [semacquire, 309 minutes]:
sync.runtime_SemacquireMutex(0xc00180009c, 0xc0000c7e00)
        /usr/local/go/src/runtime/sema.go:71 +0x3d
sync.(*RWMutex).RLock(0xc001800090)
        /usr/local/go/src/sync/rwmutex.go:50 +0x4e
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus.(*ConsensusState).GetRoundState(0xc001800000, 0x0)
        /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus/state.go:218 +0x46
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus.(*ConsensusReactor).queryMaj23Routine(0xc0017def80, 0x11104a0, 0xc0072488f0, 0xc007248
9c0)
        /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus/reactor.go:735 +0x16d
created by github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus.(*ConsensusReactor).AddPeer
        /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus/reactor.go:172 +0x236
```

because localClient is locked

```
goroutine 1899 [semacquire, 309 minutes]:
sync.runtime_SemacquireMutex(0xc00003363c, 0xc0000cb500)
        /usr/local/go/src/runtime/sema.go:71 +0x3d
sync.(*Mutex).Lock(0xc000033638)
        /usr/local/go/src/sync/mutex.go:134 +0xff
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/abci/client.(*localClient).SetResponseCallback(0xc0001fb560, 0xc007868540)
        /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/abci/client/local_client.go:32 +0x33
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/proxy.(*appConnConsensus).SetResponseCallback(0xc00002f750, 0xc007868540)
        /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/proxy/app_conn.go:57 +0x40
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/state.execBlockOnProxyApp(0x1104e20, 0xc002ca0ba0, 0x11092a0, 0xc00002f750, 0xc0001fe960, 0xc000bfc660, 0x110cfe0, 0xc000090330, 0xc9d12, 0xc000d9d5a0, ...)
        /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/state/execution.go:230 +0x1fd
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/state.(*BlockExecutor).ApplyBlock(0xc002c2a230, 0x7, 0x0, 0xc000eae880, 0x6, 0xc002e52c60, 0x16, 0x1f927, 0xc9d12, 0xc000d9d5a0, ...)
        /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/state/execution.go:96 +0x142
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus.(*ConsensusState).finalizeCommit(0xc001800000, 0x1f928)
        /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus/state.go:1339 +0xa3e
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus.(*ConsensusState).tryFinalizeCommit(0xc001800000, 0x1f928)
        /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus/state.go:1270 +0x451
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus.(*ConsensusState).enterCommit.func1(0xc001800000, 0x0, 0x1f928)
        /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus/state.go:1218 +0x90
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus.(*ConsensusState).enterCommit(0xc001800000, 0x1f928, 0x0)
        /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus/state.go:1247 +0x6b8
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus.(*ConsensusState).addVote(0xc001800000, 0xc003d8dea0, 0xc000cf4cc0, 0x28, 0xf1, 0xc003bc7ad0, 0xc003bc7b10)
        /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus/state.go:1659 +0xbad
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus.(*ConsensusState).tryAddVote(0xc001800000, 0xc003d8dea0, 0xc000cf4cc0, 0x28, 0xf1, 0xf1, 0xf1)
        /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus/state.go:1517 +0x59
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus.(*ConsensusState).handleMsg(0xc001800000, 0xd98200, 0xc0070dbed0, 0xc000cf4cc0, 0x28)
        /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus/state.go:660 +0x64b
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus.(*ConsensusState).receiveRoutine(0xc001800000, 0x0)
        /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus/state.go:617 +0x670
created by github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus.(*ConsensusState).OnStart
        /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/consensus/state.go:311 +0x132
```

a tx comes in and CheckTx is executed right when we execute the block

```
goroutine 111044 [semacquire, 309 minutes]:
sync.runtime_SemacquireMutex(0xc00003363c, 0x0)
        /usr/local/go/src/runtime/sema.go:71 +0x3d
sync.(*Mutex).Lock(0xc000033638)
        /usr/local/go/src/sync/mutex.go:134 +0xff
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/abci/client.(*localClient).CheckTxAsync(0xc0001fb0e0, 0xc002d94500, 0x13f, 0x280, 0x0)
        /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/abci/client/local_client.go:85 +0x47
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/proxy.(*appConnMempool).CheckTxAsync(0xc00002f720, 0xc002d94500, 0x13f, 0x280, 0x1)
        /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/proxy/app_conn.go:114 +0x51
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/mempool.(*Mempool).CheckTx(0xc002d3a320, 0xc002d94500, 0x13f, 0x280, 0xc0072355f0, 0x0, 0x0)
        /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/mempool/mempool.go:316 +0x17b
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/rpc/core.BroadcastTxSync(0xc002d94500, 0x13f, 0x280, 0x0, 0x0, 0x0)
        /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/rpc/core/mempool.go:93 +0xb8
reflect.Value.call(0xd85560, 0x10326c0, 0x13, 0xec7b8b, 0x4, 0xc00663f180, 0x1, 0x1, 0xc00663f180, 0xc00663f188, ...)
        /usr/local/go/src/reflect/value.go:447 +0x449
reflect.Value.Call(0xd85560, 0x10326c0, 0x13, 0xc00663f180, 0x1, 0x1, 0x0, 0x0, 0xc005cc9344)
        /usr/local/go/src/reflect/value.go:308 +0xa4
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/rpc/lib/server.makeHTTPHandler.func2(0x1102060, 0xc00663f100, 0xc0082d7900)
        /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/rpc/lib/server/handlers.go:269 +0x188
net/http.HandlerFunc.ServeHTTP(0xc002c81f20, 0x1102060, 0xc00663f100, 0xc0082d7900)
        /usr/local/go/src/net/http/server.go:1964 +0x44
net/http.(*ServeMux).ServeHTTP(0xc002c81b60, 0x1102060, 0xc00663f100, 0xc0082d7900)
        /usr/local/go/src/net/http/server.go:2361 +0x127
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/rpc/lib/server.maxBytesHandler.ServeHTTP(0x10f8a40, 0xc002c81b60, 0xf4240, 0x1102060, 0xc00663f100, 0xc0082d7900)
        /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/rpc/lib/server/http_server.go:219 +0xcf
github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/rpc/lib/server.RecoverAndLogHandler.func1(0x1103220, 0xc00121e620, 0xc0082d7900)
        /root/go/src/github.com/MinterTeam/minter-go-node/vendor/github.com/tendermint/tendermint/rpc/lib/server/http_server.go:192 +0x394
net/http.HandlerFunc.ServeHTTP(0xc002c06ea0, 0x1103220, 0xc00121e620, 0xc0082d7900)
        /usr/local/go/src/net/http/server.go:1964 +0x44
net/http.serverHandler.ServeHTTP(0xc001a1aa90, 0x1103220, 0xc00121e620, 0xc0082d7900)
        /usr/local/go/src/net/http/server.go:2741 +0xab
net/http.(*conn).serve(0xc00785a3c0, 0x11041a0, 0xc000f844c0)
        /usr/local/go/src/net/http/server.go:1847 +0x646
created by net/http.(*Server).Serve
        /usr/local/go/src/net/http/server.go:2851 +0x2f5
```

* consensus: use read lock in Receive#VoteMessage

* use defer to unlock mutex because application might panic

* use defer in every method of the localClient

* add a changelog entry

* drain channels before Unsubscribe(All)

Read 55362ed766/libs/pubsub/pubsub.go (L13)
for the detailed explanation of the issue.

We'll need to fix it someday. Make sure to keep an eye on
https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-033-pubsub.md

* retry instead of panic when peer has no state in reactors other than consensus

in /dump_consensus_state RPC endpoint, skip a peer with no state

* rpc/core/mempool: simplify error messages

* rpc/core/mempool: use time.After instead of timer

also, do not log the DeliverTx result (to be consistent with other methods)

* unlock before calling the callback in reqRes#SetCallback
2018-11-13 11:32:51 -05:00
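
The locking change described in the entry above is easier to see as a pattern than as prose. Below is a minimal Go sketch (a hypothetical `localClient` type and callback signature, not the real abci/client code) of the two points the commit makes: hold the mutex across the application call and its callback, and unlock with defer so a panicking application cannot leave the mutex held.

```go
package localclient

import "sync"

// Application and the callback are stand-ins for the real ABCI types;
// this is a sketch of the locking pattern, not the actual abci/client code.
type Application interface {
	CheckTx(tx []byte) (result string)
}

type localClient struct {
	mtx sync.Mutex
	app Application
	cb  func(result string)
}

// SetResponseCallback takes the same mutex as CheckTxAsync, so a tx that
// arrives while consensus is swapping the callback in execBlockOnProxyApp
// can no longer race with the update.
func (c *localClient) SetResponseCallback(cb func(string)) {
	c.mtx.Lock()
	defer c.mtx.Unlock()
	c.cb = cb
}

// CheckTxAsync holds the lock across the app call *and* the callback, and
// unlocks with defer so a panicking application cannot leave the mutex held.
func (c *localClient) CheckTxAsync(tx []byte) {
	c.mtx.Lock()
	defer c.mtx.Unlock()
	res := c.app.CheckTx(tx)
	if c.cb != nil {
		c.cb(res)
	}
}
```
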
Zach
fb10209a96 update to amino 0.14.1 (#2822) 2018-11-13 11:54:43 +04:00
Ethan Buchman
0f793a5a00 Merge pull request #2813 from tendermint/master
Merge pull request #2807 from tendermint/release/v0.26.1
2018-11-12 08:05:12 -05:00
Ethan Buchman
80d0a36250 Merge pull request #2807 from tendermint/release/v0.26.1
Release/v0.26.1
2018-11-12 08:04:27 -05:00
yutianwu
e11699038d [R4R] Add adr-034: PrivValidator file structure (#2751)
* add adr-034

* update changelog

* minor changes

* do some refactor
2018-11-11 14:47:34 -05:00
Ethan Buchman
533b3cfb73 Release/v0.26.1 (#2803)
* changelog and version

* fix changelog
2018-11-11 12:08:28 -05:00
Ismail Khoffi
3ff820bdf4 fix amino overhead computation for Tx (#2792)
* fix amino overhead computation for Tx:

- also count the fieldnum / typ3
- add method to compute overhead per Tx
- slightly clarify comment on MaxAminoOverheadForBlock
- add tests

* fix TestReapMaxBytesMaxGas according to amino overhead

* fix TestMempoolFilters according to amino overhead

* address review comments:

 - add a note about fieldNum = 1
 - add forgotten godoc comment

* fix and use sm.TxPreCheck

* fix test

* remove print statement
2018-11-11 10:09:33 -05:00
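
The amino overhead fix above comes down to a small computation: a length-delimited bytes field costs a uvarint field key plus a uvarint length prefix on top of the raw tx bytes. The sketch below shows that arithmetic assuming proto3-style encoding with field number 1 and the length-delimited wire type; the helper names are illustrative, not the functions added in the PR.

```go
package mempool

import "encoding/binary"

// uvarintSize returns the number of bytes the uvarint encoding of n takes.
func uvarintSize(n uint64) int64 {
	buf := make([]byte, binary.MaxVarintLen64)
	return int64(binary.PutUvarint(buf, n))
}

// txAminoOverhead sketches "overhead per Tx": a length-delimited bytes
// field costs one uvarint field key ((fieldNum << 3) | wire type) plus a
// uvarint length prefix, on top of the raw tx bytes. Field number 1 and
// wire type 2 (length-delimited) are assumptions for illustration.
func txAminoOverhead(tx []byte) int64 {
	const fieldNum, wireType = 1, 2
	fieldKey := uint64(fieldNum<<3 | wireType)
	return uvarintSize(fieldKey) + uvarintSize(uint64(len(tx)))
}
```
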
Mehmet Gurevin
905abf1388 p2p: re-check after sleeps (#2664)
* p2p: re-check after sleeps

* use NodeInfo as an interface

* Revert "use NodeInfo as an interface"

This reverts commit 5f7d055e6c745ac8c8e5a9a7f0bd5ea5bc3d448c.

* Revert "p2p: re-check after sleeps"

This reverts commit 7f41070da070eadd3312efce1cc821aaf3e23771.

* preserve dial to itself

* ignore ensured connections while re-connecting

* re-check after sleep

* keep protocol definition on net addresses

* decrease log level

* Revert "preserve dial to itself"

This reverts commit 0c6e0fc58da78c378c32bb9ded2dd04ad5e754a9.

* correct func comment according to modification

Co-Authored-By: mgurevin <mehmet@gurevin.net>
2018-11-11 08:14:52 -05:00
Ismail Khoffi
7a4b62d3be check the result of ps.peer.Send before calling ps.setHasVote (#2787)
- actually call `ps.SetHasVote` instead to avoid carrying around
`votes.Height()`, `votes.Round()`, `types.SignedMsgType(votes.Type())`
2018-11-11 07:57:08 -05:00
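
A hedged sketch of the guard described above; `Peer`, `PeerState`, and the channel ID are stand-ins approximating the consensus reactor rather than copied from it.

```go
package consensus

// Peer is a stand-in for the p2p peer; Send reports whether the message
// was actually queued for the peer.
type Peer interface {
	Send(chID byte, msg []byte) bool
}

type PeerState struct {
	peer Peer
}

// VoteChannel is illustrative; the real channel ID lives in the reactor.
const VoteChannel byte = 0x22

// trySendVote marks the peer as having the vote only when Send succeeds,
// which is the guard this commit adds before calling SetHasVote.
func (ps *PeerState) trySendVote(msg []byte, setHasVote func()) bool {
	if ps.peer.Send(VoteChannel, msg) {
		setHasVote()
		return true
	}
	return false
}
```
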
Jae Kwon
5b19fcf204 p2p: AddressBook requires addresses to have IDs; Do not close conn immediately after sending pex addrs in seed mode (#2797)
* Require addressbook to only store addresses with valid ID

* Do not shut down peer immediately after sending pex addrs in SeedMode

* p2p: fix #2773

* seed mode: use go-routine to sleep before stopping peer
2018-11-11 06:50:25 -05:00
Anton Kaliaev
1944d8534b test AutoFile#Size (happy path) 2018-11-09 16:29:43 +01:00
Anton Kaliaev
13badc1d29 [autofile/group] do not panic when checking size
It's OK if the head grows a little bigger, but we avoid the panic.

Refs #2703
2018-11-09 16:29:43 +01:00
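
One plausible shape of the change above (illustrative helper, not the actual autofile/group code): tolerate a failed size check instead of panicking, accepting that the head file may rotate slightly late.

```go
package autofile

import "os"

// headSizeOrZero sketches the change: instead of panicking when the head
// file's size cannot be read, return 0 and let the caller rotate a little
// later than usual.
func headSizeOrZero(headPath string) int64 {
	info, err := os.Stat(headPath)
	if err != nil {
		// Previously this was a panic; tolerating the error just means
		// the head may grow slightly past the rotation threshold.
		return 0
	}
	return info.Size()
}
```
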
Anton Kaliaev
091d2c3e5e openFile creates a file if not exist => ErrNotExist is not possible 2018-11-09 16:29:43 +01:00
Anton Kaliaev
d178ea9eaf use our logger in autofile/group 2018-11-09 16:29:43 +01:00
Catalin Pirvu
46d32af055 Add tests for ValidateBasic methods (#2754)
Fixes #2740
2018-11-09 15:59:04 +01:00
Zach
8b77328313 [docs] improve organization of ABCI docs & fix links (#2749)
* dedup with spec/abci/client-server

* fixup abci/readme

* link to getting started in abci/README

* https

* spec/abci: some deduplication

* docs: remove extraneous comment
2018-11-09 15:11:06 +01:00
Ethan Buchman
6e9aee5460 p2p: peer-id -> peer_id (#2771)
* p2p: peer-id -> peer_id

* update changelog
2018-11-06 21:12:46 -08:00
Anton Kaliaev
d460df1335 mempool: print postCheck error (#2762)
This is a follow-up from https://github.com/tendermint/tendermint/pull/2724

Closes #2761
2018-11-06 20:23:44 -08:00
Jae Kwon
03e42d2e38 Fix crypto/merkle ProofOperators.Verify to check bounds on keypath pa… (#2756)
* Fix crypto/merkle ProofOperators.Verify to check bounds on keypath parts.

* Update PENDING
2018-11-05 22:53:44 -08:00
Anton Kaliaev
b8a9b0bf78 Mempool WAL is still created by default in home directory, leads to permission errors (#2758)
* only invoke InitWAL/CloseWAL if WalPath is not empty

Closes #2717

* panic if WAL is not initialized when calling CloseWAL

* add a changelog entry
2018-11-05 22:39:05 -08:00
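
A rough sketch of the WAL guard described above, with stand-in types rather than the real mempool code: initialization is skipped when no WAL path is configured, and closing an uninitialized WAL is treated as a programming error.

```go
package mempool

// Types here are stand-ins; the point is the guard on the WAL path.
type Mempool struct {
	walPath string
	wal     *WAL
}

type WAL struct{ /* elided */ }

func openWAL(path string) (*WAL, error) { return &WAL{}, nil } // placeholder

// InitWAL does nothing when no WAL directory is configured, so a default
// in-home path can no longer cause permission errors.
func (mem *Mempool) InitWAL() error {
	if mem.walPath == "" {
		return nil // WAL disabled: nothing to do
	}
	wal, err := openWAL(mem.walPath)
	if err != nil {
		return err
	}
	mem.wal = wal
	return nil
}

// CloseWAL panics if the WAL was never initialized, since the node only
// calls it when a WAL path is configured.
func (mem *Mempool) CloseWAL() {
	if mem.wal == nil {
		panic("trying to CloseWAL, but the WAL was never initialized")
	}
	// close the underlying file here
	mem.wal = nil
}
```
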
Ethan Buchman
7246ffc48f mempool: ErrPreCheck and more log info (#2724)
* mempool: ErrPreCheck and more log info

* change Pre/PostCheckFunc to return errors

also, continue execution when checking txs in mempool_test if
err=PreCheckErr
2018-11-05 22:32:52 -08:00
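
A sketch of the error-returning check functions mentioned above; the type and error names mirror the commit title, but the bodies are illustrative.

```go
package mempool

import "fmt"

// PreCheckFunc returns an error instead of a bool, and the mempool wraps
// it in ErrPreCheck so callers can tell a filtered tx apart from other
// failures.
type PreCheckFunc func(tx []byte) error

type ErrPreCheck struct {
	Reason error
}

func (e ErrPreCheck) Error() string {
	return fmt.Sprintf("tx rejected by pre-check: %v", e.Reason)
}

// checkTx applies the pre-check (if any) before admitting a tx.
func checkTx(tx []byte, preCheck PreCheckFunc) error {
	if preCheck != nil {
		if err := preCheck(tx); err != nil {
			return ErrPreCheck{Reason: err}
		}
	}
	// ... hand the tx to the application via CheckTx ...
	return nil
}
```
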
Ethan Buchman
071ebdd514 Merge pull request #2704 from tendermint/2702-proposal-pol-round-validation
if some process locks a block in round 0, then 0 is valid proposal.PO…
2018-11-05 22:32:15 -08:00
Ethan Buchman
8760c5b4f9 Merge branch 'develop' into 2702-proposal-pol-round-validation 2018-11-05 20:53:05 -08:00
Ethan Buchman
59b75d3f28 Merge pull request #2753 from tendermint/master
Merge pull request #2750 from tendermint/release/v0.26.0
2018-11-03 12:46:55 -07:00
Ethan Buchman
c086d0a341 Merge pull request #2750 from tendermint/release/v0.26.0
Release/v0.26.0
2018-11-03 12:46:14 -07:00
Ethan Buchman
322cee9156 Release/v0.26.0 (#2726)
* changelog_pending -> changelog

* update changelog

* update changelog

* update changelog and upgrading
2018-11-02 13:55:09 -04:00
Anton Kaliaev
80e4fe6c0d [ADR] [DRAFT] pubsub 2.0 (#2532)
* pubsub adr

Refs #951, #1879, #1880

* highlight question

* fix typos after Ismail's review
2018-11-02 10:16:29 +01:00
Anton Kaliaev
fb91ef7462 validate reactor messages (#2711)
* validate reactor messages

Refs #2683

* validate blockchain messages

Refs #2683

* validate evidence messages

Refs #2683

* todo

* check ProposalPOL and signature sizes

* add a changelog entry

* check addr is valid when we add it to the addrbook

* validate incoming netAddr (not just nil check!)

* fixes after Bucky's review

* check timestamps

* beef up block#ValidateBasic

* move some checks into bcBlockResponseMessage

* update Gopkg.lock

Fix

```
grouped write of manifest, lock and vendor: failed to export github.com/tendermint/go-amino: fatal: failed to unpack tree object 6dcc6ddc143e116455c94b25c1004c99e0d0ca12
```

by running `dep ensure -update`

* bump year since now we check it

* generate test/p2p/data on the fly using tendermint testnet

* allow sync chains older than 1 year

* use full path when creating a testnet

* move testnet gen to test/docker/Dockerfile

* relax LastCommitRound check

Refs #2737

* fix conflicts after merge

* add small comment

* some ValidateBasic updates

* fixes

* AppHash length is not fixed
2018-11-01 02:07:18 -04:00
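
The pattern this PR applies across reactors, sketched with a made-up blockchain message type: each wire message gets a ValidateBasic, and Receive rejects (and stops the peer for) anything invalid before processing it.

```go
package blockchain

import (
	"errors"
	"fmt"
)

// bcBlockRequestMessage is illustrative; the real messages and their field
// checks live in the respective reactors.
type bcBlockRequestMessage struct {
	Height int64
}

func (m *bcBlockRequestMessage) ValidateBasic() error {
	if m.Height < 0 {
		return errors.New("negative Height")
	}
	return nil
}

// receive shows the call site: invalid messages stop the peer instead of
// being processed.
func receive(m *bcBlockRequestMessage, stopPeerForError func(error)) {
	if err := m.ValidateBasic(); err != nil {
		stopPeerForError(fmt.Errorf("invalid message: %w", err))
		return
	}
	// ... handle the request ...
}
```
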
Ethan Buchman
a22c962e28 TMHASH is 32 bytes. Closes #1990 (#2732)
* tmhash is fully 32 bytes. closes #1990

* AddressSize

* fix tests

* fix max sizes
2018-10-31 12:42:05 -04:00
Jae Kwon
1660e30ffe Fix general merkle keypath to start w/ last op's key (#2733)
* Fix general merkle keypath to start w/ last op's key

* Update CHANGELOG_PENDING.md
2018-10-31 11:02:13 -04:00
Zarko Milosevic
a83c268d7f Fix spec (#2736) 2018-10-31 10:59:01 -04:00
Zarko Milosevic
c5905900eb Simplify proposal msg (#2735)
* Align Proposal message with spec

* Update spec
2018-10-31 10:27:11 -04:00
Zarko Milosevic
7a03344480 Introduce EventValidBlock for informing peers about wanted block (#2652)
* Introduce EventValidBlock for informing peer about wanted block

* Merge with develop

* Add isCommit flag to NewValidBlock message

- Add test for the case of +2/3 Precommit from the previous round
2018-10-31 09:20:36 -04:00
Ismail Khoffi
a530352f61 Align Vote/Proposal fields with canonical order and fields (#2730)
* reorder fields

* add TestVoteString & update tests

* remove redundant info from Proposal.String()

* update spec

* revert changes on vote.String() -> more human friendly
2018-10-30 12:16:55 -04:00
yutianwu
60437953ac [R4R] libs/log: add year to log format (#2707)
* add year to log format

* update documentation
2018-10-30 11:46:55 -04:00
Dev Ojha
56d7160606 Add ValidatorPubkeyTypes as a consensus param (#2636)
* Add ValidatorPubkeyTypes as a consensus param

Ref #2414

* update spec

* address anton's comment

* Switch to Validator and Validator Params

* Correct changelog entry

* Address bucky's comments!

* forgot to update changelog

* fix typo

* fix Params naming
2018-10-30 11:36:53 -04:00
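
A sketch of the idea behind the new consensus parameter: a whitelist of allowed validator pubkey types consulted when applying validator updates. The struct and method here illustrate the concept and are not copied from the release.

```go
package types

// ValidatorParams holds the set of pubkey types validators may use.
type ValidatorParams struct {
	PubKeyTypes []string
}

// IsValidPubkeyType reports whether a validator update proposing a pubkey
// of the given type should be accepted.
func (p ValidatorParams) IsValidPubkeyType(keyType string) bool {
	for _, t := range p.PubKeyTypes {
		if t == keyType {
			return true
		}
	}
	return false
}
```
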
Zach
cdc252b818 add fail-test file instead of dep, closes #2638 (#2728)
original author of this file is @ebuchman:

https://github.com/ebuchman/fail-test
2018-10-30 10:34:51 -04:00
Ethan Buchman
b24de1c01c update changelog pending and readme (#2725) 2018-10-29 10:13:11 -04:00
Ismail Khoffi
b6d5b8b745 Update to amino 0.14.0 (#2710)
* WIP: update to amino 0.14.0

* update Changelog

* Update to latest amino version (v0.14.0)
2018-10-29 09:16:50 -04:00
Anton Kaliaev
a67ae81469 if some process locks a block in round 0, then 0 is valid proposal.POLRound in rounds > 0
This condition is really hard to get. Initially, lockedRound and
validRound are set to -1 as we start with round 0.

Refs #2702
2018-10-25 16:40:20 +02:00
zhangzheng
bbf15b3d09 tm-monitor: update health after we added / removed node (#2694)
Refs #2693
2018-10-25 12:27:32 +02:00
Ismail Khoffi
6643c5dd11 Catch up with amino 0.13.0 (#2690)
* catch up with amino changes in
https://github.com/tendermint/go-amino/pull/222

* WIP: update to amino v0.13.0

* update to fixed amino release
2018-10-24 21:34:01 -04:00
Jun Kimura
9795e12ef2 fix RecoverAndLogHandler not to call multiple writeheader (#2688) 2018-10-24 10:07:33 +02:00
Ethan Buchman
be929acd6a Update to Amino v0.13.0-rc0 (#2687)
* types: test tm2pm on fully populated header

* upgrade for amino v0.13.0-rc0

* fix lint

* comment out final test
2018-10-23 13:21:47 -04:00
Ethan Buchman
fe1d59ab7b Set protocol versions in NodeInfo from state (#2686)
* use types.NewValidator

* node: set p2p.ProtocolVersion from state, not globals
2018-10-22 17:55:49 -04:00
Ethan Buchman
f94eb42ebe Version bump; Update Upgrading.md; linkify Changelog (#2679)
* version bump

* update UPGRADING.md

* add missing pr numbers to changelog pending

* linkify changelog
2018-10-19 20:27:00 -04:00
Ethan Buchman
9d62bd0ad3 crypto: use stdlib crypto/rand. ref #2099 (#2669)
* crypto: use stdlib crypto/rand. ref #2099

* comment
2018-10-19 14:29:45 -04:00
Ethan Buchman
30519e8361 types: first field in Canonical structs is Type (#2675)
* types: first field in Canonical structs is Type

* fix spec
2018-10-19 14:23:14 -04:00
Ethan Buchman
7c6519adbd Bucky/changelog (#2673)
* update changelog, add authors script

* update changelog

* update changelog
2018-10-19 13:49:04 -04:00
Ethan Buchman
f536089f0b types: dont use SimpleHashFromMap for header. closes #1841 (#2670)
* types: dont use SimpleHashFromMap for header. closes #1841

* changelog and spec

* comments
2018-10-19 11:39:27 -04:00
Ethan Buchman
746d137f86 p2p: Restore OriginalAddr (#2668)
* p2p: bring back OriginalAddr

* p2p: set OriginalAddr

* update changelog
2018-10-18 18:26:32 -04:00
Ethan Buchman
e798766a27 types: remove Version from CanonicalXxx (#2666) 2018-10-18 18:02:20 -04:00
Ethan Buchman
c3384e88e5 adr-016: update int64->uint64; add version to ConsensusParams (#2667) 2018-10-18 17:32:53 -04:00
Ethan Buchman
ed4ce5ff6c ADR-016: Update ABCI Info method for versions (#2662)
* abci: update RequestInfo for versions

* abci: update ResponseInfo for versions

* compile fix

* fix test

* software_version -> version

* comment fix

* update spec

* add test

* comments and fix test
2018-10-18 16:51:17 -04:00
Anton Kaliaev
055d7adffb add tm-abci python ABCI server (fork of py-abci) (#2658)
It utilises async IO for greater performance.
2018-10-18 20:03:45 +04:00
Ethan Buchman
14c1baeb24 ADR-016: Add protocol Version to NodeInfo (#2654)
* p2p: add protocol Version to NodeInfo

* update node pkg. remove extraneous version files

* update changelog and docs

* fix test

* p2p: Version -> ProtocolVersion; more ValidateBasic and tests
2018-10-18 10:29:59 -04:00
Ethan Buchman
455d34134c ADR-016: Add versions to Block and State (#2644)
* types: add Version to Header

* abci: add Version to Header

* state: add Version to State

* node: check software and state protocol versions match

* update changelog

* docs/spec: update for versions

* state: more tests

* remove TODOs

* remove empty test
2018-10-17 15:30:53 -04:00
Jack Zampolin
6a07f415e9 Change error output format for better SDK and Voyager UX (#2648)
* Change error output format

* Update tests

* 🤦

* apply suggestion
2018-10-17 14:26:14 -04:00
Anton Kaliaev
d20693fb16 install scripts: update go version, remove upgrade cmd (#2650)
* [install scripts] update go version, remove upgrade

- upgrading OS is out of scope of installation scripts

* add missing log statement

Refs https://github.com/tendermint/tendermint/pull/2642#discussion_r225786794
2018-10-17 12:47:56 -04:00
Hendrik Hofstadt
f60713bca8 privval: Add IPCPV and fix SocketPV (#2568)
Ref: #2563

I added IPC as an unencrypted alternative to SocketPV.

Besides, I fixed the following aspects of SocketPV:

- Added locking since we are operating on a single socket
- The connection deadline is extended every time a successful packet exchange happens; otherwise the connection would always die permanently x seconds after the connection was established.
- Added a ping/heartbeat mechanism to keep the connection alive; native TCP keepalives do not work in this use-case


* Extend the SecureConn socket to extend its deadline
* Add locking & ping/heartbeat packets to SocketPV
* Implement IPC PV and abstract socket signing
* Refactored IPC and SocketPV
* Implement @melekes comments
* Fixes to rebase
2018-10-17 10:26:14 +02:00
Uzair1995
ed107d0e84 [scripts/install_tendermint_ubuntu] change /root to local user (#2647) 2018-10-17 10:12:31 +04:00
Peng Zhong
80562669bf add Google Analytics for documentation pages (#2645)
We need Google Analytics to start measuring how many developers are viewing our documentation. That way we can gauge how successful and/or useful various pages are. VuePress supports GA and all we have to provide is the tracking code.

This PR also renames the static docs site to "Tendermint Documentation", which is a better representation of the contents than only "Tendermint Core".
2018-10-16 10:10:52 +04:00
Anton Kaliaev
55362ed766 [pubsub] document design shortcomings (#2641)
Refs https://github.com/tendermint/tendermint/issues/1811#issuecomment-427825250
2018-10-16 10:09:24 +04:00
Dev Ojha
124d0db1e0 Make txs and evidencelist use merkle.SimpleHashFromBytes to create hash (#2635)
This is a performance regression, but will also spare the types directory
from knowing about RFC 6962, which is a more correct abstraction. For txs
this performance hit will be fixed soon with #2603. For evidence, the
performance impact is negligible due to it being capped at a small number.
2018-10-15 16:42:47 -04:00
Joon
4ab7dcf3ac [R4R] Unmerklize ConsensusParams.Hash() (#2609)
* Hash() uses tmhash instead of merkle.SimpleHashFromMap

* marshal whole struct

* update comments

* update docs
2018-10-15 16:31:27 -04:00
Joon
26462025bc standardize header.Hash() (#2610) 2018-10-15 16:28:49 -04:00
Zarko Milosevic
287b25a059 Align with spec (#2642) 2018-10-15 16:05:13 -04:00
Anton Kaliaev
37928cb990 set next validators along with validators while replay (#2637)
Closes #2634
2018-10-14 22:28:41 -04:00
Dev Ojha
0790223518 Comment about ed25519 private key format on Sign (#2632)
Closes #2001
2018-10-13 20:01:21 -04:00
Ethan Buchman
0baa7588c2 p2p: NodeInfo is an interface; General cleanup (#2556)
* p2p: NodeInfo is an interface

* (squash) fixes from review

* (squash) more fixes from review

* p2p: remove peerConn.HandshakeTimeout

* p2p: NodeInfo is two interfaces. Remove String()

* fixes from review

* remove test code from peer.RemoteIP()

* p2p: remove peer.OriginalAddr(). See #2618

* use a mockPeer in peer_set_test.go

* p2p: fix testNodeInfo naming

* p2p: remove unused var

* remove testRandNodeInfo

* fix linter

* fix retry dialing self

* fix rpc
2018-10-12 19:25:33 -04:00
Ismail Khoffi
8888595b94 [R4R] Fixed sized and reordered fields for Vote/Proposal/Heartbeat SignBytes (#2598)
* WIP: switching to fixed offsets for SignBytes

* add version field to sign bytes and update order

* more comments on test-cases and add a tc with a chainID

* remove amino:"write_empty" tag

- it doesn't affect if default fixed size fields ((u)int64) are
written or not
- add comment about int->int64 casting

* update CHANGELOG_PENDING

* update documentation

* add back link to issue #1622 in documentation

* remove JSON tags and add (failing test-case)

* fix failing test

* update test-vectors due to added `Type` field

* change Type field from string to byte and add new type alias

- SignedMsgType replaces VoteTypePrevote, VoteTypePrecommit and adds new
ProposalType to separate votes from proposal when signed

- update test-vectors

* fix remains from rebasing

* use SignMessageType instead of byte everywhere

* fixes from review
2018-10-12 19:21:46 -04:00
Dev Ojha
1b51cf3f46 Remove unnecessary layer of indirection / unnecessary allocation of hashes (#2620) 2018-10-12 17:48:00 -04:00
Zarko Milosevic
2363d88979 consensus: Wait for proposal or timeout before prevote (#2540)
* Fix termination issues and improve tests

* Improve formatting and tests based on reviewer feedback
2018-10-12 16:13:01 -04:00
Anton Kaliaev
e1538bf67e state: require block.Time of the first block to be genesis time (#2594)
* require block.Time of the first block to be genesis time

Refs #2587:

```
We only start validating block.Time when Height > 1, because there is no
commit to compute the median timestamp from for the first block. This
means a faulty proposer could make the first block with whatever time
they want.

Instead, we should require the timestamp of block 1 to match the genesis
time.

I discovered this while refactoring the ValidateBlock tests to be
table-driven while working on tests for #2560.
```

* do not accept blocks with negative height

* update changelog and spec

* nanos precision for test genesis time

* Fix failing test (#2607)
2018-10-12 01:03:58 -04:00
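
The rule added above, as a small validation sketch (illustrative function, not the real state package code): block 1 must carry the genesis time exactly, since there is no prior commit to take a median timestamp from; negative heights are rejected outright.

```go
package state

import (
	"fmt"
	"time"
)

// validateBlockTime sketches the new checks for block height and time.
func validateBlockTime(height int64, blockTime, genesisTime time.Time) error {
	if height < 0 {
		return fmt.Errorf("negative height %d", height)
	}
	if height == 1 && !blockTime.Equal(genesisTime) {
		return fmt.Errorf("block time %v does not match genesis time %v",
			blockTime, genesisTime)
	}
	// for height > 1 the usual median-time validation applies (elided)
	return nil
}
```
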
Alessio Treglia
3744e8271d [R4R] Pass nil to NewValidatorSet() when genesis file's Validators field is nil (#2617)
* Pass nil to NewValidatorSet() when genesis file's Validators field is nil

Closes: #2616

* Update CHANGELOG_PENDING.md
2018-10-11 23:37:21 -04:00
Ethan Buchman
7b48ea1788 privval: set deadline in readMsg (#2548)
* privval: set deadline in readMsg

* fixes from review
2018-10-11 13:55:36 -04:00
Dev Ojha
69ecda18f9 refactor nop_event_bus.go into event_bus.go (#2605)
This is just to match the style of the rest of the codebase, and to
reduce the number of files in types.
2018-10-11 10:16:25 -04:00
Connor Stein
cc0bea522c Minor doc fix regarding testnet on non-linux OS (#2601) 2018-10-11 10:03:34 -04:00
Ethan Buchman
feb08fa4f8 ed25519: use golang/x/crypto fork (#2558)
* ed25519: use golang/x/crypto fork

* changelog

* fix GenerateFromPassword

* fixes from review
2018-10-11 10:01:53 -04:00
Anton Kaliaev
9a6cdaddf2 fix contributor's name in CHANGELOG_PENDING (#2599)
Refs https://github.com/tendermint/tendermint/pull/2506#issuecomment-428458974
2018-10-10 21:29:13 +04:00
Dev Ojha
12fa9d1cab crypto/merkle: Remove byter in favor of plain byte slices (#2595)
* crypto/merkle: Remove byter in favor of plain byte slices

This PR is fully backwards compatible in terms of function output!
(The Go API differs though) The only test case changes was to refactor
it to be table driven.

* Update godocs per review comments
2018-10-10 12:46:09 -04:00
Matthew Slipper
92343ef484 Add additional metrics (#2500)
* Add additional metrics
Continues addressing https://github.com/cosmos/cosmos-sdk/issues/2169.
* Add nop metrics to fix NPE
* Tweak buckets, code review
* Update buckets
* Update docs with new metrics
* Code review updates
2018-10-10 18:27:43 +02:00
Dev Ojha
ee7b3d260e crypto/amino: Address anton's comment on PubkeyAminoRoute (#2592) 2018-10-10 12:13:42 +04:00
Ethan Buchman
6ec52a9233 types: cap evidence in block validation (#2560)
* cap evidence in block validation

* state: use table-driven test for ValidateBlockHeader

* state: test evidence cap

* fixes from review
2018-10-09 13:31:21 -04:00
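
A sketch of the evidence cap, with stand-in constants: the number of evidence items allowed per block is derived from a per-block evidence byte budget divided by the maximum size of one item.

```go
package state

import "fmt"

// The constants are illustrative stand-ins for the real budgets.
const (
	maxEvidenceBytesPerBlock = 1 << 10 // per-block evidence byte budget
	maxEvidenceBytes         = 1 << 7  // maximum size of one evidence item
)

// validateEvidenceCount rejects blocks carrying more evidence than fits
// in the per-block budget.
func validateEvidenceCount(numEvidence int) error {
	maxNum := maxEvidenceBytesPerBlock / maxEvidenceBytes
	if numEvidence > maxNum {
		return fmt.Errorf("too much evidence: got %d, max %d", numEvidence, maxNum)
	}
	return nil
}
```
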
Anton Kaliaev
05a119aab5 libs: Test deadlock from listener removal inside callback (#2588)
Closes #2575
2018-10-09 19:31:06 +02:00
Dev Ojha
d7341c4057 distribution: Lock binary dependencies to specific commits (#2550) 2018-10-09 19:28:15 +02:00
Anton Kaliaev
3fcb62b931 :tools: Update docs & fix build-docker Makefile target (#2584)
bump alpine version to 3.8
2018-10-09 16:04:15 +02:00
Dev Ojha
8761b27489 crypto: Add a way to go from pubkey to route (#2574)
This is intended for use in a future PR for #2414
2018-10-09 14:41:33 +02:00
Joon
e7708850c0 libs: Let prefixIterator implements Iterator correctly (#2581)
Fixes #2577
2018-10-09 14:21:36 +02:00
Overbool
561fc2d717 test(db): Test itr.Value in checkValuePanics (#2580)
Fixes #2573
2018-10-09 14:19:00 +02:00
Anton Kaliaev
724e264ff5 separate mock evidence from real evidence (#2571)
Closes #2525
2018-10-09 14:10:05 +02:00
Anton Kaliaev
989a2f32b1 libs: Refactor & document events code (#2576)
* [libs/events] add more godoc comments
* [libs/events] refactor code
- improve var naming
- improve code structure
- do not use defers for unlocking mutexes (defer takes time)
2018-10-09 13:09:40 +02:00
goolAdapter
4b2bf023dd libs: Fix event concurrency flaw (#2519)
* fix event concurrency flaw
* modify changelog
* fix a mistake
* fix a lint issue
* modify changelog
* modify for review issue
* modify for review issue
* modify for review issue
2018-10-08 15:36:31 +02:00
Dev Ojha
c17547ac2f Switch nodeID to use tmhash.Size, add test names for net addr tests (#2559)
* Switch nodeID to be tmhash.Size, add test names for net addr tests
Both of these came up when locally trying to change tmhash size.
* fix error introduced by merge
2018-10-08 15:20:41 +02:00
Dev Ojha
b1e7fac787 crypto/random: Use chacha20, add forward secrecy (#2562)
Ref #2099
2018-10-08 15:15:56 +02:00
Anton Kaliaev
35b671214c tools: Refactor tm-bench (#2570)
* specify time unit for FlushThrottleTimeout in TestP2PConfig
Refs #2555
* [tm-bench] refactor code
https://github.com/tendermint/tendermint/pull/2405#pullrequestreview-157166387
2018-10-08 15:03:38 +02:00
bradyjoestar
4c0c6e0116 [tm-bench] exit on CTRL-C (#2405) 2018-10-08 11:06:01 +04:00
Zach
b8556b97b8 circle: save p2p logs as artifacts (#2566) 2018-10-08 10:49:50 +04:00
goolAdapter
5f88fe0e9b fix p2p switch FlushThrottle value (#2569) 2018-10-08 10:05:12 +04:00
Pierrick Hymbert
1d8348d707 [p2p] Malformed external address causes SIGSEGV (if URL has empty host) (#2564)
fix #2071

Signed-off-by: phymbert <pierrick.hymbert@gmail.com>
2018-10-06 09:53:52 -04:00
Ethan Buchman
f471fc4963 abci: codespace (#2557)
* abci: codespace

* changelog
2018-10-06 09:20:15 -04:00
Joon
2d726a620b add adr (#2553) 2018-10-05 23:44:53 -04:00
Dev Ojha
dfda7b442f types: Remove pubkey from validator hash (#2512)
* types: Remove pubkey from validator hash

* undo lock file change

* Update Spec
2018-10-05 19:26:52 -04:00
Zach
6e5f58191e add spec/abci/readme to sidebar (#2551) 2018-10-05 09:51:23 -04:00
Dev Ojha
c648c93807 Fix random distribution in bitArray.PickRandom (#2534)
* Fix random distribution in bitArray.PickRandom

Previously it was very biased. 63 "_" followed by a single "x" had
much greater odds of being chosen. Additionally, the last element was
skewed. This fixes that by first preprocessing the set of all true
indices, and then randomly selecting a single element from there.

This commit also makes the code here significantly simpler, and
improves test cases.

* unlock mtx right after we select true indices
2018-10-05 11:00:50 +04:00
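
The fix above boils down to sampling uniformly from the set of true indices instead of sampling positions in a biased way. A simplified sketch over a plain `[]bool` (the real BitArray is word-packed and holds its own mutex):

```go
package bits

import "math/rand"

// pickRandom collects the indices of all true bits first, then picks
// uniformly among them. The second return value is false if no bit is set.
func pickRandom(bits []bool) (int, bool) {
	trueIndices := make([]int, 0, len(bits))
	for i, b := range bits {
		if b {
			trueIndices = append(trueIndices, i)
		}
	}
	if len(trueIndices) == 0 {
		return 0, false
	}
	return trueIndices[rand.Intn(len(trueIndices))], true
}
```
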
bradyjoestar
5b120d788a lite support maxOpenConnections (#2413) 2018-10-04 20:39:24 -04:00
Alexander Simmerl
e6a55b7be0 consensus: Add ADR for first stage consensus refactor (#2462) 2018-10-04 20:35:35 -04:00
Ismail Khoffi
d2be7482e1 [ADR][DRAFT] 024: SignBytes and validator types in privval (#2445)
* first draft for ADR summarizing discussion from:
https://github.com/tendermint/tendermint/issues/1622

* fix link and add comment about pub-key per message and fix link

* fix link and add comment about pub-key per message; also:
 - fix link
 - add little diagram
 - fix typo

* Add a slightly different approach

* typo and ADR number
2018-10-04 20:28:27 -04:00
Ethan Buchman
44a72fb642 Merge pull request #2544 from tendermint/bucky/adr-029
Bucky/adr 029
2018-10-04 20:09:34 -04:00
Ethan Buchman
c15fc9ff63 adr-029: update CheckBlock 2018-10-04 20:11:21 -04:00
JamesRay
be1760cc25 Create adr-021-check block txs before prevote.md 2018-10-04 18:43:17 -04:00
Anton Kaliaev
5b1b1ea58a [libs/autofile] fix DATA RACE by removing openFile() call (#2539)
There's a time window after we call RotateFile() where autofile#index+1
does not exist. It will be created during the next call to Write(). BUT
if somebody calls NewReader() before Write(), it will fail with "open
  /tmp/wal#index+1/wal: no such file or directory"

We must create file (either by calling gr.Head.openFile() or directly)
during NewReader() to ensure read calls succeed.

Closes #2538
2018-10-04 17:57:59 -04:00
Jeremiah Andrews
f11aef20a0 Add ADR for Commit changes (#2374) 2018-10-04 17:54:45 -04:00
Zach
303649818c update docs links & sidebar (#2541)
* docs: fix links

* docs: add readme from each section to the sidebar
2018-10-04 17:22:41 -04:00
Zarko Milosevic
12675ecd92 consensus: Wait timeout precommit before starting new round (#2493)
* Disable transitioning to new round upon 2/3+ of Precommit nils

Pull in ensureVote test function from https://github.com/tendermint/tendermint/pull/2132

* Add several ensureX test methods to wrap channel read with timeout

* Revert panic in tests
2018-10-04 09:37:13 -04:00
Anton Kaliaev
cb2e58411f add a missing changelog entry 2018-10-03 11:29:04 +04:00
ValarDragon
0755a5203d bit_array: Simplify subtraction
also, fix potential bug in Or function
2018-10-03 11:29:04 +04:00
JamesRay
c94133ed1b Fix a bug in bit_array's sub function (#2506) 2018-10-03 10:28:46 +04:00
Anton Kaliaev
f3d08f969d [rpc] fix /abci_query: trusted was renamed to prove (#2531) 2018-10-02 20:31:04 -04:00
goolAdapter
5c6999cf8f fix evidence db iter leak (#2516)
Also make reversing a slice more efficient
2018-10-02 11:52:56 +04:00
Dev Ojha
fd1b8598bc Make block_test.go more table driven (#2526) 2018-10-02 11:47:20 +04:00
Anton Kaliaev
32e274cff0 config: Refactor ValidateBasic (#2503)
* timeouts as time.Duration are also breaking for old configs
* split BaseConfig#ValidateBasic into smaller methods
2018-10-01 14:38:35 +02:00
Ethan Buchman
ccd04587ff docs/spec/abci: consensus params and general merkle (#2524)
* docs: links to dirs need a slash

* docs/spec/abci: consensus params and general merkle
2018-09-30 15:08:01 -04:00
Ethan Buchman
52e21cebcf remove some xxx comments and the config.mempool.recheck_empty (#2505)
* remove some XXX

* config: remove Mempool.RecheckEmpty

* docs: remove recheck_empty
2018-09-30 13:28:34 -04:00
Dev Ojha
69c7aa77bc clist: speedup Next by removing defers (#2511)
This change doubles the speed of the mempool's reaping.
Before:

```
BenchmarkReap-8   	    5000	    365390 ns/op	  122887 B/op
```

After:

```
BenchmarkReap-8   	   10000	    158274 ns/op	  122882 B/op
```
2018-09-30 13:26:14 -04:00
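
A before/after sketch of the defer removal on the hot path, using a stripped-down element type; the benchmark numbers above come from the commit message.

```go
package clist

import "sync"

// CElement is a stripped-down stand-in for the concurrent list element.
type CElement struct {
	mtx  sync.RWMutex
	next *CElement
}

// NextWithDefer is the old shape: a defer on every call.
func (e *CElement) NextWithDefer() *CElement {
	e.mtx.RLock()
	defer e.mtx.RUnlock()
	return e.next
}

// Next is the new shape: on a hot path hit once per reaped tx, unlocking
// explicitly avoids the per-call defer overhead measured above.
func (e *CElement) Next() *CElement {
	e.mtx.RLock()
	n := e.next
	e.mtx.RUnlock()
	return n
}
```
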
Ethan Buchman
ead9fc0179 Docs cleanup (#2522)
* minor doc cleanup

* docs/tools: link fixes and readme

* docs/networks: networks/local/README.md

* docs: update vuepress config

* docs: fixes from review
2018-09-30 12:35:52 -04:00
Ethan Buchman
f36ed7e7ff General Merkle Follow Up (#2510)
* tmlibs -> libs

* update changelog

* address some comments from review of #2298
2018-09-28 23:32:13 -04:00
Joon
71a34adfe5 General Merkle Proof (#2298)
* first commit

finalize rebase

add protoc_merkle to Makefile

* in progress

* fix kvstore

* fix tests

* remove iavl dependency

* fix tx_test

* fix test_abci_cli

fix test_apps

* fix test_apps

* fix test_cover

* rm rebase residue

* address comment in progress

* finalize rebase
2018-09-28 20:03:19 -04:00
Ismail Khoffi
fc073746a0 privval: Switch to amino encoding in SignBytes (#2459)
* switch to amino for SignBytes and add Reply with error message

- currently only Vote is done

* switch Reply type in socket for other messages

 - add error description on error

* add TODOs regarding error handling

* address comments from peer review session (thx @xla)

 - contains all changes besides the test-coverage / error'ing branches

* increase test coverage:

 - add tests for each newly introduced error'ing code path

* return error if received wrong response

* add test for wrong response branches (ErrUnexpectedResponse)

* update CHANGELOG_PENDING and related documentation (spec)

* fix typo: s/CanonicallockID/CanonicalBlockID

* fixes from review
2018-09-28 19:57:29 -04:00
Anton Kaliaev
47bc15c27a disable mempool WAL by default (#2490) 2018-09-28 19:28:42 -04:00
HaoyangLiu
8dda3c3b28 lite: Add synchronization in lite verify (#2396)
* Implement issue 2386: add synchronization in lite verify and change all Certify to Verify

* Replace make(chan struct{}, 0) with make(chan struct{})

* Parameterize memory cache size and add concurrent test

* Refactor import order
2018-09-28 19:23:21 -04:00
Ethan Buchman
5173fe9414 Merge pull request #2497 from tendermint/zach/version
docs: Add version
2018-09-27 16:41:13 -04:00
zramsay
d007ade6c3 add version to docs 2018-09-26 17:49:20 -04:00
Anton Kaliaev
4c4a95ca53 config: Add ValidateBasic (#2485)
* add missing options to config.toml template and docs
Refs #2232
* config#ValidateBasic
Refs #2232
* [config] timeouts as time.Duration, not ints
Why:
- native type provides better guarantees than ", in ms" comment (harder
to shoot yourself in the leg)
- flexibility: you can change units
2018-09-26 12:04:44 +02:00
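
A sketch of the two config changes, with illustrative field names: timeouts typed as time.Duration rather than integer milliseconds, and a ValidateBasic that rejects nonsensical values up front.

```go
package config

import (
	"errors"
	"time"
)

// RPCConfig is illustrative; only the shape of the change matters here.
type RPCConfig struct {
	TimeoutBroadcastTxCommit time.Duration
	MaxOpenConnections       int
}

// ValidateBasic rejects obviously invalid values before the node starts.
func (cfg *RPCConfig) ValidateBasic() error {
	if cfg.TimeoutBroadcastTxCommit < 0 {
		return errors.New("timeout_broadcast_tx_commit can't be negative")
	}
	if cfg.MaxOpenConnections < 0 {
		return errors.New("max_open_connections can't be negative")
	}
	return nil
}
```
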
zramsay
df329e8f27 rpc/libs/doc: formatting for godoc, closes #2420 2018-09-26 11:37:26 +04:00
zramsay
cf8b42d813 rpc/core: ints are strings in responses, closes #1896 2018-09-26 11:37:26 +04:00
goolAdapter
110b07fb3f libs: Call Flush() before rename #2428 (#2439)
* fix Group.RotateFile: need to call Flush() before rename. #2428
* fix some review issue. #2428
 refactor Group's config: replace  setting member with initial option
* fix a handwriting mistake
* fix a time window error between rename and write.
* fix a syntax mistake.
* change option name Get_ to With_
* fix review issue
* fix review issue
2018-09-25 13:22:45 +02:00
Matthew Slipper
587116dae1 metrics: Add additional metrics to p2p and consensus (#2425)
* Add additional metrics to p2p and consensus
Partially addresses https://github.com/cosmos/cosmos-sdk/issues/2169.
* WIP
* Updates from code review
* Updates from code review
* Add instrumentation namespace to configuration
* Fix test failure
* Updates from code review
* Add quotes
* Add atomic load
* Use storeint64
* Use addInt64 in writePacketMsgTo
2018-09-25 13:14:38 +02:00
Anton Kaliaev
eb0da7f9cb libs: Handle SIGHUP explicitly inside autofile (#2480)
* handle SIGHUP explicitly inside autofile
Refs #2260
* libs: Use consistent channel suffix
2018-09-25 12:43:28 +02:00
Anton Kaliaev
d12e55c494 node: Respond always to OS interrupts (#2479)
* stop node upon receiving SIGTERM or CTRL-C even during genesis sleep by setting up the interrupt handler before starting the node

Closes #2434

* call Start, not OnStart when starting a component to avoid:

```
E[09-24|10:13:15.805] Not stopping PubSub -- have not been started yet module=pubsub impl=PubSub
```

being printed on exit
2018-09-25 12:24:18 +02:00
Ethan Buchman
d297f02227 Merge pull request #2438 from tendermint/bucky/versions
version: types
2018-09-23 16:24:14 -04:00
Ethan Buchman
d0f6864c69 Merge pull request #2476 from tendermint/master
Master back to develop
2018-09-23 10:23:17 -04:00
Ethan Buchman
0c9c3292c9 Merge pull request #2473 from tendermint/release/v0.25.0
Release/v0.25.0
2018-09-23 10:22:46 -04:00
Ethan Buchman
e4ee34cfbc version: types 2018-09-23 09:19:12 -04:00
Anton Kaliaev
d16f52eab3 copy without sudo (#2474) 2018-09-23 08:31:19 -04:00
Ethan Buchman
27ba6e8a42 Minor docs cleanup (#2472)
* docs: link consensus to blockchain spec. closes #2422

* docs: deprecate research section. closes #2401

* docs: fix some links

* docs: fix some markdown lists

* docs: fix more links
2018-09-23 02:25:33 -04:00
Ethan Buchman
a8eee4ab28 Merge pull request #2471 from tendermint/master
Merge master to develop
2018-09-23 01:54:33 -04:00
Ethan Buchman
cd172acee8 Merge pull request #2467 from tendermint/release/v0.25.0
Release/v0.25.0
2018-09-23 01:33:35 -04:00
Ethan Buchman
97b43d875a update changelog 2018-09-23 01:25:10 -04:00
Ethan Buchman
b394bd5b5c Merge branch 'develop' into release/v0.25.0 2018-09-23 01:24:50 -04:00
Ethan Buchman
f5824bc837 Update abci and app docs (#2470)
* mempool: update some comments

* make build_c

* docs: notes about databases and WAL files

* docs: determinism. closes #1279

* docs: small note about query paths. closes #2090

* docs: gas

* docs: abci consensus params
2018-09-23 01:14:05 -04:00
Dev Ojha
111e627037 mempool: Filter new txs if they have insufficient gas (#2385)
This also renames the prior mempool filter to the "precheck filter"; the
new filter is called the "postcheck filter".

This PR also fixes a bug where the precheck filter previously didn't
account for the amino overhead, which could allow a maliciously sized tx to
prevent blocks from getting any txs in them.

* Move maxGas outside of function definition to avoid race condition
* Type filter funcs and make public
* Use helper method for post check
* Remove superfluous Filter suffix
* Move default pre/post checks into package
* Fix broken references
* Fix typos
* Expand on examples for checks
2018-09-22 02:50:06 +02:00
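
The "postcheck filter" described above, sketched with a stand-in response type instead of the real ABCI ResponseCheckTx: after CheckTx, a tx whose reported GasWanted exceeds the block gas limit is rejected.

```go
package mempool

import "fmt"

// checkTxResponse stands in for the ABCI CheckTx response.
type checkTxResponse struct {
	GasWanted int64
}

// PostCheckFunc runs after the app's CheckTx response.
type PostCheckFunc func(tx []byte, res *checkTxResponse) error

// PostCheckMaxGas rejects txs whose GasWanted exceeds the block gas limit.
func PostCheckMaxGas(maxGas int64) PostCheckFunc {
	return func(tx []byte, res *checkTxResponse) error {
		if maxGas == -1 {
			return nil // -1 means no limit
		}
		if res.GasWanted > maxGas {
			return fmt.Errorf("gas wanted %d exceeds max gas %d",
				res.GasWanted, maxGas)
		}
		return nil
	}
}
```
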
Ethan Buchman
ee8b8bbefb flush changelog pending, bump version 2018-09-21 17:41:02 -04:00
Ethan Buchman
dde0936fb8 linkify changelog 2018-09-21 17:37:40 -04:00
Ethan Buchman
2dfde37f44 update changelog and upgrading 2018-09-21 17:34:36 -04:00
Zarko Milosevic
f99e4010f2 Add stats related channel between consensus state and reactor (#2388) 2018-09-21 14:36:48 -04:00
Alessio Treglia
f11db8c1b0 Pass http.ServeTLS() errors back to the caller (#2461)
Closes: #2460

* Pass http.ServeTLS() errors back to the caller
* Update CHANGELOG
* Amend StartHTTPServer() too for consistency's sake
* Revert "Amend StartHTTPServer() too for consistency's sake"
This reverts commit 23bfb4c2e917f581702291fe3ea69fce23f8c89d.
2018-09-21 18:12:29 +02:00
Zach
886a83dfb8 docs: Add assets/instructions for local docs build (#2453)
* ungitignore
* add docs/.vuepress to enable local builds
* config.js needs to be here, one less step
* docs: make spec in sidebar nicer
* docs: local build instructions
2018-09-21 13:39:55 +02:00
Anton Kaliaev
8d50bb9dad consensus: follow up to removing some consensus params (#2427)
* follow up to removing some consensus params Refs #2382
* change args type to int64 in state#makeParams
* make valsCount and evidenceCount ints again
* MaxEvidenceBytesPerBlock: include magic number in godoc
* [spec] creating a proposal
* test state#TxFilter
* panic if MaxDataBytes is less than 0
* fixes after review
* use amino#UvarintSize to calculate overhead
0c74291f3b/encoder.go (L85-L90)
* avoid cyclic imports
* you can do better Go, come on
* remove testdouble package
2018-09-21 11:00:36 +02:00
Ethan Buchman
7b727bf3d0 Minor changelog fixes (#2449)
* readme: add some libs to semver

* changelog: some updates
2018-09-20 11:55:36 -04:00
Aravind
84b518b8d3 rpc: Add /consensus_params endpoint (#2415)
* Add /consensus_params endpoint
* Incorporated change https://github.com/tendermint/tendermint/pull/2415#discussion_r219078049
* Fixed an error in pervious commit
2018-09-20 14:31:20 +02:00
Anton Kaliaev
bd951171db docs: Add missing changelog entry and comment (#2451)
Follow-up on https://github.com/tendermint/tendermint/pull/2411
2018-09-20 11:14:02 +02:00
Dev Ojha
0d6b75bd53 common: Delete unused functions (#2452)
These functions were not used anywhere within tendermint, or the
cosmos-sdk. (The functionality is already duplicated in the cosmos-sdk
types package)

* common: Delete unused functions within byteslice
* remove more unused code from strings.go and int.go
* Remove more unused code from int.go
* Fix testcase
2018-09-20 11:12:42 +02:00
ValarDragon
f76312ffe6 docs: Update secure-p2p doc to match the spec + current implementation
Closes #2421.
I am of the opinion that the spec is easier to read than this though,
and we shouldn't really explain this here other than that we use a variant
of station to station protocol, with X25519 for the diffie hellman, and we
describe the related security properties.
2018-09-20 09:56:15 +02:00
bradyjoestar
8aad09d9d4 Output error instead of panic when the given db_backend is not initialised (#2411)
Closes #2371
2018-09-20 09:53:25 +04:00
Ethan Buchman
faa3509646 adr-021: note about tag spacers (#2362) 2018-09-19 18:56:23 -04:00
Ethan Buchman
a045c562a2 update adr-016 (#2435) 2018-09-19 18:11:11 -04:00
bradyjoestar
26aa978456 Make mempool cache a proper LRU (#2407)
Closes #2399
2018-09-19 21:40:22 +04:00
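
A minimal proper-LRU cache sketch of the kind this change introduces (a map for O(1) membership plus a recency-ordered list), not the actual mempool cache code. Push returning false doubles as the duplicate check.

```go
package mempool

import "container/list"

// txCache evicts the least recently used entry when full, instead of
// evicting in insertion order only.
type txCache struct {
	size int
	m    map[string]*list.Element
	l    *list.List // front = most recently used
}

func newTxCache(size int) *txCache {
	return &txCache{size: size, m: make(map[string]*list.Element), l: list.New()}
}

// Push returns false if the tx was already cached; it also refreshes the
// entry's recency and evicts the oldest entry when the cache is full.
func (c *txCache) Push(tx []byte) bool {
	key := string(tx)
	if e, ok := c.m[key]; ok {
		c.l.MoveToFront(e)
		return false
	}
	if c.l.Len() >= c.size {
		oldest := c.l.Back()
		c.l.Remove(oldest)
		delete(c.m, oldest.Value.(string))
	}
	c.m[key] = c.l.PushFront(key)
	return true
}
```
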
Anton Kaliaev
aa5495f24e p2p: add RPCAddress to NodeInfoOther.String() (#2442)
- remove second AminoVersion from
https://github.com/tendermint/tendermint/pull/2426/files#diff-f2fefc7a06ea0d1e0a910196901e50aaR86

Refs #2433
2018-09-19 15:54:14 +02:00
Dev Ojha
c0cdb9d441 libs : Remove libs/common/word.go (#2431)
We didn't use this code anywhere in the codebase. As such, we probably
should reduce the surface area we support. In the event that we do
in fact require 256 bit words inside of tendermint, we should adapt
the stdlibs' internal word representations, which also handles SIMD.

Inside of the SDK, a separate solution for big ints / larger words
is employed, which uses big ints. This in turn does utilize the stdlibs
SIMD support.
2018-09-19 15:38:30 +02:00
Ethan Buchman
91a8767083 proxy: remove Handshaker from proxy pkg (#2437)
Handshaker was removed from proxy package so it can be called
independently of starting the abci app connections and can return a
result to the caller.
2018-09-19 15:35:09 +02:00
zhangzheng
3e099f75c7 minor note in indexing-transactions.md (#2443) (#2444)
Fix inconsistent documents and code
2018-09-19 13:46:37 +04:00
Alexander Simmerl
bdd01310a0 p2p: Integrate new Transport
We are swapping the existing listener implementation with the newly
introduced Transport and its default implementation MultiplexTransport,
removing a large chunk of old connection setup and handling scattered
over the Peer and Switch code. The Switch requires a Transport now and
handles externally passed Peer filters.
2018-09-18 22:26:43 +02:00
Alexander Simmerl
be5d68ea4f p2p: Implement PeerTransport
This is the implementation for the design described in ADR 12[0]. It's
the first step of a larger refactor of the p2p package as tracked in
#2067[1]: a Transport interface bundling all concerns of low-level
connection handling and isolating the rest of peer lifecycle management
from the specifics of the low-level internet protocols. Even if the
swappable implementation is never utilised, the isolation of conn-related
code in one place will already help with reasoning about the execution
path and with addressing security-sensitive issues surfaced through bounty
programs and audits.

We deliberately decided to not have Peer filtering and other management
in the Transport, its sole responsibility is the translation of
connections to Peers, handing those to the caller fully setup. It's the
responsibility of the caller to reject those and/or keep track. Peer
filtering will take place in the Switch and can be inspected in the
following commit.

This changeset is additionally an exercise in cleanly separating logic
from other infrastructural concerns like logging and instrumentation, by
leveraging a clean and minimal interface. How this looks can be seen in a
follow-up change.

Design #2069[2]
Refs #2067[3]
Fixes #2047[4]
Fixes #2046[5]

changes:
* describe Transport interface
* implement new default Transport: MultiplexTransport
* test MultiplexTransport with new constraints
* implement ConnSet for concurrent management of net.Conn, synchronous
to PeerSet
* implement and expose duplicate IP filter
* implement TransportOption for optional parametrisation

[0] https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-012-peer-transport.md
[1] https://github.com/tendermint/tendermint/issues/2067
[2] https://github.com/tendermint/tendermint/pull/2069
[3] https://github.com/tendermint/tendermint/issues/2067
[4] https://github.com/tendermint/tendermint/issues/2047
[5] https://github.com/tendermint/tendermint/issues/2046
2018-09-18 22:26:43 +02:00
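
A sketch of what a Transport in this design looks like; the method set and types here illustrate the ADR-012 shape described above rather than the exact interface that landed.

```go
package p2p

import "net"

// Peer and NodeAddress are stand-ins; the point is that the Transport owns
// connection setup and hands back fully set-up peers, while filtering and
// lifecycle stay with the Switch.
type Peer interface {
	RemoteIP() net.IP
	Close() error
}

type NodeAddress struct {
	IP   net.IP
	Port uint16
}

// Transport translates low-level connections into Peers.
type Transport interface {
	// Accept blocks until an inbound connection has completed the handshake
	// and returns it as a Peer.
	Accept() (Peer, error)
	// Dial connects to the given address and returns the resulting Peer.
	Dial(addr NodeAddress) (Peer, error)
	// Cleanup releases resources associated with a Peer (e.g. its net.Conn).
	Cleanup(p Peer)
}
```
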
Anton Kaliaev
89462c52d9 spec: add missing field to NodeInfoOther (#2426)
and fix formatting

Refs https://github.com/tendermint/tendermint/pull/2417#discussion_r218080500
2018-09-18 11:28:32 +02:00
Zarko Milosevic
2fbf810cd8 Delay starting node until Genesis time (#2389) 2018-09-18 13:16:50 +04:00
Anton Kaliaev
64fc8f8157 Merge pull request #2337 from tendermint/anton/cleveldb
BlockIntervalSeconds metric & DB tests cleanup & benchmark improv.
2018-09-18 12:48:45 +04:00
Anton Kaliaev
e1bda36c6c switch back to original fork 2018-09-18 12:16:03 +04:00
Anton Kaliaev
ff9d0cdfb6 generate random txs
otherwise we're benchmarking overriding a single key (because the hash stays
the same!)
2018-09-18 12:16:03 +04:00
Anton Kaliaev
788474d08d change consensus_block_interval_seconds metric type to gauge
Otherwise, it's impossible to see outliers
https://github.com/tendermint/tendermint/issues/1835#issuecomment-402054099
2018-09-18 12:16:01 +04:00
Anton Kaliaev
484194789f update Vagrantfile to install go1.11 2018-09-18 12:15:44 +04:00
Anton Kaliaev
747797bf3b cleanup after tests! 2018-09-18 12:15:44 +04:00
Anton Kaliaev
76302c651f remove LICENSE from libs/db in favor of root license 2018-09-18 12:15:44 +04:00
Anton Kaliaev
5bfb9001eb switch from jmhodges/levigo to DataDog/leveldb
Why:
original fork is abandoned and not supported anymore.

Changes:
- LevelDB 1.19 (LevelDB and Snappy are both compiled and linked statically, so while you will not need them installed on your target machine, you should have a roughly compatible version of libstdc++.)
- snappy and lz4 libs included by default
2018-09-18 12:15:43 +04:00
Anton Kaliaev
38bced2440 [types] add Address to GenesisValidator (#2418)
Refs #1714
2018-09-18 11:59:52 +04:00
Zach
4fe9906361 docs: Update README (#2393)
* update DOCS_README
* add spec to docs & other lil fixes (#2402)
2018-09-17 18:43:10 +02:00
Anton Kaliaev
fc7f9bcaf6 rpc: Transform /status result.node_info.other into map (#2417)
* [rpc] transform /status result.node_info.other into map
* amino does not support maps, duh

Refs #2391
2018-09-17 18:39:52 +02:00
Ismail Khoffi
8ae3334423 [libs/autofile & db/fsdb] Throw error if file permissions change (#2286)
* Enforce file permissions in case they've changed

* test behaviour for autofile

* use testify in tests and rename `fInf` to `fileInfo`

* return an error if file permissions have changed

- if we can't read the file, we'll still panic

* get rid of "github.com/pkg/errors" dependency

* address review comments:

- prefix instead of suffix
- add state to err and construct formatting in Error() method

* address review comments:

- move error to libs/errors
2018-09-17 14:38:29 +04:00
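
A sketch of the permission guard (illustrative helper, not the actual autofile/fsdb code): compare the file's current permission bits against those recorded when it was opened, and return an error if they differ.

```go
package autofile

import (
	"fmt"
	"os"
)

// checkPermsUnchanged errors out (rather than writing) if the file's
// permission bits changed since it was opened.
func checkPermsUnchanged(f *os.File, openedMode os.FileMode) error {
	info, err := f.Stat()
	if err != nil {
		return err
	}
	if perm := info.Mode().Perm(); perm != openedMode.Perm() {
		return fmt.Errorf("file %q permissions changed from %v to %v",
			f.Name(), openedMode.Perm(), perm)
	}
	return nil
}
```
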
zhangzheng
c6c0b52d0c tools/tm-bench: bounds check for txSize and improving test cases (#2410)
Fixes #2409
2018-09-17 13:08:47 +04:00
Anton Kaliaev
e3e3c13741 [common] revert started flag when service already stopped (#2326)
also, return ErrNotStarted when trying to stop a not-running service
2018-09-12 19:07:29 -04:00
Dev Ojha
1ea64fc27f Make mempool aware of MaxGas requirement (#2360)
* Make mempool aware of MaxGas requirement

* update spec

* Add tests

* Switch GasWanted from kv store to persistent kv store

* Fix typo in test name

* switch back to using kvstore, not persistent kv store
2018-09-12 16:41:19 -04:00
Anton Kaliaev
0e1cd88863 Remove ConsensusParams.TxSize and ConsensusParams.BlockGossip (#2364)
* remove ConsensusParams.TxSize and ConsensusParams.BlockGossip

Refs #2347

* block part size is now fixed

Refs #2347

* use max data size, not max bytes for tx limit

Refs #2347
2018-09-12 15:44:43 -04:00
Zach
33b4617e9a docs: update link to rpc (#2361)
* md links dont work in slate

* docs: link to rpc

* docs: use unsafe_reset_all

* do not advertise unsafe_reset_priv_validator
2018-09-12 21:03:17 +04:00
Ethan Buchman
503de8c9b8 docs/spec/abci: improve docs on AppHash (#2363) 2018-09-10 09:10:53 -04:00
xiaoping
dea4e96f66 fix docs links (#2352) 2018-09-10 10:42:48 +04:00
Ethan Buchman
a57aae7072 [ADR] ABCI errors and events (#2314)
* Start of ADR

* flesh out abci events and errors adrs

* adr: move 012 to 023

* adr-022: add note from cwgoes
2018-09-09 14:04:01 -04:00
Zach
0bec20a1e3 update readme, closes #2357 (#2359) 2018-09-08 16:45:12 +04:00
Ethan Buchman
83a7c04bce Merge pull request #2356 from tendermint/master
Merge master back to develop
2018-09-07 07:47:36 -04:00
Ethan Buchman
d419fffe18 Merge pull request #2350 from tendermint/release/v0.24.0
Release/v0.24.0
2018-09-07 07:46:44 -04:00
Ethan Buchman
c8895dab98 minor note in UPGRADING.md 2018-09-07 07:43:41 -04:00
Ethan Buchman
8b94deca73 Merge pull request #2354 from tendermint/anton/fix-typos-in-spec
Fix typos in spec
2018-09-07 07:28:50 -04:00
Ethan Buchman
4cd2e40fb1 TMBIN -> Amino 2018-09-07 07:28:58 -04:00
Anton Kaliaev
47dc4e6428 fix a few typos in spec 2018-09-07 11:40:16 +04:00
Ethan Buchman
94288006ba minor fixes 2018-09-06 22:47:05 -04:00
Ethan Buchman
22445a5029 Merge pull request #2349 from tendermint/release/v0.24.0
update UPGRADING.md and minor docs fixes
2018-09-06 22:25:20 -04:00
Ethan Buchman
299d46304d update UPGRADING.md and minor docs fixes 2018-09-06 22:35:31 -04:00
Ethan Buchman
5106af484f docs: add abci spec to config.js 2018-09-06 22:18:15 -04:00
Ethan Buchman
114c405120 docs/spec/abci: fixes and more from #2249 2018-09-06 22:17:00 -04:00
Ethan Buchman
246a56283a Merge pull request #2343 from tendermint/release/v0.24.0
Major spec update to prepare v0.24.0 for release
2018-09-06 20:44:54 -04:00
Ethan Buchman
1144e72c61 docs: refactor ABCI docs
* move spec/software/abci.md to spec/abci/apps.md and improve it
* move some of app-dev/app-development.md to spec/abci/client-server.md
2018-09-06 20:51:36 -04:00
Ethan Buchman
3fd54c5df5 docs/spec/abci: update spec
* better overview section
* section on tags
* remove notes about state/concurrency from CheckTx
* incorporate feedback from #2249
* explain how validator set updates affect future blocks
2018-09-06 19:36:30 -04:00
Ethan Buchman
20c55cffc4 docs: move app-dev/abci-spec.md to spec/abci/abci.md 2018-09-06 18:36:11 -04:00
Ethan Buchman
dea34506fb types/time: add note about stripping monotonic part 2018-09-06 17:58:52 -04:00
Ethan Buchman
54fe6ef73c version: add and bump abci version 2018-09-06 17:58:12 -04:00
Ethan Buchman
6fd79d1545 docs/spec/blockchain: remove tags from result for now 2018-09-06 12:47:26 -04:00
Ethan Buchman
8fcabe8b05 docs: fix note about ChainID size 2018-09-06 12:42:23 -04:00
Ethan Buchman
e0fa827a53 docs/spec/blockchain: specify consensus params in state.md 2018-09-06 12:41:57 -04:00
Ethan Buchman
bdf3238710 docs/spec/blockchain: bring blockchain.md up-to-date 2018-09-06 12:41:33 -04:00
Ethan Buchman
ed9e00a8a7 docs/spec/blockchain: fix encoding JSON 2018-09-06 12:41:02 -04:00
Ethan Buchman
604eae86b6 Merge branch 'master' into release/v0.24.0 2018-09-05 19:15:06 -04:00
Ethan Buchman
b616e54c9b changelog and version 2018-09-05 19:26:55 -04:00
Ethan Buchman
fae21c9f52 linkify changelog pending 2018-09-05 19:25:33 -04:00
Ethan Buchman
8c0c4844b6 Merge pull request #2342 from tendermint/dev/changelog2
add script to add links in changelog
2018-09-05 19:10:48 -04:00
Ethan Buchman
bbc30f992e Merge pull request #2340 from tendermint/bucky/changelog
name drop external contribs in changelog
2018-09-05 19:03:03 -04:00
Ethan Buchman
91f8af8fd8 docs/spec/blockchain: update vote, signature, time 2018-09-05 19:14:18 -04:00
ValarDragon
0881068d76 add script to add links in changelog 2018-09-05 15:58:29 -07:00
Ethan Buchman
80c217089a docs/spec/blockchain: remove json tags, dont use HexBytes 2018-09-05 18:57:54 -04:00
Ethan Buchman
61914cf48e docs/tm-core/using-tm: fix indentation for genesis.validators 2018-09-05 18:56:58 -04:00
Ethan Buchman
4416c9e4bc fix links in abci readme. fixes #2335 2018-09-05 18:44:48 -04:00
Ethan Buchman
c9510d0f50 name drop external contribs in changelog 2018-09-05 18:26:12 -04:00
Ethan Buchman
892b170818 Bucky/changelog (#2339)
* update changelog pending

* fixes from review
2018-09-05 18:02:45 -04:00
Zach
7f6bd5c161 docs & spec: deduplicate block-structure.md (#2331) 2018-09-05 12:21:04 -04:00
Anton Kaliaev
eb5cf0f0dd ignore existing peers in DialPeersAsync (#2327)
* ignore existing peers in DialPeersAsync

Fixes #2253

* rename HasPeerWithAddress to IsDialingOrExistingAddress

[breaking] remove Switch#IsDialing

* check if addrBook is nil

to be consistent with other usages of addrBook across Switch

* different log messages for 2 use-cases
2018-09-05 11:52:22 -04:00
Ethan Buchman
2a3520a714 Merge pull request #2134 from tendermint/2027-cs-failure
Add priv_validator_laddr to config.toml
2018-09-05 11:48:34 -04:00
Ethan Buchman
39ab199181 Merge pull request #2333 from tendermint/anton/docs
Update indexing docs & write a test cast
2018-09-05 11:41:42 -04:00
Ethan Buchman
4c7591cf6b Merge pull request #2336 from tendermint/zach/hotfix
fix failing website build
2018-09-05 09:14:47 -04:00
zramsay
0149c8ffee fix failing website build 2018-09-05 08:56:48 -04:00
Zach
9db66deaa2 test make localnet in CI (#2281)
* tests: use make localnet

based on @jackzampolin work in:
https://github.com/cosmos/cosmos-sdk/pull/2067

* keep the p2p tests for now

* fixes after my own review

* nohup

* remove nohup
2018-09-05 13:25:15 +04:00
Anton Kaliaev
098681fd91 test searching txs by height
Refs #2051
2018-09-05 12:01:38 +04:00
Anton Kaliaev
cb91cd5965 [docs] one can also index txs by height now 2018-09-05 11:05:06 +04:00
Zach
92185c017c Several minor docs & spec cleanup (#2330)
* addr_book_strick=false on local nets

* link to spec

* spec: remove TODO, see #1749 instead

* spec: make issues from TODOs

* update docs on addr_book_strict option
2018-09-05 10:30:36 +04:00
JamesRay
d0bb1ab2b0 Filter out empty addresses in persistent_peers/seeds lists (#2323)
Fixes #2320
2018-09-05 10:13:25 +04:00
cong
d27cd972d2 Index tx.height (#2324)
Refs #2051
2018-09-05 10:00:10 +04:00
Anton Kaliaev
29d2db352e update outdated abci-cli install instructions (#2325)
https://github.com/tendermint/tendermint/pull/2301
2018-09-04 23:20:45 +04:00
Ethan Buchman
eabb1ece8e tmtime: Canonical, some comments (#2312) 2018-09-04 12:20:58 +04:00
Anton Kaliaev
166fd82b70 max-bytes PR follow-up (#2318)
* ReapMaxTxs: return all txs if max is negative

this mirrors ReapMaxBytes behavior

See https://github.com/tendermint/tendermint/pull/2184#discussion_r214439950

* increase MaxAminoOverheadForBlock

tested with:

```
func TestMaxAminoOverheadForBlock(t *testing.T) {

        maxChainID := ""
        for i := 0; i < MaxChainIDLen; i++ {
                maxChainID += "𠜎"
        }

        h := Header{
                ChainID:            maxChainID,
                Height:             10,
                Time:               time.Now().UTC(),
                NumTxs:             100,
                TotalTxs:           200,
                LastBlockID:        makeBlockID(make([]byte, 20), 300, make([]byte, 20)),
                LastCommitHash:     tmhash.Sum([]byte("last_commit_hash")),
                DataHash:           tmhash.Sum([]byte("data_hash")),
                ValidatorsHash:     tmhash.Sum([]byte("validators_hash")),
                NextValidatorsHash: tmhash.Sum([]byte("next_validators_hash")),
                ConsensusHash:      tmhash.Sum([]byte("consensus_hash")),
                AppHash:            tmhash.Sum([]byte("app_hash")),
                LastResultsHash:    tmhash.Sum([]byte("last_results_hash")),
                EvidenceHash:       tmhash.Sum([]byte("evidence_hash")),
                ProposerAddress:    tmhash.Sum([]byte("proposer_address")),
        }
        b := Block{
                Header:     h,
                Data:       Data{Txs: makeTxs(10000, 100)},
                Evidence:   EvidenceData{},
                LastCommit: &Commit{},
        }

        bz, err := cdc.MarshalBinary(b)
        require.NoError(t, err)

        assert.Equal(t, MaxHeaderBytes+MaxAminoOverheadForBlock-2, len(bz)-1000000-20000-1)
}
```

* fix MaxYYY constants calculation

by using math.MaxInt64

See https://github.com/tendermint/tendermint/pull/2184#discussion_r214444244

* pass mempool filter as an option

See https://github.com/tendermint/tendermint/pull/2184#discussion_r214445869

* fixes after Dev's comments
2018-09-04 11:46:34 +04:00
Ismail Khoffi
1de32fba17 Check for int overflow in clist (#2289)
* explicitly panic if max capacity is reached

* address review comments

* comments and a test
2018-09-02 02:13:09 -04:00
Zarko Milosevic
7b88172f41 Implement BFT time (#2203)
* Implement BFT time

* set LastValidators when creating state in state helper

for heights >= 2
2018-08-31 19:33:51 -04:00
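For context, a rough sketch of the BFT time idea: the block time is taken as the voting-power-weighted median of the commit vote timestamps, so a minority of faulty validators cannot skew it arbitrarily. The exact midpoint and tie-breaking rules in the spec are more involved; this snippet is illustrative only.

```
package types

import (
	"sort"
	"time"
)

type voteTime struct {
	Timestamp time.Time
	Power     int64 // voting power of the validator that cast the vote
}

// weightedMedianTime returns the voting-power-weighted median of the vote
// timestamps: sort by time, then walk until the accumulated power crosses
// half of the total.
func weightedMedianTime(votes []voteTime) time.Time {
	sort.Slice(votes, func(i, j int) bool { return votes[i].Timestamp.Before(votes[j].Timestamp) })
	var total int64
	for _, v := range votes {
		total += v.Power
	}
	var acc int64
	for _, v := range votes {
		acc += v.Power
		if 2*acc >= total { // crossed the weighted midpoint
			return v.Timestamp
		}
	}
	return time.Time{} // only reached for empty input
}
```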
Ethan Buchman
ffe91ae9e3 Merge pull request #2184 from tendermint/2035-max-bytes
MaxBytes
2018-08-31 15:16:37 -04:00
Ethan Buchman
03afad3218 Merge pull request #2307 from tendermint/master
libs/autofile: bring back loops (#2261)
2018-08-31 14:42:48 -04:00
Peng Zhong
5ecdfacb8e fix docs base directory (#2295) 2018-08-31 14:09:43 -04:00
Ethan Buchman
9e940b95ad libs/autofile: bring back loops (#2261)
* libs/autofile: bring back loops

* changelog, version
2018-08-31 14:05:49 -04:00
Anton Kaliaev
4147f856dc update arch doc 2018-08-31 16:10:41 +04:00
Anton Kaliaev
02d1b03abb update comment for MaxBlockSizeBytes 2018-08-31 16:01:22 +04:00
Anton Kaliaev
e873fed815 calculate amino overhead on the fly 2018-08-31 16:01:22 +04:00
Anton Kaliaev
e957f322c7 be more precise in comments 2018-08-31 16:01:21 +04:00
Anton Kaliaev
ad3d42981a update Gopkg.lock 2018-08-31 16:01:21 +04:00
Anton Kaliaev
0f7485690e limit chain ID to 50 symbols max 2018-08-31 16:01:21 +04:00
Anton Kaliaev
d73c5cbdb1 reap max bytes from the mempool & check transaction size
See ADR 020: Limiting txs size inside a block docs/architecture/adr-020-block-size.md

Refs #2035
2018-08-31 16:01:21 +04:00
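A hedged sketch of byte-budgeted reaping in the spirit of ADR 020: take txs in order until the next one, plus a per-tx encoding overhead, would exceed the block's byte budget. This is not the actual mempool code.

```
package mempool

// reapMaxBytes collects txs in order until adding the next one (plus a
// per-tx encoding overhead) would exceed maxBytes.
func reapMaxBytes(txs [][]byte, maxBytes, perTxOverhead int64) [][]byte {
	var (
		out   [][]byte
		total int64
	)
	for _, tx := range txs {
		sz := int64(len(tx)) + perTxOverhead
		if total+sz > maxBytes {
			break
		}
		total += sz
		out = append(out, tx)
	}
	return out
}
```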
Anton Kaliaev
7b2f7090fd add missing changelog entry (#2303)
https://github.com/tendermint/tendermint/pull/2264#issuecomment-417378396
2018-08-31 11:59:52 +04:00
Dev Ojha
61ab10d655 config: reduce default mempool size (#2300)
* config: reduce default mempool size

This reduces the mempool size from 100k to 5k. Note that each secp256k1 sig
takes ~0.5ms to verify. Therefore an adversary could previously stall every
node on the network for about 50 seconds of signature verification upon receiving a block.

This reduces that to a delay of about 2.5 seconds per node. This change
should be reverted once ABCI recheck is implemented.

* (squash this) fix test
2018-08-30 17:41:58 -04:00
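The arithmetic behind the numbers above, spelled out; the ~0.5ms per-signature figure is the commit's own assumption.

```
package main

import (
	"fmt"
	"time"
)

// worstCaseVerifyDelay: if every queued tx carries one secp256k1 signature,
// churning through the whole mempool costs mempoolSize * perSig.
func worstCaseVerifyDelay(mempoolSize int, perSig time.Duration) time.Duration {
	return time.Duration(mempoolSize) * perSig
}

func main() {
	perSig := 500 * time.Microsecond
	fmt.Println(worstCaseVerifyDelay(100000, perSig)) // old default: 50s
	fmt.Println(worstCaseVerifyDelay(5000, perSig))   // new default: 2.5s
}
```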
Anton Kaliaev
3a6cc5e6af cache codecov script (#2291) 2018-08-30 10:11:33 +04:00
Alessio Treglia
c43fb700e3 New NewGoLevelDBWithOpts() to pass opts down to goleveldb (#2293)
Closes: #2292
2018-08-29 08:44:55 +04:00
Dev Ojha
bd531401a0 mempool: Store txs by hash inside of cache (#2234)
* mempool: Store txs by hash inside of cache

This allows for large cache sizes without the memory
consumption growing rapidly.

* (squash this) rename hashedTx -> txHash
2018-08-29 08:10:06 +04:00
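A hedged sketch of a hash-keyed cache as described above: fixed-size hashes in both the map and the eviction list keep memory bounded regardless of tx size. SHA-256 stands in for whatever hash the real cache uses.

```
package mempool

import (
	"container/list"
	"crypto/sha256"
	"sync"
)

// txCache stores fixed-size tx hashes instead of full tx bytes, so a large
// cache size has bounded memory use.
type txCache struct {
	mtx  sync.Mutex
	max  int
	list *list.List // FIFO of hashes, for eviction
	m    map[[sha256.Size]byte]struct{}
}

func newTxCache(max int) *txCache {
	return &txCache{max: max, list: list.New(), m: make(map[[sha256.Size]byte]struct{})}
}

// Push returns false if the tx was already cached.
func (c *txCache) Push(tx []byte) bool {
	h := sha256.Sum256(tx)
	c.mtx.Lock()
	defer c.mtx.Unlock()
	if _, ok := c.m[h]; ok {
		return false
	}
	if c.list.Len() >= c.max { // evict the oldest hash
		oldest := c.list.Front()
		delete(c.m, oldest.Value.([sha256.Size]byte))
		c.list.Remove(oldest)
	}
	c.m[h] = struct{}{}
	c.list.PushBack(h)
	return true
}
```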
Ismail Khoffi
9d06d7e306 update secret connection to use a little endian encoded nonce (#2264)
* update secret connection to use a little endian encoded nonce

* update encoding of chunk length to be little endian, too

* update comment

* Change comment slightly to trigger CircleCI
2018-08-28 09:37:38 +04:00
Dev Ojha
b1bc3e4f89 crypto/secp256k1: Fix signature malleability, adopt more efficient en… (#2239)
* crypto/secp256k1: Fix signature malleability, adopt more efficient encoding

This removes signature malleability per ADR 14, and makes secp match
the encoding in ADR 15.

* (squash this) add lock
2018-08-28 09:32:54 +04:00
Ethan Buchman
38b401657e Cleanup up Multisig naming (#2255)
* crypto/multisig: Pubkey -> PubKey

* crypto/encoding/amino: use pkg vars for routes

* crypto/multisig/bitarray

* crypto/multisig: ThresholdMultiSignaturePubKey -> PubKeyMultisigThreshold

* crypto/encoding/amino: add PubKeyMultisig to table

* remove bA import alias

https://github.com/tendermint/tendermint/pull/2255#discussion_r211900709
2018-08-28 08:41:40 +04:00
Zach
8972b6e293 Update config.js (#2287) 2018-08-28 08:37:35 +04:00
Alessio Treglia
5f255f0f71 Replace db_path with db_dir in default configuration (#2284)
* db_path is not being parsed

Fix default configuration, db_path is now db_dir.

Closes: cosmos/cosmos-sdk#1712

* Update CHANGELOG_PENDING.md
2018-08-27 17:27:18 +04:00
Peng Zhong
20e35654c6 lint markdown docs using the stop-words and write-good linters (#2195)
* lint docs with write-good, stop-words

* remove package-lock.json

* update changelog

* fix wrong paragraph formatting

* fix some docs formatting

* fix docs format

* fix abci spec format
2018-08-27 11:33:46 +04:00
Ahmad M ElShareif
8a84593c02 Reduce code in common/math (#2274) 2018-08-27 10:43:15 +04:00
Zach
aab5947d82 docs: deprecate RTD (#2280) 2018-08-27 10:37:54 +04:00
Zach
2f7fc87230 docs: fix img links, closes #2214 (#2282) 2018-08-27 10:36:22 +04:00
Anton Kaliaev
1cf6712a36 quick fix for CircleCI (#2279)
See https://discuss.circleci.com/t/saving-cache-stopped-working-warning-skipping-this-step-disabled-in-configuration/24423/2
2018-08-27 10:19:27 +04:00
Dev Ojha
43ebc77f9b Fix typo, closes #2269 (#2277) 2018-08-26 11:45:34 +04:00
Ethan Buchman
debe56326f Merge pull request #2159 from tendermint/bucky/abci-validators
Bucky/abci validators
2018-08-17 11:12:27 -04:00
Ethan Buchman
6dde320591 fixes from review 2018-08-17 10:32:10 -04:00
bradyjoestar
62b2093da5 ABCIAppClient conn close (#2236)
Refs https://github.com/grpc/grpc-go/issues/2264
2018-08-17 11:59:46 +04:00
Ethan Buchman
76bb4b15c7 rebase fixes 2018-08-16 13:22:04 -04:00
Ethan Buchman
0cbf9b2a7d update changelog 2018-08-16 13:19:30 -04:00
Ethan Buchman
0701d79046 minor fixes 2018-08-16 13:19:14 -04:00
Ethan Buchman
4f61b97bbe update dep for proto. fix types/proto3/block.proto 2018-08-16 13:19:14 -04:00
Ethan Buchman
1111c1848d update abci spec 2018-08-16 13:19:14 -04:00
Ethan Buchman
c919643c3e abci: move round back from votes to commit 2018-08-16 13:19:14 -04:00
Ethan Buchman
b189ab676f makefile: lint flags 2018-08-16 13:19:14 -04:00
Ethan Buchman
fe6a504374 revert gogo version used to generate files 2018-08-16 13:19:13 -04:00
Ethan Buchman
91376627ea update ADR 2018-08-16 13:19:13 -04:00
Ethan Buchman
e3f54ece2f abci: VoteInfo, ValidatorUpdate. See ADR-018 2018-08-16 13:19:13 -04:00
Ethan Buchman
f26b83f15f abci: add next_validators_hash to header 2018-08-16 13:19:13 -04:00
Ethan Buchman
1f6c7bf22a make: update protoc_abci use of awk 2018-08-16 13:19:13 -04:00
Ethan Buchman
d69cf9dd2f Merge pull request #2231 from tendermint/anton/changelog-2
Add a changelog entry & Upgrading guides
2018-08-15 10:19:06 -04:00
Anton Kaliaev
4e78badac9 docs: note max outbound peers excludes persistent 2018-08-15 12:54:20 +04:00
Anton Kaliaev
684e3cb446 add upgrading guides 2018-08-15 12:53:00 +04:00
Anton Kaliaev
a649deb6ee add a changelog entry 2018-08-15 12:52:43 +04:00
bradyjoestar
5446452b01 pass in NodeKey to NewNode (#2212)
Fixes #1544
2018-08-15 12:29:45 +04:00
bradyjoestar
ad24d66750 [abci-cli] print out all the sub-commands available (#2219) 2018-08-15 12:28:17 +04:00
Anton Kaliaev
eb98f1c3a9 add missing changelog entries (#2224) 2018-08-14 19:16:18 -04:00
Dev Ojha
728d2ed266 crypto: Remove unnecessary prefixes from amino route variable names (#2205)
* crypto: Remove unnecessary ed25519 and secp256k1 prefixes from amino routes.

* (squash this) add changelog

* (squash this) multisig amino fixes

* (squash this) fix build error
2018-08-14 19:13:25 -04:00
Zach
e10666859f Zach/automated docs (#2225)
* add docs/config.js for better developer experience

* update docs_readme :)
2018-08-14 18:40:05 -04:00
Anton Kaliaev
6fad8eaf5a [p2p/pex] connect to more than 10 peers (#2169)
* [p2p/pex] connect to more than 10 peers

also, remove DefaultMinNumOutboundPeers because a) I am not sure it's
needed b) it's super confusing

look closely

```
maxPeers := sw.config.MaxNumPeers - DefaultMinNumOutboundPeers
if maxPeers <= sw.peers.Size() {
  sw.Logger.Info("Ignoring inbound connection: already have enough peers", "address", inConn.RemoteAddr().String(), "numPeers", sw.peers.Size(), "max", maxPeers)
```

we print maxPeers = config.MaxPeers - DefaultMinNumOutboundPeers. So we
may not have enough peers even though we say we have enough.

Refs #2130

* update spec

* replace MaxNumPeers with MaxNumInboundPeers/MaxNumOutboundPeers

Refs #2130

* update changelog

* make max rpc conns formula visible to users

* update spec

* docs: note max outbound peers excludes persistent
2018-08-14 18:25:56 -04:00
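A minimal sketch of the shape described above, with inbound and outbound connections capped independently instead of deriving a confusing "maxPeers" from a single limit minus a hidden outbound minimum. The field and function names are illustrative, not the actual config or Switch API.

```
package p2p

// peerLimits caps inbound and outbound connections separately.
type peerLimits struct {
	MaxNumInboundPeers  int
	MaxNumOutboundPeers int
}

// canAcceptInbound is checked when a new inbound connection arrives.
func canAcceptInbound(numInbound int, lim peerLimits) bool {
	return numInbound < lim.MaxNumInboundPeers
}

// needMoreOutbound is checked when deciding whether to dial more peers.
func needMoreOutbound(numOutbound, numDialing int, lim peerLimits) bool {
	return numOutbound+numDialing < lim.MaxNumOutboundPeers
}
```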
Ethan Buchman
db53dc5fd4 Merge pull request #2164 from tendermint/dev/multisig
Threshold Multisignature implementation
2018-08-14 16:37:07 -04:00
ValarDragon
2fe34491ba (squash this) Fix build errors 2018-08-14 11:42:40 -07:00
Anton Kaliaev
80e49abada send ValidatorSetUpdates event when validator set changes (#2161)
Refs #1916
2018-08-14 19:16:35 +04:00
b00f
0f931eeb10 types: allow genesis file to have 0 validators (#2148)
* fixing issue 2015

* Remove comments for code review

* Update tests
2018-08-14 19:02:53 +04:00
Dev Ojha
89668c3179 clist: Speedup functions (#2208)
* clist: Speedup detachNext() and detachPrev()

We used unnecessary function calls, defers, and extra mutexes.
These are not our friends for writing fast code in our libs.

* Remove more defers from clist functions

* Add more benchmarks
2018-08-14 19:00:21 +04:00
Dev Ojha
d0dcb1cde1 cmap: Remove defers (#2210)
All functions in cmap have just one code path, so there is no reason
to use defer statements.
2018-08-14 18:59:04 +04:00
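A tiny illustration of the defer removal described in the clist and cmap commits above: in a hot, single-path accessor, an explicit Unlock avoids the small per-call overhead of defer. The type is a stand-in, not the real CMap.

```
package cmap

import "sync"

type CMap struct {
	mtx sync.Mutex
	m   map[string]interface{}
}

// Get has a single code path, so an explicit Unlock is enough and avoids the
// (small but measurable in hot paths) overhead of defer.
func (c *CMap) Get(key string) interface{} {
	c.mtx.Lock()
	v := c.m[key]
	c.mtx.Unlock()
	return v
}
```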
bradyjoestar
ed08ae7321 [tm-monitor] use pubkey.Equals() func instead of raw == (#2221) 2018-08-14 18:57:48 +04:00
ValarDragon
6beaf6e72d (squash this) address Jae's comments on NumTrueBitsBefore 2018-08-13 18:46:33 -07:00
peerlink
3624a17642 blockchain: fix register concrete name. (#2213) 2018-08-13 17:40:49 +04:00
Dev Ojha
8a1a79257e mempool: Keep cache hashmap and linked list in sync (#2188)
* mempool: Keep cache hashmap and linked list in sync

This removes bugs with the linked list being full, but hashmap empty

* address PR comments

* switch clist back to list
2018-08-13 14:42:33 +04:00
Ethan Buchman
9c6fdad276 Merge pull request #2200 from tendermint/anton/missing-changelog-entry
Add missing changelog entry
2018-08-10 17:41:48 -04:00
ValarDragon
8d28344e84 (Squash this) switch to bare 2018-08-10 13:59:35 -05:00
ValarDragon
4cf1dbd676 (squash this) fix amino route 2018-08-10 13:20:59 -05:00
ValarDragon
00db469fc0 (squash this) begin addressing PR comments 2018-08-10 13:14:43 -05:00
Anton Kaliaev
93aadf160f add missing changelog entry
Refs #1954
2018-08-10 09:33:02 +04:00
Dev Ojha
2756be5a59 libs: Remove usage of custom Fmt, in favor of fmt.Sprintf (#2199)
* libs: Remove usage of custom Fmt, in favor of fmt.Sprintf

Closes #2193

* Fix bug that was masked by custom Fmt!
2018-08-10 09:25:57 +04:00
Anton Kaliaev
fc7c298cc0 Remove gogoproto from Makefile's TOOLS (#2198)
* remove gogoproto from tools

because it's not a binary

* update protobuf version to 3.6.1 in `make get_protoc`

* update libs/common/types.pb.go and rpc/grpc/types.pb.go

* fix app tests
2018-08-10 09:14:17 +04:00
bradyjoestar
785786bec4 add json2wal & fix wal2json (#2196)
* add json2wal & fix wal2json

* fix bug

* Update main.go

* add wal2JsonTest file

* Delete wal2JsonTest
2018-08-10 09:09:40 +04:00
ValarDragon
aab26c3ff7 Merge remote-tracking branch 'origin/develop' into dev/multisig 2018-08-09 01:05:39 -05:00
Dev Ojha
5a8fe61200 crypto: Add compact bit array for intended usage in the multisig (#2140)
* crypto: Add compact bit array for intended usage in the multisig

This is in a separate PR for ease of review.

* (squash this) add comment
2018-08-09 09:38:23 +04:00
Ian Tan
3c98cec2c2 Add ADR entry for ProposeTx (#1813)
This adds an ADR entry addressing the implementation of a `ProposeTx`
method in the ABCI proposed in #1776. Fundamentally, this proposal gives
some control of block proposals to the application. The initial use case is
to support the Minimal Viable Plasma specification.
2018-08-08 23:12:36 +04:00
Dev Ojha
1fbca09e3c [ADR] Proposal for multisignature encoding (#1960)
* ADR: Proposal for multisignature encoding

This proposal is partially tied to the resolution of #1957.

* Change title to Encoding standard for multisignatures

* ADR: Change multisigs ADR now that amino must be used for pubkeys

* Address PR comments
2018-08-08 23:10:33 +04:00
Jordan Bibla
37ea7040ef remove JB from codeowners file (#2174) 2018-08-08 13:03:00 +04:00
Ethan Buchman
6770992b01 Merge pull request #2181 from tendermint/zach/docs-fixes
docs: fix links & other improvements
2018-08-07 20:35:13 -04:00
Zach Ramsay
b30596b3a1 docs: fix links & other improvements 2018-08-07 19:47:51 -04:00
Ethan Buchman
ef5c27a2d2 Merge pull request #2154 from tendermint/bucky/speed-up-tests
speed up some tests. ref #2038
2018-08-07 15:57:08 -04:00
Jun Kimura
e1b9bf7c81 set capacity of txsmap (#2166) 2018-08-07 20:50:10 +04:00
ValarDragon
4e7bf10b59 (squash this) squashed bug with multiple signatures at same index. 2018-08-07 11:37:42 -05:00
ValarDragon
67b6d51ff4 (squash this) address PR comments + fix bug in equality check 2018-08-07 10:44:38 -05:00
Dev Ojha
6dbbdb9438 Merge branch 'dev/compact_bitmap' into dev/multisig 2018-08-06 21:20:23 -05:00
ValarDragon
e7dd76c28d crypto: Threshold multisig implementation 2018-08-06 21:17:38 -05:00
ValarDragon
21448bcf4f crypto: Add compact bit array for intended usage in the multisig
This is in a separate PR for ease of review.
2018-08-06 15:55:02 -05:00
Ethan Buchman
ec3e34efd8 Merge pull request #2145 from tendermint/bucky/adr-chain-versions
adr: chain-versions
2018-08-05 22:48:06 -04:00
Ethan Buchman
b19e148bc5 Merge pull request #2144 from tendermint/bucky/adr-protocol-versions
adr: protocol versioning
2018-08-05 22:47:12 -04:00
Ethan Buchman
6f8b62d1f3 Merge pull request #2149 from tendermint/bucky/adr-abci-validators
[ADR] ABCI Validators
2018-08-05 22:44:38 -04:00
Ethan Buchman
e0e19a24a4 Merge pull request #2157 from tendermint/master
Merge pull request #2152 from tendermint/release/v0.23.0
2018-08-05 17:12:01 -04:00
Ethan Buchman
013b9cef64 Merge pull request #2152 from tendermint/release/v0.23.0
Release/v0.23.0
2018-08-05 17:11:22 -04:00
Ethan Buchman
087b657008 speed up some tests. ref #2038 2018-08-05 16:59:23 -04:00
Ethan Buchman
fe835cd456 Merge pull request #2116 from tendermint/265-change-abci-header-to-match-tm
change ABCI header to match Tendermint exactly
2018-08-05 16:55:10 -04:00
Anton Kaliaev
d7035abe73 change ABCI header to match Tendermint exactly
Now that Tendermint Amino will be compatible with proto3, the Header in ABCI
should exactly match the Tendermint header - they will then be encoded
identically in ABCI and in Tendermint Core.

Refs #265
2018-08-05 16:57:38 -04:00
Ethan Buchman
f2b629680a Merge pull request #2153 from tendermint/bucky/merge-0.23.0-to-develop
Bucky/merge 0.23.0 to develop
2018-08-05 16:43:00 -04:00
Ethan Buchman
720ce658f1 Merge branch 'release/v0.23.0' into bucky/merge-0.23.0-to-develop 2018-08-05 16:42:04 -04:00
Ethan Buchman
309a6772d7 types: fix formatting when printing signatures
- use cmn.Fingerprint and %X
2018-08-05 16:35:43 -04:00
Ethan Buchman
8bd514d9fb update changelog 2018-08-05 15:55:48 -04:00
ValarDragon
f903947ff3 crypto: Remove interface from crypto.Signature
Signatures are now []byte, which saves on the number of bytes after
amino encoding

(squash this) address Ismail's comment
2018-08-05 15:46:57 -04:00
Ethan Buchman
ea67fb55eb Merge pull request #2106 from tendermint/1134-add-proposer-to-the-block
add proposer address to block's Header
2018-08-05 15:18:42 -04:00
Ethan Buchman
e1062a657f fixes for ProposerAddress
- state.MakeBlock takes a proposerAddr
- validateBlock only checks that the ProposerAddress is in the validator
  set
- fix raciness from the bad proposer test:
  - use privValidator to get the proposer address (instead of racy
    state)
  - note we had to remove the test that checked the correct proposer was
    included for higher rounds because we don't have a good way to test
    this with multiple consensus states and not using the
    privValidator.Address while calling createProposalBlock was a hack!
2018-08-05 15:19:21 -04:00
Ethan Buchman
4d998b7c03 consensus: failing test for ProposerAddress 2018-08-05 15:17:43 -04:00
Anton Kaliaev
bec9d5cba9 add proposer address to block's Header
Refs #1134

Validation:

- ignored in block.ValidateBasic since it's stateful information
- checked in blockExec.ValidateBlock
2018-08-05 15:16:49 -04:00
Ethan Buchman
06a157ad06 Merge pull request #1815 from tendermint/jae/literefactor4
ValidatorSet change delayed by 1 block, and lite refactor (#2)
2018-08-05 14:54:07 -04:00
Dev Ojha
d6a666b445 p2p/pex: Fix mismatch between dialseeds and checkseeds. (#2151) 2018-08-05 14:47:01 -04:00
Ethan Buchman
c5c2b9601f update changelog and version 2018-08-05 14:14:56 -04:00
Ethan Buchman
7538864c15 Merge branch 'develop' into jae/literefactor4 2018-08-05 13:51:41 -04:00
Ethan Buchman
279259ec8e adr-018: abci validators 2018-08-05 12:45:27 -04:00
Ethan Buchman
ca9d07e5e4 update deps for amino v0.12.0-rc0 2018-08-05 12:39:08 -04:00
Dev Ojha
6e3c5e8033 p2p/pex: Allow configured seed nodes to not be resolvable over DNS (#2129)
* p2p/pex: Allow configured seed nodes to be offline

Previously you couldn't start up tendermint if a seed node was offline.
This now allows you to start up tendermint, as long as all seed node addresses
are formatted correctly. In the event that all seed nodes are down,
and the address book is empty, then it crashes with an informative error msg.
(This case doesn't occur if no seeds were specified)

Closes #1716

* (Squash this) Address melekes' comments

* (squash this) fix package imports

* (squash this) fix pex_reactor comment

* (squash this) add a test case
2018-08-04 20:35:08 -04:00
Ethan Buchman
8073e51b04 Merge pull request #2096 from tendermint/dev/adr_symmetric
[ADR] Proposal for encoding symmetric cryptography
2018-08-04 20:27:24 -04:00
Ethan Buchman
3161ebbc2f Merge pull request #2091 from tendermint/dev/adr_secp_signatures
[ADR] Fix malleability problems in Secp256k1 signatures
2018-08-04 20:22:56 -04:00
Ethan Buchman
4cbeb30da2 Merge pull request #2136 from tendermint/1944-update-grpc
update genproto
2018-08-03 23:40:21 -04:00
Ethan Buchman
d5b5e5a2e4 Merge pull request #2135 from tendermint/2072-unresponsive-tm-after-cs-failure
consensus: non-responsive to CTRL-C if consensus state panics
2018-08-03 23:39:25 -04:00
ValarDragon
6691492540 (squash this) indicate what Ethereum does 2018-08-03 17:49:46 -07:00
Ethan Buchman
0f80a7da82 adr: chain-versions 2018-08-03 20:23:37 -04:00
Ethan Buchman
ae2238efe6 adr: protocol versioning 2018-08-03 20:21:40 -04:00
Anton Kaliaev
2878c7523f update github bug report template (#2131) 2018-08-03 11:39:57 +04:00
Anton Kaliaev
b1cff0f9bf [libs/autofile] create a Group ticker on Start
1) no need to stop the ticker in createTestGroup() method
2) now there is a symmetry - we start the ticker in OnStart(), we stop it
in OnStop()

Refs #2072
2018-08-03 11:34:58 +04:00
Anton Kaliaev
d09a3a6d3a stop gracefully instead of trying to resume ops
Refs #2072

We most probably shouldn't keep running after an unexpected panic. Some
unknown error happened, and we don't know whether it could result in the
validator signing something invalid. It might be worthwhile to explore a
mechanism for manual resumption via some console or secure RPC system, but
for now, halting the chain upon unexpected consensus bugs sounds like the
better option.
2018-08-03 11:24:55 +04:00
ValarDragon
87f09adeec (Squash this) Be more explicit about the exact encoding of the secp signature 2018-08-02 23:27:16 -07:00
ValarDragon
b3a3c8a192 Merge remote-tracking branch 'origin/develop' into dev/adr_secp_signatures 2018-08-02 23:25:14 -07:00
ValarDragon
96fdec0fca crypto: Add compact bit array for intended usage in the multisig
This is in a separate PR for ease of review.
2018-08-02 23:18:09 -07:00
Ethan Buchman
fe5e7808f2 fix Gopkg.lock 2018-08-02 19:15:32 -04:00
Ethan Buchman
2d1c5a1ce6 Merge remote-tracking branch 'origin/develop' into jae/literefactor4 2018-08-02 19:12:22 -04:00
Ethan Buchman
00ebdcd581 update pending changelog 2018-08-02 19:06:29 -04:00
Ethan Buchman
2487210414 Merge pull request #2097 from tendermint/1772-revert
revert "make `/status` RPC endpoint resistant to consensus halt"
2018-08-02 17:31:07 -04:00
ValarDragon
a040c36dfb (squash this) change adr number, remove redundancy in function names 2018-08-02 10:43:47 -07:00
Anton Kaliaev
d579f4c610 update genproto
Closes #1944
2018-08-02 17:54:55 +04:00
Anton Kaliaev
b82138b002 update changelog 2018-08-02 16:48:12 +04:00
Anton Kaliaev
8ed99c2c13 exit from initSighupWatcher child goroutine
also, remove excessive log message

Refs #2072
2018-08-02 16:42:25 +04:00
Anton Kaliaev
4c5a143a70 respawn receiveRoutine so we can properly exit
Closes #2072
2018-08-02 16:36:28 +04:00
Anton Kaliaev
b33f73eaf1 stop autofile and autogroup properly
NOTE: from the ticker#Stop documentation:

```
Stop does not close the channel, to prevent a read from the channel
succeeding incorrectly.
https://golang.org/src/time/tick.go?s=1318:1341#L35
```

Refs #2072
2018-08-02 16:33:34 +04:00
Jae Kwon
e719a93d1d Addressed review for #1815 except those marked as 'TODO make issue' 2018-08-02 03:10:50 -07:00
Jae Kwon
eb9b37e196 Pull out consensus liveness fix, which went to #1815 2018-08-02 01:59:46 -07:00
Dev Ojha
eaa137512c adr: Encoding for cryptography at launch (#2121) 2018-08-01 18:19:21 -04:00
Dev Ojha
023bb99eb0 p2p: Add test vectors for deriving secrets (#2120)
These test vectors are needed for comparison with the Rust implementation.
To implement this effectively, a "RandBool" method was added to cmn.Rand.
2018-08-01 15:06:29 -04:00
Anton Kaliaev
f2f53442c6 reorder BaseConfig according to generated version
also, add `priv_validator_laddr` to the template
2018-08-01 16:20:59 +04:00
Ismail Khoffi
24ae878b9f update encoding test to match how amino skips empty pointers 2018-08-01 13:29:41 +02:00
Jae Kwon
619bb3b2d7 Merge remote-tracking branch 'remotes/origin/jae/literefactor5' into jae/literefactor6 2018-08-01 03:06:00 -07:00
Dev Ojha
dde96b75ce abci: Update readme for building protoc (#2124) 2018-08-01 13:57:31 +04:00
Dev Ojha
6fb2f44cc3 p2p: Connect to peers from a seed node immediately (#2115)
This is to reduce wait times when initially connecting. This still runs checks
such as whether you still want additional peers.

A test case has been created, which fails if this change is not included.
2018-07-31 22:09:01 +02:00
Zarko Milosevic
08ad162daa docs: Modify blockchain spec to reflect validator set changes (#2107) 2018-07-31 20:19:57 +02:00
ValarDragon
a83eed104c libs/cmn: Remove Tempfile, Tempdir, switch to ioutil variants (#2114)
Our Tempfile was just a wrapper around ioutil's that panicked instead of
returning an error.

Our Tempdir was a less safe variant of ioutil's Tempdir.
2018-07-31 19:43:36 +02:00
ValarDragon
be642754f5 libs/cmn/writefileatomic: Handle file already exists gracefully (#2113)
This uses the stdlib's method of creating a tempfile in our write-file-atomic
method, with a few modifications. We use a 64-bit number rather than a 32-bit
one, and therefore a corresponding LCG. This is to reduce collision
probability. (Note: the previous value was only 32 bits wide, so collisions
were a plausible concern.)

We handle reseeding the LCG in such a way that multiple threads are
even less likely to reuse the same seed.
2018-07-31 19:43:36 +02:00
ValarDragon
2608249e5b libs/common: Refactor tempfile code into its own file 2018-07-31 19:43:36 +02:00
Anton Kaliaev
62b8ee270d [docs] Validator's address can be skipped (#2117)
Refs #1712
2018-07-31 18:28:19 +02:00
Anton Kaliaev
0c7338c5f0 abci: Change validators to last_commit_info in RequestBeginBlock (#2074)
* change validators to last_commit_info in RequestBeginBlock
* do not send pubkeys with RequestBeginBlock

Refs #1856
2018-07-30 17:29:40 +02:00
Dev Ojha
231cdc1320 ci: Reduce log output in test_cover (#2105)
This commit makes CircleCI show in the log only the module whose tests it is
currently running, unless a test fails. For a failing package, it displays the
names of all failing tests along with their log output. This makes our log
output far more scrollable, and we lose no information for debugging.
2018-07-30 01:05:27 +02:00
Zaki Manian
fef4fe1c66 Link to "The latest gossip on BFT consensus" (#2102)
The paper is such a useful resource for anyone building on Tendermint. It should be linked in the README.
2018-07-29 09:49:42 +04:00
Dev Ojha
f00b52b710 libs/autofile/group_test: Remove unnecessary logging (#2100)
Previously we logged `Testing for i <i>` for all i in [0,100).
This was unnecessary. This changes it to just log the value for i on
error messages, to reduce the unnecessary verbosity in log files.
2018-07-29 09:48:37 +04:00
VenkatDatta
188e459273 Removed unnecessary onStart call (#2098)
* Removed unnecessary onStart & onStop calls in reactor

* Refactor OnStart & OnStop in reactor

* Removed redundant OnStart func in reactor
2018-07-29 09:46:53 +04:00
ValarDragon
3d5d254932 (squash this) Mixed up field element and curve element. Idea still stands. 2018-07-28 20:41:19 -07:00
ValarDragon
ce9ddc7cd7 (squash this) Note not to overwrite aead's. 2018-07-28 06:33:15 -07:00
ValarDragon
c03ad56d55 (squash this) Note that this breaks existing keys. 2018-07-28 04:23:22 -07:00
ValarDragon
caef5dcd69 (Squash this) forgot to say that algo_name should be length prefixed 2018-07-28 04:14:07 -07:00
Anton Kaliaev
7634073718 revert "make /status RPC endpoint resistant to consensus halt"
Refs #1772

Reasons:
- this was a bad patch for something not well understood

Lessons learned:
- nobody should modify code without understanding the problem first; it will
only result in more technical debt and code rot.
- we never hide information when we suspect a bug or we're not sure what's
going on.
2018-07-28 09:17:42 +04:00
ValarDragon
af2894c0f8 (squash this) improve grammar. 2018-07-27 19:27:25 -07:00
ValarDragon
a2debe57c7 [ADR] Proposal for encoding symmetric cryptography 2018-07-27 18:10:41 -07:00
ValarDragon
5955eddc7d ADR: Fix malleability problems in Secp256k1 signatures
Previously you could not assume that your transaction hash would
appear on chain.
2018-07-27 13:18:21 -07:00
Ethan Buchman
4bab34ae45 Merge pull request #2088 from tendermint/bucky/fix-evidence-test
consensus: Fix test for blocks with evidence
2018-07-27 14:06:12 -04:00
Ethan Buchman
fe5b0d7074 consensus: fix test for blocks with evidence 2018-07-27 14:00:05 -04:00
Anton Kaliaev
96ae535fb8 proto3 timestamp (#2064)
This PR changes ABCI time format from int64 (Unix seconds) to WKT (WellKnownType) google.protobuf.Timestamp.

Refs #1857

Reasons:

better precision
standard DT for proto

* update Gopkg.lock
* [makefile] remove extra grep
    - go list excludes vendor by default now
* proto3 timestamp
* [docs/abci-spec] note about serialisation format
* make time non-nullable
2018-07-27 04:23:19 +02:00
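For reference, a hedged sketch of what the ABCI time change means in Go terms: the old field carried bare Unix seconds, while google.protobuf.Timestamp carries seconds plus a nanosecond remainder. The struct below only mirrors the well-known type's shape; it is not the generated code.

```
package types

import "time"

// Timestamp mirrors the shape of google.protobuf.Timestamp: whole seconds
// since the Unix epoch plus nanoseconds, which a bare int64 of Unix seconds
// could not express.
type Timestamp struct {
	Seconds int64
	Nanos   int32
}

func fromTime(t time.Time) Timestamp {
	return Timestamp{Seconds: t.Unix(), Nanos: int32(t.Nanosecond())}
}

func (ts Timestamp) toTime() time.Time {
	return time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
}
```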
Alexander Simmerl
4be6395ee0 Merge pull request #2085 from tendermint/master
Merge 0.23.8 back into develop
2018-07-27 04:21:34 +02:00
Dev Ojha
bc526f18a4 libs: Make bitarray functions lock parameters that aren't the caller (#2081)
This makes bit array functions that take a second bit array thread safe.
Previously there was a warning on bitarray.Update to lock the second parameter
externally if thread safety was required.
This was not done within the codebase, so it was fine to change here.

Closes #2080
2018-07-27 04:21:08 +02:00
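A hedged sketch of how Update can be made safe without asking callers to lock the argument: snapshot the second bit array under its own lock, then write under the receiver's lock, so the two locks are never held at once. The real implementation may differ in details.

```
package bits

import "sync"

type BitArray struct {
	mtx   sync.Mutex
	Bits  int
	Elems []uint64
}

// Update copies o's bits into bA. The argument is snapshotted under its own
// lock first, then written under bA's lock, so callers no longer need to
// lock the second bit array externally and no deadlock is possible.
func (bA *BitArray) Update(o *BitArray) {
	if bA == nil || o == nil {
		return
	}
	o.mtx.Lock()
	elems := append([]uint64(nil), o.Elems...)
	o.mtx.Unlock()

	bA.mtx.Lock()
	copy(bA.Elems, elems)
	bA.mtx.Unlock()
}
```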
Jae Kwon
40d6dc2ee5 Merge pull request #2082 from tendermint/release/0.22.8
HotFix for 0.22.7, bump to 0.22.8
2018-07-26 19:13:27 -07:00
Jae Kwon
d542d2c394 Fix 0.22.7, bump to 0.22.8 2018-07-26 18:08:09 -07:00
Alexander Simmerl
fd29fd6465 adr: PeerTransport (#2069)
* p2p: Propose PeerTransport ADR
* adr: Set status to in review
* adr: Add high-level decision
* adr: Extend on the idea of guards
* adr: Rework guards into transport specific filters
* adr: Rename to nodeAddr
* adr: Incorporate review
2018-07-27 02:27:19 +02:00
Dev Ojha
6241e6b927 libs: update BitArray go docs (#2079) 2018-07-27 02:10:58 +02:00
Dev Ojha
9f19229791 .github: Split the issue template into two separate templates (#2073)
* .github: Split the issue template into two separate templates

Now we have different bug report and feature request templates.

* Forgot to add the name and about fields
2018-07-26 12:18:55 +04:00
Alexander Simmerl
bdab37a626 Merge pull request #2054 from tendermint/dev/secret_connection
secret connection update
2018-07-26 00:29:36 +02:00
ValarDragon
7bf28af590 p2p/secret_connection: Switch salsa usage to hkdf + chacha
This now uses one HKDF call on the X25519 shared secret to create
a key for the sender and a key for the receiver.
HKDF is applied directly to the computed shared
secret, since the shared secret is a function of the pubkeys.

The nonces now start at 0, as we are using chacha as a stream
cipher, and the sender and receiver now have different keys.
2018-07-26 00:12:32 +02:00
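A hedged sketch of the key derivation described above, assuming SHA-256 for HKDF and an illustrative info string; which side uses which key, and the exact parameters, are set by the actual handshake, not by this snippet.

```
package conn

import (
	"crypto/sha256"
	"io"

	"golang.org/x/crypto/hkdf"
)

// deriveKeys expands the X25519 shared secret into two independent 32-byte
// keys, one per direction, so sender and receiver never reuse a (key, nonce)
// pair even though both sides start their nonces at 0.
func deriveKeys(sharedSecret []byte) (sendKey, recvKey [32]byte, err error) {
	kdf := hkdf.New(sha256.New, sharedSecret, nil, []byte("EXAMPLE_SECRET_CONNECTION_KEYS"))
	if _, err = io.ReadFull(kdf, sendKey[:]); err != nil {
		return
	}
	_, err = io.ReadFull(kdf, recvKey[:])
	return
}
```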
Zaki Manian
1b04e4e5f1 p2p: Remove RipeMd160.
Generate keys with HKDF instead of hash functions, which provides better security properties.

Add xchacha20poly1305 to secret connection. (Due to rebasing, this code has been removed)
2018-07-26 00:09:37 +02:00
Zach
66fe5b7bae rpc: Improve slate for Jenkins (#2070) 2018-07-25 23:37:08 +02:00
Dev Ojha
24b94d7aa4 crypto: Switch hkdfchacha back to xchacha (#2058)
hkdfchachapoly was a construction we came up with. There is no longer any
reason to use it. We should instead just use xchacha for the remaining use
cases we have. (Symmetrically encrypting the keys in the sdk keys package)
2018-07-25 23:12:39 +02:00
Dev Ojha
0bd4fb96f0 crypto: Add benchmarking code for signature schemes (#2061)
* crypto: Add benchmarking code for signature schemes

This does a slight refactor for the key generation code. It now calls a
seperate unexported method to allow generation from a reader. I think this
will actually reduce time in generation, due to no longer initializing an
extra slice. This was needed in order to enable benchmarking.

This uses an internal package for the benchmarking code, so that this can
be standardized without being exported in the public API. The benchmarking
code is derived from agl/ed25519's benchmarking code, and has copied the
license over.

Closes #1984
2018-07-25 23:07:47 +02:00
Dev Ojha
9cfc47a93b makefile: Add make check_dep and remove make ensure_deps (#2055)
This adds a new makefile command, which is used in CI linting, `make check_dep`.
This ensures the toml is in sync with the lock, and that were not pinning to a
branch in any repository.

This also adapts `make get_vendor_deps` to check the lock, in addition to
populating the vendor directory. This removes the need for `make ensure_deps`.

This makes `make get_vendor_deps` consistent between tendermint and the sdk.
2018-07-25 18:09:52 +02:00
Alexander Simmerl
d212292f86 Merge pull request #2066 from tendermint/bucky/merge-master
Bucky/merge master
2018-07-25 17:36:39 +02:00
Ethan Buchman
7ad92c44cb Merge branch 'master' into bucky/merge-master 2018-07-25 11:34:32 -04:00
Dev Ojha
d149d8f96d github: update PR template to indicate changing pending changelog. (#2059) 2018-07-25 11:34:51 +04:00
Ethan Buchman
74b6cc9057 rpc: log error when we timeout getting validators from consensus (#2045) 2018-07-25 10:56:00 +04:00
Anton Kaliaev
60378fd7f9 abci: remove fee (#2043)
Refs #1861

We don't use the fee field and it's likely just confusing.

We can add a backwards-compatible priority field (instead of fee) later.

Note priority is better than fee because it lets the app do the math on how to rank-order transactions, rather than forcing that into tendermint (i.e. if we returned fee, priority would be fee/gas).
2018-07-24 17:28:26 +02:00
Anton Kaliaev
c248ce5ef6 p2p: Reject addrs coming from private peers (#2032)
Refs #1706
2018-07-24 17:10:58 +02:00
Anton Kaliaev
059a03a66a tools: clean up Makefile and remove LICENSE file (#2042)
* clean up Makefile and remove LICENSE file
* remove k8s and build LICENSE files
* remove non-existing target
2018-07-24 16:02:32 +02:00
Alexander Simmerl
abe5b63059 Merge pull request #2040 from tendermint/2027-socket-pv
fix acceptDeadline
2018-07-24 15:52:45 +02:00
Ethan Buchman
a657870b3d update dockerfile 2018-07-24 09:42:08 -04:00
Alexander Simmerl
7e7473ad41 Merge pull request #2044 from tendermint/2019-prometheus
do not overwrite metrics provider in node#NewNode
2018-07-24 15:41:44 +02:00
Anton Kaliaev
75a26ebd6d do not overwrite metrics provider in node#NewNode
also, make running the Prometheus server optional.

Closes #2019
2018-07-24 16:01:11 +04:00
Anton Kaliaev
ad580e2734 fix acceptDeadline
before: 1.000000003s
after: 3.000000000s

Refs #2027
2018-07-24 10:51:12 +04:00
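The before/after values above (1.000000003s vs 3.000000000s) look like a duration constant missing its unit; the sketch below illustrates that failure mode only and is not the actual patch.

```
package privval

import "time"

// A bare integer used as a time.Duration is interpreted as nanoseconds, so a
// missing unit turns an intended 3-second deadline into 3ns.
const (
	buggyAcceptDeadline = time.Duration(3) // 3ns, almost certainly not what was meant
	acceptDeadline      = 3 * time.Second  // 3s, with the unit spelled out
)

func acceptBy(now time.Time) time.Time {
	return now.Add(acceptDeadline)
}
```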
Ethan Buchman
f6705f02c7 fixes post merge 2018-07-23 23:39:22 -04:00
Ethan Buchman
ea31c4836a Merge branch 'develop' into jae/literefactor4 2018-07-23 23:28:14 -04:00
Ethan Buchman
f1093edbe2 remove accidental files 2018-07-23 23:11:53 -04:00
Ethan Buchman
b92860b6c4 Merge pull request #1805 from tendermint/jae/optimize_blockchain
Optimizing blockchain reactor.
2018-07-23 22:46:51 -04:00
Ethan Buchman
54d753e64e fix Gopkg, add changelog 2018-07-23 22:48:38 -04:00
Ethan Buchman
e1b48b16c4 Merge branch 'develop' into jae/optimize_blockchain 2018-07-23 22:16:34 -04:00
Ethan Buchman
7c07235649 Merge pull request #2036 from tendermint/master
Merge master to develop
2018-07-23 22:05:44 -04:00
Jae Kwon
b41b89732d Update store.go
Revert to SetSync for saveABCIResponses() as per Ethan's feedback
2018-07-20 14:38:27 -07:00
Ethan Buchman
9018acde5f tmlibs -> tendermint/libs 2018-07-02 14:58:07 -04:00
Ethan Buchman
5453aa6169 Merge branch 'develop' into jae/literefactor4 2018-07-02 14:57:30 -04:00
Jae Kwon
b51ed132f7 Fix test/p2p/pex circle tests; update consensus 2018-06-27 16:24:21 -07:00
Jae Kwon
8524a8da7f Try to fix circle... 2018-06-27 04:22:30 -07:00
Jae Kwon
cfcbc61449 oops 2018-06-27 04:04:33 -07:00
Jae Kwon
9184733261 try it with new consensus? 2018-06-27 02:34:11 -07:00
Jae Kwon
363146dacf just print node1 2018-06-27 02:03:15 -07:00
Jae Kwon
ad1b722898 bump for circle 2018-06-27 00:41:50 -07:00
Jae Kwon
8163b99a75 print docker output to console to debug circle 2018-06-27 00:37:53 -07:00
Jae Kwon
835c2ee74a Print 2018-06-27 00:09:04 -07:00
Jae Kwon
19fc4ac47c remove abci from gopkg.toml 2018-06-26 23:58:47 -07:00
Jae Kwon
acd976ad5b bump circle 2018-06-26 23:42:00 -07:00
Jae Kwon
37ef5485b4 Add logs to lite/*; Fix rpc status to return consensus height, not blockstore height 2018-06-26 16:53:06 -07:00
Anton Kaliaev
7f4498f8b1 remove no longer needed install_abci_apps script
Fixes
https://circleci.com/gh/tendermint/tendermint/12923?utm_campaign=vcs-integration-link&utm_medium=referral&utm_source=github-build-link
2018-06-26 11:10:54 +04:00
Jae Kwon
538c410bcd Fixes from review 2018-06-25 18:16:16 -07:00
Jae Kwon
c3296f2e01 Garbage collect DBProvider (unoptimized); Certifier creation takes a client 2018-06-25 17:13:00 -07:00
Jae Kwon
242a6037e8 Fixes from review 2018-06-25 17:12:52 -07:00
Jae Kwon
bf0ff212b9 Refactor "lite" to handle delayed validator set changes.
Also, fix consensus liveness issue.
2018-06-25 17:12:25 -07:00
Jae Kwon
a5b7ea93c4 Delay validator set changes by 1 block. 2018-06-25 16:59:00 -07:00
Jae Kwon
8128627f08 Optimizing blockchain reactor.
Should be paired with https://github.com/tendermint/iavl/pull/65.
2018-06-22 21:47:48 -07:00
549 changed files with 38382 additions and 16138 deletions

.circleci/codecov.sh (new file; diff suppressed because it is too large)

@@ -41,10 +41,10 @@ jobs:
key: v3-pkg-cache
paths:
- /go/pkg
- save_cache:
key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
paths:
- /go/src/github.com/tendermint/tendermint
# - save_cache:
# key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
# paths:
# - /go/src/github.com/tendermint/tendermint
build_slate:
<<: *defaults
@@ -53,8 +53,23 @@ jobs:
at: /tmp/workspace
- restore_cache:
key: v3-pkg-cache
- restore_cache:
key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
# https://discuss.circleci.com/t/saving-cache-stopped-working-warning-skipping-this-step-disabled-in-configuration/24423/2
# - restore_cache:
# key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- checkout
- run:
name: tools
command: |
export PATH="$GOBIN:$PATH"
make get_tools
- run:
name: dependencies
command: |
export PATH="$GOBIN:$PATH"
make get_vendor_deps
- run: mkdir -p $GOPATH/src/github.com/tendermint
- run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint
- run:
name: slate docs
command: |
@@ -69,14 +84,35 @@ jobs:
at: /tmp/workspace
- restore_cache:
key: v3-pkg-cache
- restore_cache:
key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
# - restore_cache:
# key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- checkout
- run:
name: tools
command: |
export PATH="$GOBIN:$PATH"
make get_tools
make get_dev_tools
- run:
name: dependencies
command: |
export PATH="$GOBIN:$PATH"
make get_vendor_deps
- run: mkdir -p $GOPATH/src/github.com/tendermint
- run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint
- run:
name: metalinter
command: |
set -ex
export PATH="$GOBIN:$PATH"
make metalinter
- run:
name: check_dep
command: |
set -ex
export PATH="$GOBIN:$PATH"
make check_dep
test_abci_apps:
<<: *defaults
@@ -85,8 +121,22 @@ jobs:
at: /tmp/workspace
- restore_cache:
key: v3-pkg-cache
- restore_cache:
key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
# - restore_cache:
# key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- checkout
- run:
name: tools
command: |
export PATH="$GOBIN:$PATH"
make get_tools
- run:
name: dependencies
command: |
export PATH="$GOBIN:$PATH"
make get_vendor_deps
- run: mkdir -p $GOPATH/src/github.com/tendermint
- run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint
- run:
name: Run abci apps tests
command: |
@@ -102,8 +152,22 @@ jobs:
at: /tmp/workspace
- restore_cache:
key: v3-pkg-cache
- restore_cache:
key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
# - restore_cache:
# key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- checkout
- run:
name: tools
command: |
export PATH="$GOBIN:$PATH"
make get_tools
- run:
name: dependencies
command: |
export PATH="$GOBIN:$PATH"
make get_vendor_deps
- run: mkdir -p $GOPATH/src/github.com/tendermint
- run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint
- run:
name: Run abci-cli tests
command: |
@@ -117,8 +181,22 @@ jobs:
at: /tmp/workspace
- restore_cache:
key: v3-pkg-cache
- restore_cache:
key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
# - restore_cache:
# key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- checkout
- run:
name: tools
command: |
export PATH="$GOBIN:$PATH"
make get_tools
- run:
name: dependencies
command: |
export PATH="$GOBIN:$PATH"
make get_vendor_deps
- run: mkdir -p $GOPATH/src/github.com/tendermint
- run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint
- run: sudo apt-get update && sudo apt-get install -y --no-install-recommends bsdmainutils
- run:
name: Run tests
@@ -132,8 +210,22 @@ jobs:
at: /tmp/workspace
- restore_cache:
key: v3-pkg-cache
- restore_cache:
key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
# - restore_cache:
# key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- checkout
- run:
name: tools
command: |
export PATH="$GOBIN:$PATH"
make get_tools
- run:
name: dependencies
command: |
export PATH="$GOBIN:$PATH"
make get_vendor_deps
- run: mkdir -p $GOPATH/src/github.com/tendermint
- run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint
- run: mkdir -p /tmp/logs
- run:
name: Run tests
@@ -141,7 +233,7 @@ jobs:
for pkg in $(go list github.com/tendermint/tendermint/... | circleci tests split --split-by=timings); do
id=$(basename "$pkg")
GOCACHE=off go test -v -timeout 5m -race -coverprofile=/tmp/workspace/profiles/$id.out -covermode=atomic "$pkg" | tee "/tmp/logs/$id-$RANDOM.log"
GOCACHE=off go test -timeout 5m -race -coverprofile=/tmp/workspace/profiles/$id.out -covermode=atomic "$pkg" | tee "/tmp/logs/$id-$RANDOM.log"
done
- persist_to_workspace:
root: /tmp/workspace
@@ -157,12 +249,48 @@ jobs:
at: /tmp/workspace
- restore_cache:
key: v3-pkg-cache
- restore_cache:
key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
# - restore_cache:
# key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- checkout
- run:
name: tools
command: |
export PATH="$GOBIN:$PATH"
make get_tools
- run:
name: dependencies
command: |
export PATH="$GOBIN:$PATH"
make get_vendor_deps
- run: mkdir -p $GOPATH/src/github.com/tendermint
- run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint
- run:
name: Run tests
command: bash test/persist/test_failure_indices.sh
localnet:
working_directory: /home/circleci/.go_workspace/src/github.com/tendermint/tendermint
machine:
image: circleci/classic:latest
environment:
GOBIN: /home/circleci/.go_workspace/bin
GOPATH: /home/circleci/.go_workspace/
GOOS: linux
GOARCH: amd64
parallelism: 1
steps:
- checkout
- run:
name: run localnet and exit on failure
command: |
set -x
make get_tools
make get_vendor_deps
make build-linux
make localnet-start &
./scripts/localnet-blocks-test.sh 40 5 10 localhost
test_p2p:
environment:
GOBIN: /home/circleci/.go_workspace/bin
@@ -174,14 +302,30 @@ jobs:
- run: mkdir -p $GOPATH/src/github.com/tendermint
- run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint
- run: bash test/p2p/circleci.sh
- store_artifacts:
path: /home/circleci/project/test/p2p/logs
upload_coverage:
<<: *defaults
steps:
- attach_workspace:
at: /tmp/workspace
- restore_cache:
key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
# - restore_cache:
# key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- checkout
- run:
name: tools
command: |
export PATH="$GOBIN:$PATH"
make get_tools
- run:
name: dependencies
command: |
export PATH="$GOBIN:$PATH"
make get_vendor_deps
- run: mkdir -p $GOPATH/src/github.com/tendermint
- run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint
- run:
name: gather
command: |
@@ -193,7 +337,7 @@ jobs:
done
- run:
name: upload
command: bash <(curl -s https://codecov.io/bash) -f coverage.txt
command: bash .circleci/codecov.sh -f coverage.txt
workflows:
version: 2
@@ -218,6 +362,9 @@ workflows:
- test_persistence:
requires:
- setup_dependencies
- localnet:
requires:
- setup_dependencies
- test_p2p
- upload_coverage:
requires:

.github/CODEOWNERS

@@ -4,4 +4,4 @@
* @ebuchman @melekes @xla
# Precious documentation
/docs/ @zramsay @jolesbi
/docs/ @zramsay


@@ -1,42 +0,0 @@
<!-- Thanks for filing an issue! Before hitting the button, please answer these questions.-->
**Is this a BUG REPORT or FEATURE REQUEST?** (choose one):
<!--
If this is a BUG REPORT, please:
- Fill in as much of the template below as you can.
If this is a FEATURE REQUEST, please:
- Describe *in detail* the feature/behavior/change you'd like to see.
In both cases, be ready for followup questions, and please respond in a timely
manner. We might ask you to provide additional logs and data (tendermint & app)
in a case of bug.
-->
**Tendermint version** (use `tendermint version` or `git rev-parse --verify HEAD` if installed from source):
**ABCI app** (name for built-in, URL for self-written if it's publicly available):
**Environment**:
- **OS** (e.g. from /etc/os-release):
- **Install tools**:
- **Others**:
**What happened**:
**What you expected to happen**:
**How to reproduce it** (as minimally and precisely as possible):
**Logs (you can paste a small part showing an error or link a pastebin, gist, etc. containing more of the log file)**:
**Config (you can paste only the changes you've made)**:
**`/dump_consensus_state` output for consensus bugs**
**Anything else do we need to know**:

.github/ISSUE_TEMPLATE/bug-report.md (new file)

@@ -0,0 +1,42 @@
---
name: Bug Report
about: Create a report to help us squash bugs!
---
<!--
Please fill in as much of the template below as you can.
Be ready for followup questions, and please respond in a timely
manner. We might ask you to provide additional logs and data (tendermint & app).
-->
**Tendermint version** (use `tendermint version` or `git rev-parse --verify HEAD` if installed from source):
**ABCI app** (name for built-in, URL for self-written if it's publicly available):
**Environment**:
- **OS** (e.g. from /etc/os-release):
- **Install tools**:
- **Others**:
**What happened**:
**What you expected to happen**:
**Have you tried the latest version**: yes/no
**How to reproduce it** (as minimally and precisely as possible):
**Logs (paste a small part showing an error (< 10 lines) or link a pastebin, gist, etc. containing more of the log file)**:
**Config (you can paste only the changes you've made)**:
**node command runtime flags**:
**`/dump_consensus_state` output for consensus bugs**
**Anything else we need to know**:


@@ -0,0 +1,13 @@
---
name: Feature Request
about: Create a proposal to request a feature
---
<!--
Please describe *in detail* the feature/behavior/change you'd like to see.
Be ready for followup questions, and please respond in a timely
manner.
Word of caution: poorly thought out proposals may be rejected without deliberation
-->


@@ -3,4 +3,4 @@
* [ ] Updated all relevant documentation in docs
* [ ] Updated all code comments where relevant
* [ ] Wrote tests
* [ ] Updated CHANGELOG.md
* [ ] Updated CHANGELOG_PENDING.md

.gitignore

@@ -14,9 +14,11 @@ test/p2p/data/
test/logs
coverage.txt
docs/_build
docs/dist
*.log
abci-cli
docs/node_modules/
index.html.md
scripts/wal2json/wal2json
scripts/cutWALUntil/cutWALUntil
@@ -24,12 +26,19 @@ scripts/cutWALUntil/cutWALUntil
.idea/
*.iml
.vscode/
libs/pubsub/query/fuzz_test/output
shunit2
.tendermint-lite
addrbook.json
*/vendor
*/.glide
.terraform
terraform.tfstate
terraform.tfstate.backup
terraform.tfstate.d
.vscode


@@ -1,5 +1,418 @@
# Changelog
## v0.26.2
*November 15th, 2018*
Special thanks to external contributors on this release: @hleb-albau, @zhuzeyu
Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).
### FEATURES:
- [rpc] [\#2582](https://github.com/tendermint/tendermint/issues/2582) Enable CORS on RPC API (@hleb-albau)
### BUG FIXES:
- [abci] [\#2748](https://github.com/tendermint/tendermint/issues/2748) Unlock mutex in localClient so that even when the app panics (e.g. during CheckTx), consensus continues working
- [abci] [\#2748](https://github.com/tendermint/tendermint/issues/2748) Fix DATA RACE in localClient
- [amino] [\#2822](https://github.com/tendermint/tendermint/issues/2822) Update to v0.14.1 to support compiling on 32-bit platforms
- [rpc] [\#2748](https://github.com/tendermint/tendermint/issues/2748) Drain channel before calling Unsubscribe(All) in `/broadcast_tx_commit`
## v0.26.1
*November 11, 2018*
Special thanks to external contributors on this release: @katakonst
Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).
### IMPROVEMENTS:
- [consensus] [\#2704](https://github.com/tendermint/tendermint/issues/2704) Simplify valid POL round logic
- [docs] [\#2749](https://github.com/tendermint/tendermint/issues/2749) Deduplicate some ABCI docs
- [mempool] More detailed log messages
- [\#2724](https://github.com/tendermint/tendermint/issues/2724)
- [\#2762](https://github.com/tendermint/tendermint/issues/2762)
### BUG FIXES:
- [autofile] [\#2703](https://github.com/tendermint/tendermint/issues/2703) Do not panic when checking Head size
- [crypto/merkle] [\#2756](https://github.com/tendermint/tendermint/issues/2756) Fix crypto/merkle ProofOperators.Verify to check bounds on keypath parts.
- [mempool] fix a bug where we create a WAL despite `wal_dir` being empty
- [p2p] [\#2771](https://github.com/tendermint/tendermint/issues/2771) Fix `peer-id` label name to `peer_id` in prometheus metrics
- [p2p] [\#2797](https://github.com/tendermint/tendermint/pull/2797) Fix IDs in peer NodeInfo and require them for addresses
in AddressBook
- [p2p] [\#2797](https://github.com/tendermint/tendermint/pull/2797) Do not close conn immediately after sending pex addrs in seed mode. Partial fix for [\#2092](https://github.com/tendermint/tendermint/issues/2092).
## v0.26.0
*November 2, 2018*
Special thanks to external contributors on this release:
@bradyjoestar, @connorwstein, @goolAdapter, @HaoyangLiu,
@james-ray, @overbool, @phymbert, @Slamper, @Uzair1995, @yutianwu.
Special thanks to @Slamper for a series of bug reports in our [bug bounty
program](https://hackerone.com/tendermint) which are fixed in this release.
This release is primarily about adding Version fields to various data structures,
optimizing consensus messages for signing and verification in
restricted environments (like HSMs and the Ethereum Virtual Machine), and
aligning the consensus code with the [specification](https://arxiv.org/abs/1807.04938).
It also includes our first take at a generalized merkle proof system, and
changes the length of hashes used for hashing data structures from 20 to 32
bytes.
See the [UPGRADING.md](UPGRADING.md#v0.26.0) for details on upgrading to the new
version.
Please note that we are still making breaking changes to the protocols.
While the new Version fields should help us to keep the software backwards compatible
even while upgrading the protocols, we cannot guarantee that new releases will
be compatible with old chains just yet. We expect there will be another breaking
release or two before the Cosmos Hub launch, but we will otherwise be paying
increasing attention to backwards compatibility. Thanks for bearing with us!
### BREAKING CHANGES:
* CLI/RPC/Config
* [config] [\#2232](https://github.com/tendermint/tendermint/issues/2232) Timeouts are now strings like "3s" and "100ms", not ints
* [config] [\#2505](https://github.com/tendermint/tendermint/issues/2505) Remove Mempool.RecheckEmpty (it was effectively useless anyways)
* [config] [\#2490](https://github.com/tendermint/tendermint/issues/2490) `mempool.wal` is disabled by default
* [privval] [\#2459](https://github.com/tendermint/tendermint/issues/2459) Split `SocketPVMsg`s implementations into Request and Response, where the Response may contain an error message (returned by the remote signer)
* [state] [\#2644](https://github.com/tendermint/tendermint/issues/2644) Add Version field to State, breaking the format of State as
encoded on disk.
* [rpc] [\#2298](https://github.com/tendermint/tendermint/issues/2298) `/abci_query` takes `prove` argument instead of `trusted` and switches the default
behaviour to `prove=false`
* [rpc] [\#2654](https://github.com/tendermint/tendermint/issues/2654) Remove all `node_info.other.*_version` fields in `/status` and
`/net_info`
* [rpc] [\#2636](https://github.com/tendermint/tendermint/issues/2636) Remove
`_params` suffix from fields in `consensus_params`.
* Apps
* [abci] [\#2298](https://github.com/tendermint/tendermint/issues/2298) ResponseQuery.Proof is now a structured merkle.Proof, not just
arbitrary bytes
* [abci] [\#2644](https://github.com/tendermint/tendermint/issues/2644) Add Version to Header and shift all fields by one
* [abci] [\#2662](https://github.com/tendermint/tendermint/issues/2662) Bump the field numbers for some `ResponseInfo` fields to make room for
`AppVersion`
* [abci] [\#2636](https://github.com/tendermint/tendermint/issues/2636) Updates to ConsensusParams
* Remove `Params` suffix from field names
* Add `Params` suffix to message types
* Add new field and type, `Validator ValidatorParams`, to control what types of validator keys are allowed.
* Go API
* [config] [\#2232](https://github.com/tendermint/tendermint/issues/2232) Timeouts are time.Duration, not ints (see the sketch after this list)
* [crypto/merkle & lite] [\#2298](https://github.com/tendermint/tendermint/issues/2298) Various changes to accommodate General Merkle trees
* [crypto/merkle] [\#2595](https://github.com/tendermint/tendermint/issues/2595) Remove all Hasher objects in favor of byte slices
* [crypto/merkle] [\#2635](https://github.com/tendermint/tendermint/issues/2635) merkle.SimpleHashFromTwoHashes is no longer exported
* [node] [\#2479](https://github.com/tendermint/tendermint/issues/2479) Remove node.RunForever
* [rpc/client] [\#2298](https://github.com/tendermint/tendermint/issues/2298) `ABCIQueryOptions.Trusted` -> `ABCIQueryOptions.Prove`
* [types] [\#2298](https://github.com/tendermint/tendermint/issues/2298) Remove `Index` and `Total` fields from `TxProof`.
* [types] [\#2598](https://github.com/tendermint/tendermint/issues/2598)
`VoteTypeXxx` are now of type `SignedMsgType byte` and named `XxxType`, eg.
`PrevoteType`, `PrecommitType`.
* [types] [\#2636](https://github.com/tendermint/tendermint/issues/2636) Rename fields in ConsensusParams to remove `Params` suffixes
* [types] [\#2735](https://github.com/tendermint/tendermint/issues/2735) Simplify Proposal message to align with spec
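For the timeout changes above (duration strings like "3s" / "100ms" in the config, `time.Duration` in the Go API), here is a minimal parsing sketch. The `parseTimeout` helper is made up for illustration; only `time.ParseDuration` is the real standard-library call.

```go
package main

import (
	"fmt"
	"time"
)

// parseTimeout is a hypothetical helper showing how a duration string
// from the config ("3s", "100ms") maps onto Go's time.Duration.
func parseTimeout(s string) (time.Duration, error) {
	d, err := time.ParseDuration(s)
	if err != nil {
		return 0, fmt.Errorf("invalid timeout %q: %v", s, err)
	}
	return d, nil
}

func main() {
	for _, s := range []string{"3s", "100ms"} {
		d, err := parseTimeout(s)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s -> %v\n", s, d)
	}
}
```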
* Blockchain Protocol
* [crypto/tmhash] [\#2732](https://github.com/tendermint/tendermint/issues/2732) TMHASH is now full 32-byte SHA256
* All hashes in the block header and Merkle trees are now 32-bytes
* PubKey Addresses are still only 20-bytes (see the sketch after this list)
* [state] [\#2587](https://github.com/tendermint/tendermint/issues/2587) Require block.Time of the first block to be genesis time
* [state] [\#2644](https://github.com/tendermint/tendermint/issues/2644) Require block.Version to match state.Version
* [types] Update SignBytes for `Vote`/`Proposal`/`Heartbeat`:
* [\#2459](https://github.com/tendermint/tendermint/issues/2459) Use amino encoding instead of JSON in `SignBytes`.
* [\#2598](https://github.com/tendermint/tendermint/issues/2598) Reorder fields and use fixed sized encoding.
* [\#2598](https://github.com/tendermint/tendermint/issues/2598) Change `Type` field from `string` to `byte` and use new
`SignedMsgType` to enumerate.
* [types] [\#2730](https://github.com/tendermint/tendermint/issues/2730) Use
same order for fields in `Vote` as in the SignBytes
* [types] [\#2732](https://github.com/tendermint/tendermint/issues/2732) Remove the address field from the validator hash
* [types] [\#2644](https://github.com/tendermint/tendermint/issues/2644) Add Version struct to Header
* [types] [\#2609](https://github.com/tendermint/tendermint/issues/2609) ConsensusParams.Hash() is the hash of the amino encoded
struct instead of the Merkle tree of the fields
* [types] [\#2670](https://github.com/tendermint/tendermint/issues/2670) Header.Hash() builds Merkle tree out of fields in the same
order they appear in the header, instead of sorting by field name
* [types] [\#2682](https://github.com/tendermint/tendermint/issues/2682) Use proto3 `varint` encoding for ints that are usually unsigned (instead of zigzag encoding).
* [types] [\#2636](https://github.com/tendermint/tendermint/issues/2636) Add Validator field to ConsensusParams
(Used to control which pubkey types validators can use, by abci type).
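As a rough illustration of the hash-length item above: header and Merkle hashes are now the full 32-byte SHA256 digest, while addresses remain 20 bytes (shown here as a truncated digest purely for illustration; treat that detail as an assumption).

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

func main() {
	data := []byte("some header bytes")

	// Block header and Merkle tree hashes: the full 32-byte SHA256 sum.
	sum := sha256.Sum256(data)
	fmt.Printf("32-byte hash: %x\n", sum[:])

	// Addresses stay 20 bytes; a truncated digest is shown only as an example.
	fmt.Printf("20-byte address: %x\n", sum[:20])
}
```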
* P2P Protocol
* [consensus] [\#2652](https://github.com/tendermint/tendermint/issues/2652)
Replace `CommitStepMessage` with `NewValidBlockMessage`
* [consensus] [\#2735](https://github.com/tendermint/tendermint/issues/2735) Simplify `Proposal` message to align with spec
* [consensus] [\#2730](https://github.com/tendermint/tendermint/issues/2730)
Add `Type` field to `Proposal` and use same order of fields as in the
SignBytes for both `Proposal` and `Vote`
* [p2p] [\#2654](https://github.com/tendermint/tendermint/issues/2654) Add `ProtocolVersion` struct with protocol versions to top of
DefaultNodeInfo and require `ProtocolVersion.Block` to match during peer handshake
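A hedged sketch of the handshake rule in the item above: separate p2p, block, and app protocol versions travel with the node info, and only the block version has to match for two peers to connect. The struct layout below is illustrative, not the exact `DefaultNodeInfo` definition.

```go
package main

import "fmt"

// ProtocolVersion mirrors the idea of the new struct: separate P2P,
// block, and app protocol versions carried in the node info.
type ProtocolVersion struct {
	P2P   uint64
	Block uint64
	App   uint64
}

// compatible applies the handshake rule: peers must agree on the block
// protocol version; the other two may differ.
func compatible(a, b ProtocolVersion) bool {
	return a.Block == b.Block
}

func main() {
	us := ProtocolVersion{P2P: 4, Block: 8, App: 1}
	them := ProtocolVersion{P2P: 5, Block: 8, App: 2}
	fmt.Println(compatible(us, them)) // true: only Block has to match
}
```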
### FEATURES:
- [abci] [\#2557](https://github.com/tendermint/tendermint/issues/2557) Add `Codespace` field to `Response{CheckTx, DeliverTx, Query}`
- [abci] [\#2662](https://github.com/tendermint/tendermint/issues/2662) Add `BlockVersion` and `P2PVersion` to `RequestInfo`
- [crypto/merkle] [\#2298](https://github.com/tendermint/tendermint/issues/2298) General Merkle Proof scheme for chaining various types of Merkle trees together
- [docs/architecture] [\#1181](https://github.com/tendermint/tendermint/issues/1181) Split immutable and mutable parts of priv_validator.json
### IMPROVEMENTS:
- Additional Metrics
- [consensus] [\#2169](https://github.com/cosmos/cosmos-sdk/issues/2169)
- [p2p] [\#2169](https://github.com/cosmos/cosmos-sdk/issues/2169)
- [config] [\#2232](https://github.com/tendermint/tendermint/issues/2232) Added ValidateBasic method, which performs basic checks
- [crypto/ed25519] [\#2558](https://github.com/tendermint/tendermint/issues/2558) Switch to use latest `golang.org/x/crypto` through our fork at
github.com/tendermint/crypto
- [libs/log] [\#2707](https://github.com/tendermint/tendermint/issues/2707) Add year to log format (@yutianwu)
- [tools] [\#2238](https://github.com/tendermint/tendermint/issues/2238) Binary dependencies are now locked to a specific git commit
### BUG FIXES:
- [\#2711](https://github.com/tendermint/tendermint/issues/2711) Validate all incoming reactor messages. Fixes various bugs due to negative ints.
- [autofile] [\#2428](https://github.com/tendermint/tendermint/issues/2428) Group.RotateFile needs to call Flush() before rename (@goolAdapter)
- [common] [\#2533](https://github.com/tendermint/tendermint/issues/2533) Fixed a bug in the `BitArray.Or` method
- [common] [\#2506](https://github.com/tendermint/tendermint/issues/2506) Fixed a bug in the `BitArray.Sub` method (@james-ray)
- [common] [\#2534](https://github.com/tendermint/tendermint/issues/2534) Fix `BitArray.PickRandom` to choose uniformly from true bits
- [consensus] [\#1690](https://github.com/tendermint/tendermint/issues/1690) Wait for
timeoutPrecommit before starting next round
- [consensus] [\#1745](https://github.com/tendermint/tendermint/issues/1745) Wait for
Proposal or timeoutProposal before entering prevote
- [consensus] [\#2642](https://github.com/tendermint/tendermint/issues/2642) Only propose ValidBlock, not LockedBlock
- [consensus] [\#2642](https://github.com/tendermint/tendermint/issues/2642) Initialized ValidRound and LockedRound to -1
- [consensus] [\#1637](https://github.com/tendermint/tendermint/issues/1637) Limit the amount of evidence that can be included in a
block
- [consensus] [\#2652](https://github.com/tendermint/tendermint/issues/2652) Ensure valid block property with faulty proposer
- [evidence] [\#2515](https://github.com/tendermint/tendermint/issues/2515) Fix db iter leak (@goolAdapter)
- [libs/event] [\#2518](https://github.com/tendermint/tendermint/issues/2518) Fix event concurrency flaw (@goolAdapter)
- [node] [\#2434](https://github.com/tendermint/tendermint/issues/2434) Make node respond to signal interrupts while sleeping for genesis time
- [state] [\#2616](https://github.com/tendermint/tendermint/issues/2616) Pass nil to NewValidatorSet() when genesis file's Validators field is nil
- [p2p] [\#2555](https://github.com/tendermint/tendermint/issues/2555) Fix p2p switch FlushThrottle value (@goolAdapter)
- [p2p] [\#2668](https://github.com/tendermint/tendermint/issues/2668) Reconnect to originally dialed address (not self-reported address) for persistent peers
## v0.25.0
*September 22, 2018*
Special thanks to external contributors on this release:
@scriptionist, @bradyjoestar, @WALL-E
This release is mostly about the ConsensusParams - removing fields and enforcing MaxGas.
It also addresses some issues found via security audit, removes various unused
functions from `libs/common`, and implements
[ADR-012](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-012-peer-transport.md).
Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).
BREAKING CHANGES:
* CLI/RPC/Config
* [rpc] [\#2391](https://github.com/tendermint/tendermint/issues/2391) /status `result.node_info.other` became a map
* [types] [\#2364](https://github.com/tendermint/tendermint/issues/2364) Remove `TxSize` and `BlockGossip` from `ConsensusParams`
* Maximum tx size is now set implicitly via the `BlockSize.MaxBytes`
* The size of block parts in the consensus is now fixed to 64kB
* Apps
* [mempool] [\#2360](https://github.com/tendermint/tendermint/issues/2360) Mempool tracks the `ResponseCheckTx.GasWanted` and
`ConsensusParams.BlockSize.MaxGas` and enforces:
- `GasWanted <= MaxGas` for every tx
- `(sum of GasWanted in block) <= MaxGas` for block proposal
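A minimal sketch of that enforcement (type and function names are invented for illustration): any tx whose `GasWanted` exceeds `MaxGas` is skipped, and reaping stops once the proposal's running total would exceed `MaxGas`.

```go
package main

import "fmt"

// tx is a stand-in carrying the GasWanted reported by CheckTx.
type tx struct {
	gasWanted int64
}

// reapByGas enforces the two rules: skip any tx with GasWanted > maxGas,
// and stop adding txs once the block's total would exceed maxGas.
func reapByGas(pool []tx, maxGas int64) []tx {
	var picked []tx
	var total int64
	for _, t := range pool {
		if t.gasWanted > maxGas {
			continue // single tx over the per-tx limit
		}
		if total+t.gasWanted > maxGas {
			break // block proposal would exceed maxGas
		}
		total += t.gasWanted
		picked = append(picked, t)
	}
	return picked
}

func main() {
	pool := []tx{{40}, {30}, {120}, {50}}
	fmt.Println(reapByGas(pool, 100)) // [{40} {30}]: 120 is skipped, 50 would overflow the total
}
```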
* Go API
* [libs/common] [\#2431](https://github.com/tendermint/tendermint/issues/2431) Remove Word256 due to lack of use
* [libs/common] [\#2452](https://github.com/tendermint/tendermint/issues/2452) Remove the following functions due to lack of use:
* byteslice.go: cmn.IsZeros, cmn.RightPadBytes, cmn.LeftPadBytes, cmn.PrefixEndBytes
* strings.go: cmn.IsHex, cmn.StripHex
* int.go: Uint64Slice, all put/get int64 methods
FEATURES:
- [rpc] [\#2415](https://github.com/tendermint/tendermint/issues/2415) New `/consensus_params?height=X` endpoint to query the consensus
params at any height (@scriptonist)
- [types] [\#1714](https://github.com/tendermint/tendermint/issues/1714) Add Address to GenesisValidator
- [metrics] [\#2337](https://github.com/tendermint/tendermint/issues/2337) `consensus.block_interval_metrics` is now gauge, not histogram (you will be able to see spikes, if any)
- [libs] [\#2286](https://github.com/tendermint/tendermint/issues/2286) Panic if `autofile` or `db/fsdb` permissions change from 0600.
IMPROVEMENTS:
- [libs/db] [\#2371](https://github.com/tendermint/tendermint/issues/2371) Output error instead of panic when the given `db_backend` is not initialised (@bradyjoestar)
- [mempool] [\#2399](https://github.com/tendermint/tendermint/issues/2399) Make mempool cache a proper LRU (@bradyjoestar)
- [p2p] [\#2126](https://github.com/tendermint/tendermint/issues/2126) Introduce PeerTransport interface to improve isolation of concerns
- [libs/common] [\#2326](https://github.com/tendermint/tendermint/issues/2326) Service returns ErrNotStarted
BUG FIXES:
- [node] [\#2294](https://github.com/tendermint/tendermint/issues/2294) Delay starting node until Genesis time
- [consensus] [\#2048](https://github.com/tendermint/tendermint/issues/2048) Correct peer statistics for marking peer as good
- [rpc] [\#2460](https://github.com/tendermint/tendermint/issues/2460) StartHTTPAndTLSServer() now passes StartTLS() errors back to the caller rather than hanging forever.
- [p2p] [\#2047](https://github.com/tendermint/tendermint/issues/2047) Accept new connections asynchronously
- [tm-bench] [\#2410](https://github.com/tendermint/tendermint/issues/2410) Enforce minimum transaction size (@WALL-E)
## 0.24.0
*September 6th, 2018*
Special thanks to external contributors with PRs included in this release: ackratos, james-ray, bradyjoestar,
peerlink, Ahmah2009, bluele, b00f.
This release includes breaking upgrades in the block header,
including the long awaited changes for delaying validator set updates by one
block to better support light clients.
It also fixes enforcement on the maximum size of blocks, and includes a BFT
timestamp in each block that can be safely used by applications.
There are also some minor breaking changes to the rpc, config, and ABCI.
See the [UPGRADING.md](UPGRADING.md#v0.24.0) for details on upgrading to the new
version.
From here on, breaking changes will be broken down to better reflect how users
are affected by a change.
A few more breaking changes are in the works - each will come with a clear
Architecture Decision Record (ADR) explaining the change. You can review ADRs
[here](https://github.com/tendermint/tendermint/tree/develop/docs/architecture)
or in the [open Pull Requests](https://github.com/tendermint/tendermint/pulls).
You can also check in on the [issues marked as
breaking](https://github.com/tendermint/tendermint/issues?q=is%3Aopen+is%3Aissue+label%3Abreaking).
BREAKING CHANGES:
* CLI/RPC/Config
- [config] [\#2169](https://github.com/tendermint/tendermint/issues/2169) Replace MaxNumPeers with MaxNumInboundPeers and MaxNumOutboundPeers
- [config] [\#2300](https://github.com/tendermint/tendermint/issues/2300) Reduce default mempool size from 100k to 5k, until ABCI rechecking is implemented.
- [rpc] [\#1815](https://github.com/tendermint/tendermint/issues/1815) `/commit` returns a `signed_header` field instead of everything being top-level
* Apps
- [abci] Added address of the original proposer of the block to Header
- [abci] Change ABCI Header to match Tendermint exactly
- [abci] [\#2159](https://github.com/tendermint/tendermint/issues/2159) Update use of `Validator` (see
[ADR-018](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-018-ABCI-Validators.md)):
- Remove PubKey from `Validator` (so it's just Address and Power)
- Introduce `ValidatorUpdate` (with just PubKey and Power)
- InitChain and EndBlock use ValidatorUpdate
- Update field names and types in BeginBlock
- [state] [\#1815](https://github.com/tendermint/tendermint/issues/1815) Validator set changes are now delayed by one block
- updates returned in ResponseEndBlock for block H will be included in RequestBeginBlock for block H+2
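A toy timeline of that one-block delay (heights and labels are illustrative only): an update returned by `EndBlock` at height H is first reflected in the validator set used at height H+2.

```go
package main

import "fmt"

func main() {
	// activeAt maps the height at which a validator update takes effect
	// to a label for that update.
	activeAt := map[int64]string{}

	for h := int64(1); h <= 4; h++ {
		if u, ok := activeAt[h]; ok {
			fmt.Printf("BeginBlock H=%d: validator set now includes %s\n", h, u)
		}
		if h == 1 {
			// EndBlock at H=1 returns an update; it becomes active at H+2=3.
			activeAt[h+2] = "update returned by EndBlock(1)"
		}
	}
}
```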
* Go API
- [lite] [\#1815](https://github.com/tendermint/tendermint/issues/1815) Complete refactor of the package
- [node] [\#2212](https://github.com/tendermint/tendermint/issues/2212) NewNode now accepts a `*p2p.NodeKey` (@bradyjoestar)
- [libs/common] [\#2199](https://github.com/tendermint/tendermint/issues/2199) Remove Fmt, in favor of fmt.Sprintf
- [libs/common] SplitAndTrim was deleted
- [libs/common] [\#2274](https://github.com/tendermint/tendermint/issues/2274) Remove unused Math functions like MaxInt, MaxInt64,
MinInt, MinInt64 (@Ahmah2009)
- [libs/clist] Panics if list extends beyond MaxLength
- [crypto] [\#2205](https://github.com/tendermint/tendermint/issues/2205) Rename AminoRoute variables to no longer be prefixed by signature type.
* Blockchain Protocol
- [state] [\#1815](https://github.com/tendermint/tendermint/issues/1815) Validator set changes are now delayed by one block (!)
- Add NextValidatorSet to State, changes on-disk representation of state
- [state] [\#2184](https://github.com/tendermint/tendermint/issues/2184) Enforce ConsensusParams.BlockSize.MaxBytes (See
[ADR-020](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-020-block-size.md)).
- Remove ConsensusParams.BlockSize.MaxTxs
- Introduce maximum sizes for all components of a block, including ChainID
- [types] Updates to the block Header:
- [\#1815](https://github.com/tendermint/tendermint/issues/1815) NextValidatorsHash - hash of the validator set for the next block,
so the current validators actually sign over the hash for the new
validators
- [\#2106](https://github.com/tendermint/tendermint/issues/2106) ProposerAddress - address of the block's original proposer
- [consensus] [\#2203](https://github.com/tendermint/tendermint/issues/2203) Implement BFT time
- Timestamp in block must be monotonic and equal the median of timestamps in block's LastCommit (see the sketch after this list)
- [crypto] [\#2239](https://github.com/tendermint/tendermint/issues/2239) Secp256k1 signature changes (See
[ADR-014](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-014-secp-malleability.md)):
- format changed from DER to `r || s`, both little endian encoded as 32 bytes.
- malleability removed by requiring `s` to be in canonical form.
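The BFT-time rule a few items above can be sketched as a median computation. This version is unweighted for brevity (the actual rule weights timestamps by validator voting power), so treat it as an approximation.

```go
package main

import (
	"fmt"
	"sort"
	"time"
)

// medianTime returns the middle element of the commit timestamps, so a
// minority of skewed clocks cannot drag the block time around.
// (Unweighted for illustration; the real rule weights by voting power.)
func medianTime(ts []time.Time) time.Time {
	sorted := append([]time.Time(nil), ts...)
	sort.Slice(sorted, func(i, j int) bool { return sorted[i].Before(sorted[j]) })
	return sorted[len(sorted)/2]
}

func main() {
	base := time.Date(2018, 9, 6, 12, 0, 0, 0, time.UTC)
	votes := []time.Time{
		base,
		base.Add(2 * time.Second),
		base.Add(90 * time.Second), // one badly skewed clock
	}
	fmt.Println(medianTime(votes)) // 2018-09-06 12:00:02 +0000 UTC
}
```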
* P2P Protocol
- [p2p] [\#2263](https://github.com/tendermint/tendermint/issues/2263) Update secret connection to use a little endian encoded nonce
- [blockchain] [\#2213](https://github.com/tendermint/tendermint/issues/2213) Fix Amino routes for blockchain reactor messages
(@peerlink)
FEATURES:
- [types] [\#2015](https://github.com/tendermint/tendermint/issues/2015) Allow genesis file to have 0 validators (@b00f)
- Initial validator set can be determined by the app in ResponseInitChain
- [rpc] [\#2161](https://github.com/tendermint/tendermint/issues/2161) New event `ValidatorSetUpdates` for when the validator set changes
- [crypto/multisig] [\#2164](https://github.com/tendermint/tendermint/issues/2164) Introduce multisig pubkey and signature format
- [libs/db] [\#2293](https://github.com/tendermint/tendermint/issues/2293) Allow passing options through when creating instances of leveldb dbs
IMPROVEMENTS:
- [docs] Lint documentation with `write-good` and `stop-words`.
- [docs] [\#2249](https://github.com/tendermint/tendermint/issues/2249) Refactor, deduplicate, and improve the ABCI docs and spec (with thanks to @ttmc).
- [scripts] [\#2196](https://github.com/tendermint/tendermint/issues/2196) Added json2wal tool, which is supposed to help our users restore corrupted WAL files and compose test WAL files (@bradyjoestar)
- [mempool] [\#2234](https://github.com/tendermint/tendermint/issues/2234) Now stores txs by hash inside of the cache, to mitigate memory leakage
- [mempool] [\#2166](https://github.com/tendermint/tendermint/issues/2166) Set explicit capacity for map when updating txs (@bluele)
BUG FIXES:
- [config] [\#2284](https://github.com/tendermint/tendermint/issues/2284) Replace `db_path` with `db_dir` from automatically generated configuration files.
- [mempool] [\#2188](https://github.com/tendermint/tendermint/issues/2188) Fix OOM issue from cache map and list getting out of sync
- [state] [\#2051](https://github.com/tendermint/tendermint/issues/2051) KV store index supports searching by `tx.height` (@ackratos)
- [rpc] [\#2327](https://github.com/tendermint/tendermint/issues/2327) `/dial_peers` does not try to dial existing peers
- [node] [\#2323](https://github.com/tendermint/tendermint/issues/2323) Filter empty strings from config lists (@james-ray)
- [abci/client] [\#2236](https://github.com/tendermint/tendermint/issues/2236) Fix closing GRPC connection (@bradyjoestar)
## 0.23.1
*August 22nd, 2018*
BUG FIXES:
- [libs/autofile] [\#2261](https://github.com/tendermint/tendermint/issues/2261) Fix log rotation so it actually happens.
- Fixes issues with consensus WAL growing unbounded ala [\#2259](https://github.com/tendermint/tendermint/issues/2259)
## 0.23.0
*August 5th, 2018*
This release includes breaking upgrades in our P2P encryption,
some ABCI messages, and how we encode time and signatures.
A few more changes are still coming to the Header, ABCI,
and validator set handling to better support light clients, BFT time, and
upgrades. Most notably, validator set changes will be delayed by one block (see
[#1815][i1815]).
We also removed `make ensure_deps` in favour of `make get_vendor_deps`.
BREAKING CHANGES:
- [abci] Changed time format from int64 to google.protobuf.Timestamp
- [abci] Changed Validators to LastCommitInfo in RequestBeginBlock
- [abci] Removed Fee from ResponseDeliverTx and ResponseCheckTx
- [crypto] Switch crypto.Signature from interface to []byte for space efficiency
[#2128](https://github.com/tendermint/tendermint/pull/2128)
- NOTE: this means signatures no longer have the prefix bytes in Amino
binary nor the `type` field in Amino JSON. They're just bytes.
- [p2p] Remove salsa and ripemd primitives, in favor of using chacha as a stream cipher, and hkdf [#2054](https://github.com/tendermint/tendermint/pull/2054)
- [tools] Removed `make ensure_deps` in favor of `make get_vendor_deps`
- [types] CanonicalTime uses nanoseconds instead of clipping to ms
- breaks serialization/signing of all messages with a timestamp
FEATURES:
- [tools] Added `make check_dep`
- ensures gopkg.lock is synced with gopkg.toml
- ensures no branches are used in the gopkg.toml
IMPROVEMENTS:
- [blockchain] Improve fast-sync logic
[#1805](https://github.com/tendermint/tendermint/pull/1805)
- tweak params
- only process one block at a time to avoid starving
- [common] bit array functions which take in another parameter are now thread safe
- [crypto] Switch hkdfchachapoly1305 to xchachapoly1305
- [p2p] begin connecting to peers as soon a seed node provides them to you ([#2093](https://github.com/tendermint/tendermint/issues/2093))
BUG FIXES:
- [common] Safely handle cases where atomic write files already exist [#2109](https://github.com/tendermint/tendermint/issues/2109)
- [privval] fix a deadline for accepting new connections in socket private
validator.
- [p2p] Allow startup if a configured seed node's IP can't be resolved ([#1716](https://github.com/tendermint/tendermint/issues/1716))
- [node] Fully exit when CTRL-C is pressed even if consensus state panics [#2072](https://github.com/tendermint/tendermint/issues/2072)
[i1815]: https://github.com/tendermint/tendermint/pull/1815
## 0.22.8
*July 26th, 2018*
BUG FIXES
- [consensus, blockchain] Fix 0.22.7 below.
## 0.22.7
*July 26th, 2018*
@@ -36,9 +449,10 @@ BREAKING CHANGES:
IMPROVEMENTS:
- [abci, libs/common] Generated gogoproto static marshaller methods
- [config] Increase default send/recv rates to 5 mB/s
- [p2p] reject addresses coming from private peers
- [p2p] allow persistent peers to be private
BUG FIXES
BUG FIXES:
- [mempool] fixed a race condition when `create_empty_blocks=false` where a
transaction is published at an old height.
- [p2p] dial external IP setup by `persistent_peers`, not internal NAT IP
@@ -579,7 +993,7 @@ BREAKING CHANGES:
- use scripts/wal2json to convert to json for debugging
FEATURES:
- new `certifiers` pkg contains the tendermint light-client library (name subject to change)!
- new `Verifiers` pkg contains the tendermint light-client library (name subject to change)!
- rpc: `/genesis` includes the `app_options` .
- rpc: `/abci_query` takes an additional `height` parameter to support historical queries.
- rpc/client: new ABCIQueryWithOptions supports options like `trusted` (set false to get a proof) and `height` to query a historical height.

29
CHANGELOG_PENDING.md Normal file
View File

@@ -0,0 +1,29 @@
# Pending
## v0.26.3
*TBD*
Special thanks to external contributors on this release:
Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).
### BREAKING CHANGES:
* CLI/RPC/Config
* Apps
* Go API
* Blockchain Protocol
* P2P Protocol
### FEATURES:
### IMPROVEMENTS:
### BUG FIXES:

View File

@@ -6,7 +6,7 @@ This code of conduct applies to all projects run by the Tendermint/COSMOS team a
# Conduct
## Contact: adrian@tendermint.com
## Contact: conduct@tendermint.com
* We are committed to providing a friendly, safe and welcoming environment for all, regardless of level of experience, gender, gender identity and expression, sexual orientation, disability, personal appearance, body size, race, ethnicity, age, religion, nationality, or other similar characteristic.

View File

@@ -27,8 +27,8 @@ Of course, replace `ebuchman` with your git handle.
To pull in updates from the origin repo, run
* `git fetch upstream`
* `git rebase upstream/master` (or whatever branch you want)
* `git fetch upstream`
* `git rebase upstream/master` (or whatever branch you want)
Please don't make Pull Requests to `master`.
@@ -50,6 +50,11 @@ as apps, tools, and the core, should use dep.
Run `dep status` to get a list of vendor dependencies that may not be
up-to-date.
When updating dependencies, please only update the particular dependencies you
need. Instead of running `dep ensure -update`, which will update anything,
specify exactly the dependency you want to update, eg.
`dep ensure -update github.com/tendermint/go-amino`.
## Vagrant
If you are a [Vagrant](https://www.vagrantup.com/) user, you can get started
@@ -73,34 +78,41 @@ tested by circle using `go test -v -race ./...`. If not, they will need a
`circle.yml`. Ideally, every repo has a `Makefile` that defines `make test` and
includes its continuous integration status using a badge in the `README.md`.
## Changelog
## Branching Model and Release
User-facing repos should adhere to the branching model: http://nvie.com/posts/a-successful-git-branching-model/.
That is, these repos should be well versioned, and any merge to master requires a version bump and tagged release.
Libraries need not follow the model strictly, but would be wise to,
especially `go-p2p` and `go-rpc`, as their versions are referenced in tendermint core.
All repos should adhere to the branching model: http://nvie.com/posts/a-successful-git-branching-model/.
This means that all pull-requests should be made against develop. Any merge to
master constitutes a tagged release.
### Development Procedure:
- the latest state of development is on `develop`
- `develop` must never fail `make test`
- no --force onto `develop` (except when reverting a broken commit, which should seldom happen)
- never --force onto `develop` (except when reverting a broken commit, which should seldom happen)
- create a development branch either on github.com/tendermint/tendermint, or your fork (using `git remote add origin`)
- before submitting a pull request, begin `git rebase` on top of `develop`
- make changes and update the `CHANGELOG_PENDING.md` to record your change
- before submitting a pull request, run `git rebase` on top of the latest `develop`
### Pull Merge Procedure:
- ensure pull branch is rebased on develop
- ensure pull branch is based on a recent develop
- run `make test` to ensure that all tests pass
- merge pull request
- the `unstable` branch may be used to aggregate pull merges before testing once
- push master may request that pull requests be rebased on top of `unstable`
- the `unstable` branch may be used to aggregate pull merges before fixing tests
### Release Procedure:
- start on `develop`
- run integration tests (see `test_integrations` in Makefile)
- prepare changelog/release issue
- prepare changelog:
- copy `CHANGELOG_PENDING.md` to `CHANGELOG.md`
- run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for
all issues
- run `bash ./scripts/authors.sh` to get a list of authors since the latest
release, and add the github aliases of external contributors to the top of
the changelog. To lookup an alias from an email, try `bash
./scripts/authors.sh <email>`
- bump versions
- push to release-vX.X.X to run the extended integration tests on the CI
- push to release/vX.X.X to run the extended integration tests on the CI
- merge to master
- merge master back to develop

250
Gopkg.lock generated
View File

@@ -3,48 +3,56 @@
[[projects]]
branch = "master"
digest = "1:d6afaeed1502aa28e80a4ed0981d570ad91b2579193404256ce672ed0a609e0d"
name = "github.com/beorn7/perks"
packages = ["quantile"]
pruneopts = "UT"
revision = "3a771d992973f24aa725d07868b467d1ddfceafb"
[[projects]]
branch = "master"
digest = "1:c0decf632843204d2b8781de7b26e7038584e2dcccc7e2f401e88ae85b1df2b7"
name = "github.com/btcsuite/btcd"
packages = ["btcec"]
revision = "f673a4b563b57b9a95832545c878669a7fa801d9"
pruneopts = "UT"
revision = "67e573d211ace594f1366b4ce9d39726c4b19bd0"
[[projects]]
digest = "1:1d8e1cb71c33a9470bbbae09bfec09db43c6bf358dfcae13cd8807c4e2a9a2bf"
name = "github.com/btcsuite/btcutil"
packages = [
"base58",
"bech32"
"bech32",
]
pruneopts = "UT"
revision = "d4cc87b860166d00d6b5b9e0d3b3d71d6088d4d4"
[[projects]]
digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec"
name = "github.com/davecgh/go-spew"
packages = ["spew"]
revision = "346938d642f2ec3594ed81d874461961cd0faa76"
version = "v1.1.0"
[[projects]]
name = "github.com/ebuchman/fail-test"
packages = ["."]
revision = "95f809107225be108efcf10a3509e4ea6ceef3c4"
pruneopts = "UT"
revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73"
version = "v1.1.1"
[[projects]]
digest = "1:544229a3ca0fb2dd5ebc2896d3d2ff7ce096d9751635301e44e37e761349ee70"
name = "github.com/fortytw2/leaktest"
packages = ["."]
pruneopts = "UT"
revision = "a5ef70473c97b71626b9abeda80ee92ba2a7de9e"
version = "v1.2.0"
[[projects]]
digest = "1:abeb38ade3f32a92943e5be54f55ed6d6e3b6602761d74b4aab4c9dd45c18abd"
name = "github.com/fsnotify/fsnotify"
packages = ["."]
pruneopts = "UT"
revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9"
version = "v1.4.7"
[[projects]]
digest = "1:fdf5169073fb0ad6dc12a70c249145e30f4058647bea25f0abd48b6d9f228a11"
name = "github.com/go-kit/kit"
packages = [
"log",
@@ -53,24 +61,30 @@
"metrics",
"metrics/discard",
"metrics/internal/lv",
"metrics/prometheus"
"metrics/prometheus",
]
pruneopts = "UT"
revision = "4dc7be5d2d12881735283bcab7352178e190fc71"
version = "v0.6.0"
[[projects]]
digest = "1:31a18dae27a29aa074515e43a443abfd2ba6deb6d69309d8d7ce789c45f34659"
name = "github.com/go-logfmt/logfmt"
packages = ["."]
pruneopts = "UT"
revision = "390ab7935ee28ec6b286364bba9b4dd6410cb3d5"
version = "v0.3.0"
[[projects]]
digest = "1:586ea76dbd0374d6fb649a91d70d652b7fe0ccffb8910a77468e7702e7901f3d"
name = "github.com/go-stack/stack"
packages = ["."]
revision = "259ab82a6cad3992b4e21ff5cac294ccb06474bc"
version = "v1.7.0"
pruneopts = "UT"
revision = "2fee6af1a9795aafbe0253a0cfbdf668e1fb8a9a"
version = "v1.8.0"
[[projects]]
digest = "1:35621fe20f140f05a0c4ef662c26c0ab4ee50bca78aa30fe87d33120bd28165e"
name = "github.com/gogo/protobuf"
packages = [
"gogoproto",
@@ -78,37 +92,52 @@
"proto",
"protoc-gen-gogo/descriptor",
"sortkeys",
"types"
"types",
]
pruneopts = "UT"
revision = "636bf0302bc95575d69441b25a2603156ffdddf1"
version = "v1.1.1"
[[projects]]
digest = "1:17fe264ee908afc795734e8c4e63db2accabaf57326dbf21763a7d6b86096260"
name = "github.com/golang/protobuf"
packages = [
"proto",
"ptypes",
"ptypes/any",
"ptypes/duration",
"ptypes/timestamp"
"ptypes/timestamp",
]
pruneopts = "UT"
revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265"
version = "v1.1.0"
[[projects]]
branch = "master"
digest = "1:4a0c6bb4805508a6287675fac876be2ac1182539ca8a32468d8128882e9d5009"
name = "github.com/golang/snappy"
packages = ["."]
pruneopts = "UT"
revision = "2e65f85255dbc3072edf28d6b5b8efc472979f5a"
[[projects]]
digest = "1:43dd08a10854b2056e615d1b1d22ac94559d822e1f8b6fcc92c1a1057e85188e"
name = "github.com/gorilla/websocket"
packages = ["."]
pruneopts = "UT"
revision = "ea4d1f681babbce9545c9c5f3d5194a789c89f5b"
version = "v1.2.0"
[[projects]]
branch = "master"
digest = "1:b0c25f00bad20d783d259af2af8666969e2fc343fa0dc9efe52936bbd67fb758"
name = "github.com/rs/cors"
packages = ["."]
pruneopts = "UT"
revision = "9a47f48565a795472d43519dd49aac781f3034fb"
version = "v1.6.0"
[[projects]]
digest = "1:ea40c24cdbacd054a6ae9de03e62c5f252479b96c716375aace5c120d68647c8"
name = "github.com/hashicorp/hcl"
packages = [
".",
@@ -119,154 +148,198 @@
"hcl/token",
"json/parser",
"json/scanner",
"json/token"
"json/token",
]
revision = "ef8a98b0bbce4a65b5aa4c368430a80ddc533168"
pruneopts = "UT"
revision = "8cb6e5b959231cc1119e43259c4a608f9c51a241"
version = "v1.0.0"
[[projects]]
digest = "1:870d441fe217b8e689d7949fef6e43efbc787e50f200cb1e70dbca9204a1d6be"
name = "github.com/inconshreveable/mousetrap"
packages = ["."]
pruneopts = "UT"
revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
version = "v1.0"
[[projects]]
branch = "master"
digest = "1:39b27d1381a30421f9813967a5866fba35dc1d4df43a6eefe3b7a5444cb07214"
name = "github.com/jmhodges/levigo"
packages = ["."]
pruneopts = "UT"
revision = "c42d9e0ca023e2198120196f842701bb4c55d7b9"
[[projects]]
branch = "master"
digest = "1:a64e323dc06b73892e5bb5d040ced475c4645d456038333883f58934abbf6f72"
name = "github.com/kr/logfmt"
packages = ["."]
pruneopts = "UT"
revision = "b84e30acd515aadc4b783ad4ff83aff3299bdfe0"
[[projects]]
digest = "1:c568d7727aa262c32bdf8a3f7db83614f7af0ed661474b24588de635c20024c7"
name = "github.com/magiconair/properties"
packages = ["."]
pruneopts = "UT"
revision = "c2353362d570a7bfa228149c62842019201cfb71"
version = "v1.8.0"
[[projects]]
digest = "1:ff5ebae34cfbf047d505ee150de27e60570e8c394b3b8fdbb720ff6ac71985fc"
name = "github.com/matttproud/golang_protobuf_extensions"
packages = ["pbutil"]
pruneopts = "UT"
revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c"
version = "v1.0.1"
[[projects]]
branch = "master"
digest = "1:53bc4cd4914cd7cd52139990d5170d6dc99067ae31c56530621b18b35fc30318"
name = "github.com/mitchellh/mapstructure"
packages = ["."]
revision = "f15292f7a699fcc1a38a80977f80a046874ba8ac"
pruneopts = "UT"
revision = "3536a929edddb9a5b34bd6861dc4a9647cb459fe"
version = "v1.1.2"
[[projects]]
digest = "1:95741de3af260a92cc5c7f3f3061e85273f5a81b5db20d4bd68da74bd521675e"
name = "github.com/pelletier/go-toml"
packages = ["."]
pruneopts = "UT"
revision = "c01d1270ff3e442a8a57cddc1c92dc1138598194"
version = "v1.2.0"
[[projects]]
digest = "1:40e195917a951a8bf867cd05de2a46aaf1806c50cf92eebf4c16f78cd196f747"
name = "github.com/pkg/errors"
packages = ["."]
pruneopts = "UT"
revision = "645ef00459ed84a119197bfb8d8205042c6df63d"
version = "v0.8.0"
[[projects]]
digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe"
name = "github.com/pmezard/go-difflib"
packages = ["difflib"]
pruneopts = "UT"
revision = "792786c7400a136282c1664665ae0a8db921c6c2"
version = "v1.0.0"
[[projects]]
digest = "1:c1a04665f9613e082e1209cf288bf64f4068dcd6c87a64bf1c4ff006ad422ba0"
name = "github.com/prometheus/client_golang"
packages = [
"prometheus",
"prometheus/promhttp"
"prometheus/promhttp",
]
pruneopts = "UT"
revision = "ae27198cdd90bf12cd134ad79d1366a6cf49f632"
[[projects]]
branch = "master"
digest = "1:2d5cd61daa5565187e1d96bae64dbbc6080dacf741448e9629c64fd93203b0d4"
name = "github.com/prometheus/client_model"
packages = ["go"]
pruneopts = "UT"
revision = "5c3871d89910bfb32f5fcab2aa4b9ec68e65a99f"
[[projects]]
branch = "master"
digest = "1:db712fde5d12d6cdbdf14b777f0c230f4ff5ab0be8e35b239fc319953ed577a4"
name = "github.com/prometheus/common"
packages = [
"expfmt",
"internal/bitbucket.org/ww/goautoneg",
"model"
"model",
]
revision = "7600349dcfe1abd18d72d3a1770870d9800a7801"
pruneopts = "UT"
revision = "7e9e6cabbd393fc208072eedef99188d0ce788b6"
[[projects]]
branch = "master"
digest = "1:ef74914912f99c79434d9c09658274678bc85080ebe3ab32bec3940ebce5e1fc"
name = "github.com/prometheus/procfs"
packages = [
".",
"internal/util",
"nfs",
"xfs"
"xfs",
]
revision = "ae68e2d4c00fed4943b5f6698d504a5fe083da8a"
pruneopts = "UT"
revision = "185b4288413d2a0dd0806f78c90dde719829e5ae"
[[projects]]
digest = "1:c4556a44e350b50a490544d9b06e9fba9c286c21d6c0e47f54f3a9214597298c"
name = "github.com/rcrowley/go-metrics"
packages = ["."]
pruneopts = "UT"
revision = "e2704e165165ec55d062f5919b4b29494e9fa790"
[[projects]]
digest = "1:6a4a11ba764a56d2758899ec6f3848d24698d48442ebce85ee7a3f63284526cd"
name = "github.com/spf13/afero"
packages = [
".",
"mem"
"mem",
]
revision = "787d034dfe70e44075ccc060d346146ef53270ad"
version = "v1.1.1"
pruneopts = "UT"
revision = "d40851caa0d747393da1ffb28f7f9d8b4eeffebd"
version = "v1.1.2"
[[projects]]
digest = "1:08d65904057412fc0270fc4812a1c90c594186819243160dc779a402d4b6d0bc"
name = "github.com/spf13/cast"
packages = ["."]
revision = "8965335b8c7107321228e3e3702cab9832751bac"
version = "v1.2.0"
pruneopts = "UT"
revision = "8c9545af88b134710ab1cd196795e7f2388358d7"
version = "v1.3.0"
[[projects]]
digest = "1:7ffc0983035bc7e297da3688d9fe19d60a420e9c38bef23f845c53788ed6a05e"
name = "github.com/spf13/cobra"
packages = ["."]
pruneopts = "UT"
revision = "7b2c5ac9fc04fc5efafb60700713d4fa609b777b"
version = "v0.0.1"
[[projects]]
branch = "master"
digest = "1:68ea4e23713989dc20b1bded5d9da2c5f9be14ff9885beef481848edd18c26cb"
name = "github.com/spf13/jwalterweatherman"
packages = ["."]
revision = "7c0cea34c8ece3fbeb2b27ab9b59511d360fb394"
pruneopts = "UT"
revision = "4a4406e478ca629068e7768fc33f3f044173c0a6"
version = "v1.0.0"
[[projects]]
digest = "1:c1b1102241e7f645bc8e0c22ae352e8f0dc6484b6cb4d132fa9f24174e0119e2"
name = "github.com/spf13/pflag"
packages = ["."]
revision = "583c0c0531f06d5278b7d917446061adc344b5cd"
version = "v1.0.1"
pruneopts = "UT"
revision = "298182f68c66c05229eb03ac171abe6e309ee79a"
version = "v1.0.3"
[[projects]]
digest = "1:f8e1a678a2571e265f4bf91a3e5e32aa6b1474a55cb0ea849750cc177b664d96"
name = "github.com/spf13/viper"
packages = ["."]
pruneopts = "UT"
revision = "25b30aa063fc18e48662b86996252eabdcf2f0c7"
version = "v1.0.0"
[[projects]]
digest = "1:7e8d267900c7fa7f35129a2a37596e38ed0f11ca746d6d9ba727980ee138f9f6"
name = "github.com/stretchr/testify"
packages = [
"assert",
"require"
"require",
]
pruneopts = "UT"
revision = "12b6f73e6084dad08a7c6e575284b177ecafbc71"
version = "v1.2.1"
[[projects]]
branch = "master"
digest = "1:59483b8e8183f10ab21a85ba1f4cbb4a2335d48891801f79ed7b9499f44d383c"
name = "github.com/syndtr/goleveldb"
packages = [
"leveldb",
@@ -280,34 +353,36 @@
"leveldb/opt",
"leveldb/storage",
"leveldb/table",
"leveldb/util"
"leveldb/util",
]
revision = "c4c61651e9e37fa117f53c5a906d3b63090d8445"
pruneopts = "UT"
revision = "6b91fda63f2e36186f1c9d0e48578defb69c5d43"
[[projects]]
branch = "master"
name = "github.com/tendermint/ed25519"
packages = [
".",
"edwards25519",
"extra25519"
]
revision = "d8387025d2b9d158cf4efb07e7ebf814bcce2057"
digest = "1:605b6546f3f43745695298ec2d342d3e952b6d91cdf9f349bea9315f677d759f"
name = "github.com/tendermint/btcd"
packages = ["btcec"]
pruneopts = "UT"
revision = "e5840949ff4fff0c56f9b6a541e22b63581ea9df"
[[projects]]
digest = "1:ad9c4c1a4e7875330b1f62906f2830f043a23edb5db997e3a5ac5d3e6eadf80a"
name = "github.com/tendermint/go-amino"
packages = ["."]
revision = "2106ca61d91029c931fd54968c2bb02dc96b1412"
version = "0.10.1"
pruneopts = "UT"
revision = "dc14acf9ef15f85828bfbc561ed9dd9d2a284885"
version = "v0.14.1"
[[projects]]
branch = "master"
digest = "1:72b71e3a29775e5752ed7a8012052a3dee165e27ec18cedddae5288058f09acf"
name = "golang.org/x/crypto"
packages = [
"bcrypt",
"blowfish",
"chacha20poly1305",
"curve25519",
"ed25519",
"ed25519/internal/edwards25519",
"hkdf",
"internal/chacha20",
"internal/subtle",
@@ -317,11 +392,14 @@
"openpgp/errors",
"poly1305",
"ripemd160",
"salsa20/salsa"
"salsa20/salsa",
]
revision = "a2144134853fc9a27a7b1e3eb4f19f1a76df13c9"
pruneopts = "UT"
revision = "3764759f34a542a3aef74d6b02e35be7ab893bba"
source = "github.com/tendermint/crypto"
[[projects]]
digest = "1:d36f55a999540d29b6ea3c2ea29d71c76b1d9853fdcd3e5c5cb4836f2ba118f1"
name = "golang.org/x/net"
packages = [
"context",
@@ -331,20 +409,24 @@
"idna",
"internal/timeseries",
"netutil",
"trace"
"trace",
]
pruneopts = "UT"
revision = "292b43bbf7cb8d35ddf40f8d5100ef3837cced3f"
[[projects]]
branch = "master"
digest = "1:6f86e2f2e2217cd4d74dec6786163cf80e4d2b99adb341ecc60a45113b844dca"
name = "golang.org/x/sys"
packages = [
"cpu",
"unix"
"unix",
]
revision = "ac767d655b305d4e9612f5f6e33120b9176c4ad4"
pruneopts = "UT"
revision = "7e31e0c00fa05cb5fbf4347b585621d6709e19a4"
[[projects]]
digest = "1:a2ab62866c75542dd18d2b069fec854577a20211d7c0ea6ae746072a1dccdd18"
name = "golang.org/x/text"
packages = [
"collate",
@@ -360,17 +442,22 @@
"unicode/bidi",
"unicode/cldr",
"unicode/norm",
"unicode/rangetable"
"unicode/rangetable",
]
pruneopts = "UT"
revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
version = "v0.3.0"
[[projects]]
branch = "master"
digest = "1:56b0bca90b7e5d1facf5fbdacba23e4e0ce069d25381b8e2f70ef1e7ebfb9c1a"
name = "google.golang.org/genproto"
packages = ["googleapis/rpc/status"]
revision = "7fd901a49ba6a7f87732eb344f6e3c5b19d1b200"
pruneopts = "UT"
revision = "b69ba1387ce2108ac9bc8e8e5e5a46e7d5c72313"
[[projects]]
digest = "1:2dab32a43451e320e49608ff4542fdfc653c95dcc35d0065ec9c6c3dd540ed74"
name = "google.golang.org/grpc"
packages = [
".",
@@ -397,20 +484,69 @@
"stats",
"status",
"tap",
"transport"
"transport",
]
pruneopts = "UT"
revision = "168a6198bcb0ef175f7dacec0b8691fc141dc9b8"
version = "v1.13.0"
[[projects]]
digest = "1:342378ac4dcb378a5448dd723f0784ae519383532f5e70ade24132c4c8693202"
name = "gopkg.in/yaml.v2"
packages = ["."]
pruneopts = "UT"
revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183"
version = "v2.2.1"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "8e519c3716c259c6ecdb052889dd3602539fd98a3650dfbf50a4023e087a5d53"
input-imports = [
"github.com/btcsuite/btcutil/base58",
"github.com/btcsuite/btcutil/bech32",
"github.com/fortytw2/leaktest",
"github.com/go-kit/kit/log",
"github.com/go-kit/kit/log/level",
"github.com/go-kit/kit/log/term",
"github.com/go-kit/kit/metrics",
"github.com/go-kit/kit/metrics/discard",
"github.com/go-kit/kit/metrics/prometheus",
"github.com/go-logfmt/logfmt",
"github.com/gogo/protobuf/gogoproto",
"github.com/gogo/protobuf/jsonpb",
"github.com/gogo/protobuf/proto",
"github.com/gogo/protobuf/types",
"github.com/golang/protobuf/proto",
"github.com/golang/protobuf/ptypes/timestamp",
"github.com/gorilla/websocket",
"github.com/jmhodges/levigo",
"github.com/pkg/errors",
"github.com/prometheus/client_golang/prometheus",
"github.com/prometheus/client_golang/prometheus/promhttp",
"github.com/rcrowley/go-metrics",
"github.com/spf13/cobra",
"github.com/spf13/viper",
"github.com/stretchr/testify/assert",
"github.com/stretchr/testify/require",
"github.com/syndtr/goleveldb/leveldb",
"github.com/syndtr/goleveldb/leveldb/errors",
"github.com/syndtr/goleveldb/leveldb/iterator",
"github.com/syndtr/goleveldb/leveldb/opt",
"github.com/tendermint/btcd/btcec",
"github.com/tendermint/go-amino",
"golang.org/x/crypto/bcrypt",
"golang.org/x/crypto/chacha20poly1305",
"golang.org/x/crypto/curve25519",
"golang.org/x/crypto/ed25519",
"golang.org/x/crypto/hkdf",
"golang.org/x/crypto/nacl/box",
"golang.org/x/crypto/nacl/secretbox",
"golang.org/x/crypto/openpgp/armor",
"golang.org/x/crypto/ripemd160",
"golang.org/x/net/context",
"golang.org/x/net/netutil",
"google.golang.org/grpc",
"google.golang.org/grpc/credentials",
]
solver-name = "gps-cdcl"
solver-version = 1

View File

@@ -10,11 +10,6 @@
# name = "github.com/user/project"
# version = "1.0.0"
#
# [[constraint]]
# name = "github.com/user/project2"
# branch = "dev"
# source = "github.com/myfork/project2"
#
# [[override]]
# name = "github.com/x/y"
# version = "2.4.0"
@@ -45,6 +40,10 @@
name = "github.com/gorilla/websocket"
version = "=1.2.0"
[[constraint]]
name = "github.com/rs/cors"
version = "1.6.0"
[[constraint]]
name = "github.com/pkg/errors"
version = "=0.8.0"
@@ -63,7 +62,7 @@
[[constraint]]
name = "github.com/tendermint/go-amino"
version = "=v0.10.1"
version = "v0.14.1"
[[constraint]]
name = "google.golang.org/grpc"
@@ -77,29 +76,24 @@
## Some repos dont have releases.
## Pin to revision
## We can remove this one by updating protobuf to v1.1.0
## but then the grpc tests break with
#--- FAIL: TestBroadcastTx (0.01s)
#panic: message/group field common.KVPair:bytes without pointer [recovered]
# panic: message/group field common.KVPair:bytes without pointer
#
# ...
#
# github.com/tendermint/tendermint/rpc/grpc_test.TestBroadcastTx(0xc420a5ab40)
# /go/src/github.com/tendermint/tendermint/rpc/grpc/grpc_test.go:29 +0x141
[[override]]
name = "google.golang.org/genproto"
revision = "7fd901a49ba6a7f87732eb344f6e3c5b19d1b200"
[[constraint]]
name = "github.com/ebuchman/fail-test"
revision = "95f809107225be108efcf10a3509e4ea6ceef3c4"
name = "golang.org/x/crypto"
source = "github.com/tendermint/crypto"
revision = "3764759f34a542a3aef74d6b02e35be7ab893bba"
[[override]]
name = "github.com/jmhodges/levigo"
revision = "c42d9e0ca023e2198120196f842701bb4c55d7b9"
# last revision used by go-crypto
[[constraint]]
name = "github.com/btcsuite/btcutil"
revision = "d4cc87b860166d00d6b5b9e0d3b3d71d6088d4d4"
[[constraint]]
name = "github.com/tendermint/btcd"
revision = "e5840949ff4fff0c56f9b6a541e22b63581ea9df"
# Haven't made a release since 2016.
[[constraint]]
name = "github.com/prometheus/client_golang"

View File

@@ -1,50 +1,58 @@
GOTOOLS = \
github.com/mitchellh/gox \
github.com/golang/dep/cmd/dep \
gopkg.in/alecthomas/gometalinter.v2 \
github.com/alecthomas/gometalinter \
github.com/gogo/protobuf/protoc-gen-gogo \
github.com/gogo/protobuf/gogoproto \
github.com/square/certstrap
GOBIN?=${GOPATH}/bin
PACKAGES=$(shell go list ./...)
INCLUDE = -I=. -I=${GOPATH}/src -I=${GOPATH}/src/github.com/gogo/protobuf/protobuf
BUILD_TAGS?=tendermint
BUILD_TAGS?='tendermint'
BUILD_FLAGS = -ldflags "-X github.com/tendermint/tendermint/version.GitCommit=`git rev-parse --short=8 HEAD`"
LINT_FLAGS = --exclude '.*\.pb\.go' --exclude 'vendor/*' --vendor --deadline=600s
all: check build test install
check: check_tools ensure_deps
check: check_tools get_vendor_deps
########################################
### Build Tendermint
build:
CGO_ENABLED=0 go build $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' -o build/tendermint ./cmd/tendermint/
CGO_ENABLED=0 go build $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o build/tendermint ./cmd/tendermint/
build_c:
CGO_ENABLED=1 go build $(BUILD_FLAGS) -tags "$(BUILD_TAGS) gcc" -o build/tendermint ./cmd/tendermint/
build_race:
CGO_ENABLED=0 go build -race $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' -o build/tendermint ./cmd/tendermint
CGO_ENABLED=0 go build -race $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o build/tendermint ./cmd/tendermint
install:
CGO_ENABLED=0 go install $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' ./cmd/tendermint
CGO_ENABLED=0 go install $(BUILD_FLAGS) -tags $(BUILD_TAGS) ./cmd/tendermint
########################################
### Protobuf
protoc_all: protoc_libs protoc_abci protoc_grpc
protoc_all: protoc_libs protoc_merkle protoc_abci protoc_grpc protoc_proto3types
%.pb.go: %.proto
## If you get the following error,
## "error while loading shared libraries: libprotobuf.so.14: cannot open shared object file: No such file or directory"
## See https://stackoverflow.com/a/25518702
protoc $(INCLUDE) $< --gogo_out=plugins=grpc:.
@echo "--> adding nolint declarations to protobuf generated files"
@awk -i inplace '/^\s*package \w+/ { print "//nolint" }1' $@
## Note the $< here is substituted for the %.proto
## Note the $@ here is substituted for the %.pb.go
protoc $(INCLUDE) $< --gogo_out=Mgoogle/protobuf/timestamp.proto=github.com/golang/protobuf/ptypes/timestamp,plugins=grpc:.
########################################
### Build ABCI
# see protobuf section above
protoc_abci: abci/types/types.pb.go
protoc_proto3types: types/proto3/block.pb.go
build_abci:
@go build -i ./abci/cmd/...
@@ -57,7 +65,7 @@ install_abci:
# dist builds binaries for all platforms and packages them for distribution
# TODO add abci to these scripts
dist:
@BUILD_TAGS='$(BUILD_TAGS)' sh -c "'$(CURDIR)/scripts/dist.sh'"
@BUILD_TAGS=$(BUILD_TAGS) sh -c "'$(CURDIR)/scripts/dist.sh'"
########################################
### Tools & dependencies
@@ -69,36 +77,33 @@ check_tools:
get_tools:
@echo "--> Installing tools"
go get -u -v $(GOTOOLS)
@gometalinter.v2 --install
./scripts/get_tools.sh
get_dev_tools:
@echo "--> Downloading linters (this may take awhile)"
$(GOPATH)/src/github.com/alecthomas/gometalinter/scripts/install.sh -b $(GOBIN)
update_tools:
@echo "--> Updating tools"
@go get -u $(GOTOOLS)
./scripts/get_tools.sh
#Run this from CI
#Update dependencies
get_vendor_deps:
@rm -rf vendor/
@echo "--> Running dep"
@dep ensure -vendor-only
#Run this locally.
ensure_deps:
@rm -rf vendor/
@echo "--> Running dep"
@dep ensure
#For ABCI and libs
get_protoc:
@# https://github.com/google/protobuf/releases
curl -L https://github.com/google/protobuf/releases/download/v3.4.1/protobuf-cpp-3.4.1.tar.gz | tar xvz && \
cd protobuf-3.4.1 && \
curl -L https://github.com/google/protobuf/releases/download/v3.6.1/protobuf-cpp-3.6.1.tar.gz | tar xvz && \
cd protobuf-3.6.1 && \
DIST_LANG=cpp ./configure && \
make && \
make install && \
make check && \
sudo make install && \
sudo ldconfig && \
cd .. && \
rm -rf protobuf-3.4.1
rm -rf protobuf-3.6.1
draw_deps:
@# requires brew install graphviz or apt-get install graphviz
@@ -107,7 +112,7 @@ draw_deps:
get_deps_bin_size:
@# Copy of build recipe with additional flags to perform binary size analysis
$(eval $(shell go build -work -a $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' -o build/tendermint ./cmd/tendermint/ 2>&1))
$(eval $(shell go build -work -a $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o build/tendermint ./cmd/tendermint/ 2>&1))
@find $(WORK) -type f -name "*.a" | xargs -I{} du -hxs "{}" | sort -rh | sed -e s:${WORK}/::g > deps_bin_size.log
@echo "Results can be found here: $(CURDIR)/deps_bin_size.log"
@@ -137,6 +142,8 @@ grpc_dbserver:
protoc_grpc: rpc/grpc/types.pb.go
protoc_merkle: crypto/merkle/merkle.pb.go
########################################
### Testing
@@ -180,6 +187,9 @@ test_p2p:
cd ..
# requires 'tester' the image from above
bash test/p2p/test.sh tester
# the `docker cp` takes a really long time; uncomment for debugging
#
# mkdir -p test/p2p/logs && docker cp rsyslog:/var/log test/p2p/logs
test_integrations:
make build_docker_test_image
@@ -207,11 +217,11 @@ vagrant_test:
### go tests
test:
@echo "--> Running go test"
@go test $(PACKAGES)
@GOCACHE=off go test -p 1 $(PACKAGES)
test_race:
@echo "--> Running go test --race"
@go test -v -race $(PACKAGES)
@GOCACHE=off go test -p 1 -v -race $(PACKAGES)
########################################
@@ -222,7 +232,7 @@ fmt:
metalinter:
@echo "--> Running linter"
@gometalinter.v2 --vendor --deadline=600s --disable-all \
@gometalinter $(LINT_FLAGS) --disable-all \
--enable=deadcode \
--enable=gosimple \
--enable=misspell \
@@ -251,7 +261,17 @@ metalinter:
metalinter_all:
@echo "--> Running linter (all)"
gometalinter.v2 --vendor --deadline=600s --enable-all --disable=lll ./...
gometalinter $(LINT_FLAGS) --enable-all --disable=lll ./...
DESTINATION = ./index.html.md
rpc-docs:
cat rpc/core/slate_header.txt > $(DESTINATION)
godoc2md -template rpc/core/doc_template.txt github.com/tendermint/tendermint/rpc/core | grep -v -e "pipe.go" -e "routes.go" -e "dev.go" | sed 's,/src/target,https://github.com/tendermint/tendermint/tree/master/rpc/core,' >> $(DESTINATION)
check_dep:
dep status >> /dev/null
!(grep -n branch Gopkg.toml)
###########################################################
### Docker image
@@ -308,4 +328,4 @@ build-slate:
# To avoid unintended conflicts with file names, always add to .PHONY
# unless there is a reason not to.
# https://www.gnu.org/software/make/manual/html_node/Phony-Targets.html
.PHONY: check build build_race build_abci dist install install_abci check_tools get_tools update_tools get_vendor_deps draw_deps get_protoc protoc_abci protoc_libs gen_certs clean_certs grpc_dbserver test_cover test_apps test_persistence test_p2p test test_race test_integrations test_release test100 vagrant_test fmt build-linux localnet-start localnet-stop build-docker build-docker-localnode sentry-start sentry-config sentry-stop build-slate protoc_grpc protoc_all
.PHONY: check build build_race build_abci dist install install_abci check_dep check_tools get_tools get_dev_tools update_tools get_vendor_deps draw_deps get_protoc protoc_abci protoc_libs gen_certs clean_certs grpc_dbserver test_cover test_apps test_persistence test_p2p test test_race test_integrations test_release test100 vagrant_test fmt rpc-docs build-linux localnet-start localnet-stop build-docker build-docker-localnode sentry-start sentry-config sentry-stop build-slate protoc_grpc protoc_all

View File

@@ -8,7 +8,7 @@ Or [Blockchain](https://en.wikipedia.org/wiki/Blockchain_(database)) for short.
[![API Reference](
https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/68747470733a2f2f676f646f632e6f72672f6769746875622e636f6d2f676f6c616e672f6764646f3f7374617475732e737667
)](https://godoc.org/github.com/tendermint/tendermint)
[![Go version](https://img.shields.io/badge/go-1.9.2-blue.svg)](https://github.com/moovweb/gvm)
[![Go version](https://img.shields.io/badge/go-1.10.4-blue.svg)](https://github.com/moovweb/gvm)
[![riot.im](https://img.shields.io/badge/riot.im-JOIN%20CHAT-green.svg)](https://riot.im/app/#/room/#tendermint:matrix.org)
[![license](https://img.shields.io/github/license/tendermint/tendermint.svg)](https://github.com/tendermint/tendermint/blob/master/LICENSE)
[![](https://tokei.rs/b1/github/tendermint/tendermint?category=lines)](https://github.com/tendermint/tendermint)
@@ -24,21 +24,24 @@ and securely replicates it on many machines.
For protocol details, see [the specification](/docs/spec).
For detailed analysis of the consensus protocol, including safety and liveness proofs,
see our recent paper, "[The latest gossip on BFT consensus](https://arxiv.org/abs/1807.04938)".
## A Note on Production Readiness
While Tendermint is being used in production in private, permissioned
environments, we are still working actively to harden and audit it in preparation
for use in public blockchains, such as the [Cosmos Network](https://cosmos.network/).
We are also still making breaking changes to the protocol and the APIs.
Thus we tag the releases as *alpha software*.
Thus, we tag the releases as *alpha software*.
In any case, if you intend to run Tendermint in production,
please [contact us](https://riot.im/app/#/room/#tendermint:matrix.org) :)
please [contact us](mailto:partners@tendermint.com) and [join the chat](https://riot.im/app/#/room/#tendermint:matrix.org).
## Security
To report a security vulnerability, see our [bug bounty
program](https://tendermint.com/security).
program](https://hackerone.com/tendermint)
For examples of the kinds of bugs we're looking for, see [SECURITY.md](SECURITY.md)
@@ -46,18 +49,22 @@ For examples of the kinds of bugs we're looking for, see [SECURITY.md](SECURITY.
Requirement|Notes
---|---
Go version | Go1.9 or higher
Go version | Go1.10 or higher
## Install
## Documentation
Complete documentation can be found on the [website](https://tendermint.com/docs/).
### Install
See the [install instructions](/docs/introduction/install.md)
## Quick Start
### Quick Start
- [Single node](/docs/using-tendermint.md)
- [Local cluster using docker-compose](/networks/local)
- [Single node](/docs/introduction/quick-start.md)
- [Local cluster using docker-compose](/docs/networks/docker-compose.md)
- [Remote cluster using terraform and ansible](/docs/networks/terraform-and-ansible.md)
- [Join the public testnet](https://cosmos.network/testnet)
- [Join the Cosmos testnet](https://cosmos.network/testnet)
## Resources
@@ -66,30 +73,32 @@ See the [install instructions](/docs/introduction/install.md)
For details about the blockchain data structures and the p2p protocols, see
the [Tendermint specification](/docs/spec).
For details on using the software, [Read The Docs](https://tendermint.readthedocs.io/en/master/).
Additional information about some - and eventually all - of the sub-projects below, can be found at Read The Docs.
For details on using the software, see the [documentation](/docs/), which is also
hosted at https://tendermint.com/docs/
### Tools
Benchmarking and monitoring are provided by `tm-bench` and `tm-monitor`, respectively.
Their code is found [here](/tools); these binaries need to be built separately.
Additional documentation is found [here](/docs/tools).
### Sub-projects
* [Amino](http://github.com/tendermint/go-amino), a reflection-based improvement on proto3
* [IAVL](http://github.com/tendermint/iavl), Merkleized IAVL+ Tree implementation
### Tools
* [Deployment, Benchmarking, and Monitoring](http://tendermint.readthedocs.io/projects/tools/en/develop/index.html#tendermint-tools)
### Applications
* [Cosmos SDK](http://github.com/cosmos/cosmos-sdk); a cryptocurrency application framework
* [Ethermint](http://github.com/tendermint/ethermint); Ethereum on Tendermint
* [Many more](https://tendermint.readthedocs.io/en/master/ecosystem.html#abci-applications)
* [Ethermint](http://github.com/cosmos/ethermint); Ethereum on Tendermint
* [Many more](https://tendermint.com/ecosystem)
### More
### Research
* [The latest gossip on BFT consensus](https://arxiv.org/abs/1807.04938)
* [Master's Thesis on Tendermint](https://atrium.lib.uoguelph.ca/xmlui/handle/10214/9769)
* [Original Whitepaper](https://tendermint.com/static/docs/tendermint.pdf)
* [Tendermint Blog](https://blog.cosmos.network/tendermint/home)
* [Cosmos Blog](https://blog.cosmos.network)
* [Blog](https://blog.cosmos.network/tendermint/home)
## Contributing
@@ -114,6 +123,12 @@ CHANGELOG even if they don't lead to MINOR version bumps:
- rpc/client
- config
- node
- libs
- bech32
- common
- db
- errors
- log
Exported objects in these packages that are not covered by the versioning scheme
are explicitly marked by `// UNSTABLE` in their go doc comment and may change at any
@@ -130,6 +145,8 @@ data into the new chain.
However, any bump in the PATCH version should be compatible with existing histories
(if not please open an [issue](https://github.com/tendermint/tendermint/issues)).
For more information on upgrading, see [here](./UPGRADING.md)
## Code of Conduct
Please read, understand and adhere to our [code of conduct](CODE_OF_CONDUCT.md).

View File

@@ -1,7 +1,8 @@
# Security
As part of our [Coordinated Vulnerability Disclosure
Policy](https://tendermint.com/security), we operate a bug bounty.
Policy](https://tendermint.com/security), we operate a [bug
bounty](https://hackerone.com/tendermint).
See the policy for more details on submissions and rewards.
Here is a list of examples of the kinds of bugs we're most interested in:

152
UPGRADING.md Normal file
View File

@@ -0,0 +1,152 @@
# Upgrading Tendermint Core
This guide provides steps to be followed when you upgrade your applications to
a newer version of Tendermint Core.
## v0.26.0
The new 0.26.0 release contains many changes to core data types and protocols. It is not
compatible with older versions, and there is no straightforward way to update
old data to be compatible with the new version.
To reset the state do:
```
$ tendermint unsafe_reset_all
```
Here we summarize some other notable changes to be mindful of.
### Config Changes
All timeouts must be changed from integers to strings with their duration, for
instance `flush_throttle_timeout = 100` would be changed to
`flush_throttle_timeout = "100ms"` and `timeout_propose = 3000` would be changed
to `timeout_propose = "3s"`.
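As a concrete illustration, the two keys mentioned above change as follows in `config.toml` (other timeout keys follow the same pattern):
```
# before (integer milliseconds)
flush_throttle_timeout = 100
timeout_propose = 3000

# after (duration strings)
flush_throttle_timeout = "100ms"
timeout_propose = "3s"
```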
### RPC Changes
The default behaviour of `/abci_query` has been changed to not return a proof,
and the name of the parameter that controls this has been changed from `trusted`
to `prove`. To get proofs with your queries, ensure you set `prove=true`.
Various version fields like `amino_version`, `p2p_version`, `consensus_version`,
and `rpc_version` have been removed from `node_info.other` and are
consolidated under the Tendermint semantic version (i.e. `node_info.version`) and
the new `block` and `p2p` protocol versions under `node_info.protocol_version`.
### ABCI Changes
Field numbers were bumped in the `Header` and `ResponseInfo` messages to make
room for new `version` fields. It should be straightforward to recompile the
protobuf file for these changes.
#### Proofs
The `ResponseQuery.Proof` field is now structured as a `[]ProofOp` to support
generalized Merkle tree constructions where the leaves of one Merkle tree are
the root of another. If you don't need this functionality, and you used to
return `<proof bytes>` here, you should instead return a single `ProofOp` with
just the `Data` field set:
```
[]ProofOp{
    ProofOp{
        Data: <proof bytes>,
    },
}
```
For more information, see:
- [ADR-026](https://github.com/tendermint/tendermint/blob/30519e8361c19f4bf320ef4d26288ebc621ad725/docs/architecture/adr-026-general-merkle-proof.md)
- [Relevant ABCI
documentation](https://github.com/tendermint/tendermint/blob/30519e8361c19f4bf320ef4d26288ebc621ad725/docs/spec/abci/apps.md#query-proofs)
- [Description of
keys](https://github.com/tendermint/tendermint/blob/30519e8361c19f4bf320ef4d26288ebc621ad725/crypto/merkle/proof_key_path.go#L14)
### Go API Changes
#### crypto.merkle
The `merkle.Hasher` interface was removed. Functions which used to take `Hasher`
now simply take `[]byte`. This means that any objects being Merklized should be
serialized before they are passed in.
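For example, where you previously handed objects implementing `Hasher` to the Merkle functions, you now serialize them yourself and pass raw bytes. A minimal sketch, assuming amino for serialization and the `SimpleHashFromByteSlices` helper in `crypto/merkle` (substitute whatever encoding and helper your app actually uses):
```golang
package main

import (
    "fmt"

    amino "github.com/tendermint/go-amino"
    "github.com/tendermint/tendermint/crypto/merkle"
)

var cdc = amino.NewCodec()

// Item stands in for whatever struct your app used to Merklize via the
// old merkle.Hasher interface.
type Item struct {
    Key   string
    Value string
}

func main() {
    items := []Item{{"k1", "v1"}, {"k2", "v2"}}

    // Serialize each object first, then hand the raw bytes to the tree.
    bzs := make([][]byte, len(items))
    for i, it := range items {
        bzs[i] = cdc.MustMarshalBinaryBare(it)
    }

    root := merkle.SimpleHashFromByteSlices(bzs)
    fmt.Printf("root: %X\n", root)
}
```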
#### node
The `node.RunForever` function was removed. Signal handling and running forever
should instead be explicitly configured by the caller. See how we do it
[here](https://github.com/tendermint/tendermint/blob/30519e8361c19f4bf320ef4d26288ebc621ad725/cmd/tendermint/commands/run_node.go#L60).
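If you relied on `node.RunForever`, a minimal stand-in is to trap the shutdown signals yourself with the standard library and stop the node before exiting. This is a sketch only (the linked `run_node.go` shows the approach actually used in the repo; `n` is assumed to be a started `*node.Node`):
```golang
package cmdutil // illustrative package name

import (
    "os"
    "os/signal"
    "syscall"

    "github.com/tendermint/tendermint/libs/log"
    nm "github.com/tendermint/tendermint/node"
)

// runForever blocks until SIGINT/SIGTERM and then stops the node.
// It is an illustrative replacement for the removed node.RunForever.
func runForever(n *nm.Node, logger log.Logger) {
    sigs := make(chan os.Signal, 1)
    signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)

    sig := <-sigs
    logger.Info("caught signal, shutting down", "signal", sig)
    if n.IsRunning() {
        n.Stop()
    }
}
```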
### Other
All hashes, except for public key addresses, are now 32 bytes.
## v0.25.0
This release has minimal impact.
If you use GasWanted in ABCI and want to enforce it, set the MaxGas in the genesis file (default is no max).
## v0.24.0
The new 0.24.0 release contains many changes to the state and types. It is not
compatible with older versions, and there is no straightforward way to update
old data to be compatible with the new version.
To reset the state do:
```
$ tendermint unsafe_reset_all
```
Here we summarize some other notable changes to be mindful of.
### Config changes
`p2p.max_num_peers` was removed in favor of `p2p.max_num_inbound_peers` and
`p2p.max_num_outbound_peers`.
```
# Maximum number of inbound peers
max_num_inbound_peers = 40
# Maximum number of outbound peers to connect to, excluding persistent peers
max_num_outbound_peers = 10
```
As you can see, the default ratio of inbound to outbound peers is 4:1, because we
want it to be easier for new nodes to connect to the network. You can
tweak these parameters to alter the network topology.
### RPC Changes
The result of `/commit` used to contain `header` and `commit` fields at the top level. These are now contained under the `signed_header` field.
### ABCI Changes
The header has been upgraded and contains new fields, but none of the existing
fields were changed, except their order.
The `Validator` type was split into two, one containing an `Address` and one
containing a `PubKey`. When processing `RequestBeginBlock`, use the `Validator`
type, which contains just the `Address`. When returning `ResponseEndBlock`, use
the `ValidatorUpdate` type, which contains just the `PubKey`.
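A hedged sketch of what the two call sites look like in an application (`MyApp`, its `newValPubKey` field, and the power value are placeholders, not part of the upgrade itself):
```golang
package myapp

import (
    "github.com/tendermint/tendermint/abci/types"
)

type MyApp struct {
    types.BaseApplication
    newValPubKey []byte // placeholder: pubkey of a validator to add
}

func (app *MyApp) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock {
    // Votes carry the slimmed-down Validator (address and power only).
    for _, vote := range req.LastCommitInfo.Votes {
        addr := vote.Validator.Address
        _ = addr // e.g. resolve the pubkey via your own address->pubkey map
    }
    return types.ResponseBeginBlock{}
}

func (app *MyApp) EndBlock(req types.RequestEndBlock) types.ResponseEndBlock {
    // Validator set changes are returned as ValidatorUpdate (pubkey and power).
    updates := []types.ValidatorUpdate{
        types.Ed25519ValidatorUpdate(app.newValPubKey, 10),
    }
    return types.ResponseEndBlock{ValidatorUpdates: updates}
}
```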
### Validator Set Updates
Validator set updates returned in ResponseEndBlock for height `H` used to take
effect immediately at height `H+1`. Now they will be delayed one block, to take
effect at height `H+2`. Note this means that the change will be seen by the ABCI
app in the `RequestBeginBlock.LastCommitInfo` at block `H+3`. Apps were already
required to maintain a map from validator addresses to pubkeys since v0.23 (when
pubkeys were removed from RequestBeginBlock), but now they may need to track
multiple validator sets at once to accommodate this delay.
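One way to accommodate the delay is to keep the address-to-pubkey mapping for a small window of recent heights, so that addresses seen in `LastCommitInfo` can always be resolved. A rough sketch of one possible approach (not prescribed by Tendermint):
```golang
package myapp

// valHistory keeps the address->pubkey map for recent heights so that
// addresses seen in RequestBeginBlock.LastCommitInfo, which reflect an
// older validator set, can still be resolved.
type valHistory struct {
    byHeight map[int64]map[string][]byte // height -> hex address -> pubkey bytes
}

func newValHistory() *valHistory {
    return &valHistory{byHeight: make(map[int64]map[string][]byte)}
}

// record stores the set effective at the given height and drops old entries.
func (h *valHistory) record(height int64, addrToPubKey map[string][]byte) {
    h.byHeight[height] = addrToPubKey
    delete(h.byHeight, height-4) // keep only a small window
}

// lookup resolves an address against the set effective at the given height.
func (h *valHistory) lookup(height int64, addr string) ([]byte, bool) {
    set, ok := h.byHeight[height]
    if !ok {
        return nil, false
    }
    pk, ok := set[addr]
    return pk, ok
}
```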
### Block Size
The `ConsensusParams.BlockSize.MaxTxs` was removed in favour of
`ConsensusParams.BlockSize.MaxBytes`, which is now enforced. This means blocks
are limited only by byte size, not by the number of transactions.

6
Vagrantfile vendored
View File

@@ -29,10 +29,10 @@ Vagrant.configure("2") do |config|
usermod -a -G docker vagrant
# install go
wget -q https://dl.google.com/go/go1.10.1.linux-amd64.tar.gz
tar -xvf go1.10.1.linux-amd64.tar.gz
wget -q https://dl.google.com/go/go1.11.linux-amd64.tar.gz
tar -xvf go1.11.linux-amd64.tar.gz
mv go /usr/local
rm -f go1.10.1.linux-amd64.tar.gz
rm -f go1.11.linux-amd64.tar.gz
# cleanup
apt-get autoremove -y

View File

@@ -1,7 +1,5 @@
# Application BlockChain Interface (ABCI)
[![CircleCI](https://circleci.com/gh/tendermint/abci.svg?style=svg)](https://circleci.com/gh/tendermint/abci)
Blockchains are systems for multi-master state machine replication.
**ABCI** is an interface that defines the boundary between the replication engine (the blockchain),
and the state machine (the application).
@@ -12,157 +10,28 @@ Previously, the ABCI was referred to as TMSP.
The community has provided a number of additional implementations; see the [Tendermint Ecosystem](https://tendermint.com/ecosystem).
## Installation & Usage
To get up and running quickly, see the [getting started guide](../docs/app-dev/getting-started.md) along with the [abci-cli documentation](../docs/app-dev/abci-cli.md) which will go through the examples found in the [examples](./example/) directory.
## Specification
A detailed description of the ABCI methods and message types is contained in:
- [A prose specification](specification.md)
- [A protobuf file](https://github.com/tendermint/abci/blob/master/types/types.proto)
- [A Go interface](https://github.com/tendermint/abci/blob/master/types/application.go).
- [The main spec](../docs/spec/abci/abci.md)
- [A protobuf file](./types/types.proto)
- [A Go interface](./types/application.go)
For more background information on ABCI, motivations, and tendermint, please visit [the documentation](http://tendermint.readthedocs.io/en/master/).
The two guides to focus on are the `Application Development Guide` and `Using ABCI-CLI`.
## Protocol Buffers
## Protocol Buffers
To compile the protobuf file, run:
To compile the protobuf file, run (from the root of the repo):
```
make protoc
make protoc_abci
```
See `protoc --help` and [the Protocol Buffers site](https://developers.google.com/protocol-buffers)
for details on compiling for other languages. Note we also include a [GRPC](http://www.grpc.io/docs)
for details on compiling for other languages. Note we also include a [GRPC](https://www.grpc.io/docs)
service definition.
## Install ABCI-CLI
The `abci-cli` is a simple tool for debugging ABCI servers and running some
example apps. To install it:
```
go get github.com/tendermint/abci
cd $GOPATH/src/github.com/tendermint/abci
make get_vendor_deps
make install
```
## Implementation
We provide three implementations of the ABCI in Go:
- Golang in-process
- ABCI-socket
- GRPC
Note the GRPC version is maintained primarily to simplify onboarding and prototyping and is not receiving the same
attention to security and performance as the others
### In Process
The simplest implementation just uses function calls within Go.
This means ABCI applications written in Golang can be compiled with TendermintCore and run as a single binary.
See the [examples](#examples) below for more information.
### Socket (TSP)
ABCI is best implemented as a streaming protocol.
The socket implementation provides asynchronous, ordered message passing over Unix sockets or TCP.
Messages are serialized using Protobuf3 and length-prefixed with a [signed Varint](https://developers.google.com/protocol-buffers/docs/encoding?csw=1#signed-integers)
For example, if the Protobuf3 encoded ABCI message is `0xDEADBEEF` (4 bytes), the length-prefixed message is `0x08DEADBEEF`, since `0x08` is the signed varint
encoding of `4`. If the Protobuf3 encoded ABCI message is 65535 bytes long, the length-prefixed message would be like `0xFEFF07...`.
Note the benefit of using this `varint` encoding over the old version (where integers were encoded as `<len of len><big endian len>`) is that
it is the standard way to encode integers in Protobuf. It is also generally shorter.
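Go's `encoding/binary` varint functions use the same zigzag ("signed varint") scheme as Protobuf, so a writer-side sketch of this framing, using only the standard library, looks like:
```golang
package main

import (
    "bytes"
    "encoding/binary"
    "fmt"
)

// writeLengthPrefixed frames msg as <signed varint length><msg bytes>,
// matching the framing described above.
func writeLengthPrefixed(w *bytes.Buffer, msg []byte) {
    var lenBuf [binary.MaxVarintLen64]byte
    n := binary.PutVarint(lenBuf[:], int64(len(msg))) // zigzag "signed" varint
    w.Write(lenBuf[:n])
    w.Write(msg)
}

func main() {
    var buf bytes.Buffer
    writeLengthPrefixed(&buf, []byte{0xDE, 0xAD, 0xBE, 0xEF})
    fmt.Printf("%X\n", buf.Bytes()) // prints 08DEADBEEF, as in the example above
}
```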
### GRPC
GRPC is an rpc framework native to Protocol Buffers with support in many languages.
Implementing the ABCI using GRPC can allow for faster prototyping, but is expected to be much slower than
the ordered, asynchronous socket protocol. The implementation has also not received as much testing or review.
Note the length-prefixing used in the socket implementation does not apply for GRPC.
## Usage
The `abci-cli` tool wraps an ABCI client and can be used for probing/testing an ABCI server.
For instance, `abci-cli test` will run a test sequence against a listening server running the Counter application (see below).
It can also be used to run some example applications.
See [the documentation](http://tendermint.readthedocs.io/en/master/) for more details.
### Examples
Check out the variety of example applications in the [example directory](example/).
It also contains the code referred to by the `counter` and `kvstore` apps; these apps come
built into the `abci-cli` binary.
#### Counter
The `abci-cli counter` application illustrates nonce checking in transactions. Its code looks like this:
```golang
func cmdCounter(cmd *cobra.Command, args []string) error {
    app := counter.NewCounterApplication(flagSerial)
    logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))

    // Start the listener
    srv, err := server.NewServer(flagAddrC, flagAbci, app)
    if err != nil {
        return err
    }
    srv.SetLogger(logger.With("module", "abci-server"))
    if err := srv.Start(); err != nil {
        return err
    }

    // Wait forever
    cmn.TrapSignal(func() {
        // Cleanup
        srv.Stop()
    })
    return nil
}
```
and can be found in [this file](cmd/abci-cli/abci-cli.go).
#### kvstore
The `abci-cli kvstore` application illustrates a simple key-value Merkle tree:
```golang
func cmdKVStore(cmd *cobra.Command, args []string) error {
    logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))

    // Create the application - in memory or persisted to disk
    var app types.Application
    if flagPersist == "" {
        app = kvstore.NewKVStoreApplication()
    } else {
        app = kvstore.NewPersistentKVStoreApplication(flagPersist)
        app.(*kvstore.PersistentKVStoreApplication).SetLogger(logger.With("module", "kvstore"))
    }

    // Start the listener
    srv, err := server.NewServer(flagAddrD, flagAbci, app)
    if err != nil {
        return err
    }
    srv.SetLogger(logger.With("module", "abci-server"))
    if err := srv.Start(); err != nil {
        return err
    }

    // Wait forever
    cmn.TrapSignal(func() {
        // Cleanup
        srv.Stop()
    })
    return nil
}
```

View File

@@ -105,8 +105,8 @@ func (reqRes *ReqRes) SetCallback(cb func(res *types.Response)) {
return
}
defer reqRes.mtx.Unlock()
reqRes.cb = cb
reqRes.mtx.Unlock()
}
func (reqRes *ReqRes) GetCallback() func(*types.Response) {

View File

@@ -22,6 +22,7 @@ type grpcClient struct {
mustConnect bool
client types.ABCIApplicationClient
conn *grpc.ClientConn
mtx sync.Mutex
addr string
@@ -60,10 +61,11 @@ RETRY_LOOP:
cli.Logger.Info("Dialed server. Waiting for echo.", "addr", cli.addr)
client := types.NewABCIApplicationClient(conn)
cli.conn = conn
ENSURE_CONNECTED:
for {
_, err := client.Echo(context.Background(), &types.RequestEcho{"hello"}, grpc.FailFast(true))
_, err := client.Echo(context.Background(), &types.RequestEcho{Message: "hello"}, grpc.FailFast(true))
if err == nil {
break ENSURE_CONNECTED
}
@@ -78,12 +80,10 @@ RETRY_LOOP:
func (cli *grpcClient) OnStop() {
cli.BaseService.OnStop()
cli.mtx.Lock()
defer cli.mtx.Unlock()
// TODO: how to close conn? its not a net.Conn and grpc doesn't expose a Close()
/*if cli.client.conn != nil {
cli.client.conn.Close()
}*/
if cli.conn != nil {
cli.conn.Close()
}
}
func (cli *grpcClient) StopForError(err error) {
@@ -111,8 +111,8 @@ func (cli *grpcClient) Error() error {
// NOTE: callback may get internally generated flush responses.
func (cli *grpcClient) SetResponseCallback(resCb Callback) {
cli.mtx.Lock()
defer cli.mtx.Unlock()
cli.resCb = resCb
cli.mtx.Unlock()
}
//----------------------------------------
@@ -129,7 +129,7 @@ func (cli *grpcClient) EchoAsync(msg string) *ReqRes {
if err != nil {
cli.StopForError(err)
}
return cli.finishAsyncCall(req, &types.Response{&types.Response_Echo{res}})
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Echo{res}})
}
func (cli *grpcClient) FlushAsync() *ReqRes {
@@ -138,7 +138,7 @@ func (cli *grpcClient) FlushAsync() *ReqRes {
if err != nil {
cli.StopForError(err)
}
return cli.finishAsyncCall(req, &types.Response{&types.Response_Flush{res}})
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Flush{res}})
}
func (cli *grpcClient) InfoAsync(params types.RequestInfo) *ReqRes {
@@ -147,7 +147,7 @@ func (cli *grpcClient) InfoAsync(params types.RequestInfo) *ReqRes {
if err != nil {
cli.StopForError(err)
}
return cli.finishAsyncCall(req, &types.Response{&types.Response_Info{res}})
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Info{res}})
}
func (cli *grpcClient) SetOptionAsync(params types.RequestSetOption) *ReqRes {
@@ -156,7 +156,7 @@ func (cli *grpcClient) SetOptionAsync(params types.RequestSetOption) *ReqRes {
if err != nil {
cli.StopForError(err)
}
return cli.finishAsyncCall(req, &types.Response{&types.Response_SetOption{res}})
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_SetOption{res}})
}
func (cli *grpcClient) DeliverTxAsync(tx []byte) *ReqRes {
@@ -165,7 +165,7 @@ func (cli *grpcClient) DeliverTxAsync(tx []byte) *ReqRes {
if err != nil {
cli.StopForError(err)
}
return cli.finishAsyncCall(req, &types.Response{&types.Response_DeliverTx{res}})
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_DeliverTx{res}})
}
func (cli *grpcClient) CheckTxAsync(tx []byte) *ReqRes {
@@ -174,7 +174,7 @@ func (cli *grpcClient) CheckTxAsync(tx []byte) *ReqRes {
if err != nil {
cli.StopForError(err)
}
return cli.finishAsyncCall(req, &types.Response{&types.Response_CheckTx{res}})
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_CheckTx{res}})
}
func (cli *grpcClient) QueryAsync(params types.RequestQuery) *ReqRes {
@@ -183,7 +183,7 @@ func (cli *grpcClient) QueryAsync(params types.RequestQuery) *ReqRes {
if err != nil {
cli.StopForError(err)
}
return cli.finishAsyncCall(req, &types.Response{&types.Response_Query{res}})
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Query{res}})
}
func (cli *grpcClient) CommitAsync() *ReqRes {
@@ -192,7 +192,7 @@ func (cli *grpcClient) CommitAsync() *ReqRes {
if err != nil {
cli.StopForError(err)
}
return cli.finishAsyncCall(req, &types.Response{&types.Response_Commit{res}})
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Commit{res}})
}
func (cli *grpcClient) InitChainAsync(params types.RequestInitChain) *ReqRes {
@@ -201,7 +201,7 @@ func (cli *grpcClient) InitChainAsync(params types.RequestInitChain) *ReqRes {
if err != nil {
cli.StopForError(err)
}
return cli.finishAsyncCall(req, &types.Response{&types.Response_InitChain{res}})
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_InitChain{res}})
}
func (cli *grpcClient) BeginBlockAsync(params types.RequestBeginBlock) *ReqRes {
@@ -210,7 +210,7 @@ func (cli *grpcClient) BeginBlockAsync(params types.RequestBeginBlock) *ReqRes {
if err != nil {
cli.StopForError(err)
}
return cli.finishAsyncCall(req, &types.Response{&types.Response_BeginBlock{res}})
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_BeginBlock{res}})
}
func (cli *grpcClient) EndBlockAsync(params types.RequestEndBlock) *ReqRes {
@@ -219,7 +219,7 @@ func (cli *grpcClient) EndBlockAsync(params types.RequestEndBlock) *ReqRes {
if err != nil {
cli.StopForError(err)
}
return cli.finishAsyncCall(req, &types.Response{&types.Response_EndBlock{res}})
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_EndBlock{res}})
}
func (cli *grpcClient) finishAsyncCall(req *types.Request, res *types.Response) *ReqRes {

View File

@@ -9,8 +9,13 @@ import (
var _ Client = (*localClient)(nil)
// NOTE: use defer to unlock mutex because Application might panic (e.g., in
// case of malicious tx or query). It only makes sense for publicly exposed
// methods like CheckTx (/broadcast_tx_* RPC endpoint) or Query (/abci_query
// RPC endpoint), but defers are used everywhere for the sake of consistency.
type localClient struct {
cmn.BaseService
mtx *sync.Mutex
types.Application
Callback
@@ -30,8 +35,8 @@ func NewLocalClient(mtx *sync.Mutex, app types.Application) *localClient {
func (app *localClient) SetResponseCallback(cb Callback) {
app.mtx.Lock()
defer app.mtx.Unlock()
app.Callback = cb
app.mtx.Unlock()
}
// TODO: change types.Application to include Error()?
@@ -45,6 +50,9 @@ func (app *localClient) FlushAsync() *ReqRes {
}
func (app *localClient) EchoAsync(msg string) *ReqRes {
app.mtx.Lock()
defer app.mtx.Unlock()
return app.callback(
types.ToRequestEcho(msg),
types.ToResponseEcho(msg),
@@ -53,8 +61,9 @@ func (app *localClient) EchoAsync(msg string) *ReqRes {
func (app *localClient) InfoAsync(req types.RequestInfo) *ReqRes {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.Info(req)
app.mtx.Unlock()
return app.callback(
types.ToRequestInfo(req),
types.ToResponseInfo(res),
@@ -63,8 +72,9 @@ func (app *localClient) InfoAsync(req types.RequestInfo) *ReqRes {
func (app *localClient) SetOptionAsync(req types.RequestSetOption) *ReqRes {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.SetOption(req)
app.mtx.Unlock()
return app.callback(
types.ToRequestSetOption(req),
types.ToResponseSetOption(res),
@@ -73,8 +83,9 @@ func (app *localClient) SetOptionAsync(req types.RequestSetOption) *ReqRes {
func (app *localClient) DeliverTxAsync(tx []byte) *ReqRes {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.DeliverTx(tx)
app.mtx.Unlock()
return app.callback(
types.ToRequestDeliverTx(tx),
types.ToResponseDeliverTx(res),
@@ -83,8 +94,9 @@ func (app *localClient) DeliverTxAsync(tx []byte) *ReqRes {
func (app *localClient) CheckTxAsync(tx []byte) *ReqRes {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.CheckTx(tx)
app.mtx.Unlock()
return app.callback(
types.ToRequestCheckTx(tx),
types.ToResponseCheckTx(res),
@@ -93,8 +105,9 @@ func (app *localClient) CheckTxAsync(tx []byte) *ReqRes {
func (app *localClient) QueryAsync(req types.RequestQuery) *ReqRes {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.Query(req)
app.mtx.Unlock()
return app.callback(
types.ToRequestQuery(req),
types.ToResponseQuery(res),
@@ -103,8 +116,9 @@ func (app *localClient) QueryAsync(req types.RequestQuery) *ReqRes {
func (app *localClient) CommitAsync() *ReqRes {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.Commit()
app.mtx.Unlock()
return app.callback(
types.ToRequestCommit(),
types.ToResponseCommit(res),
@@ -113,19 +127,20 @@ func (app *localClient) CommitAsync() *ReqRes {
func (app *localClient) InitChainAsync(req types.RequestInitChain) *ReqRes {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.InitChain(req)
reqRes := app.callback(
return app.callback(
types.ToRequestInitChain(req),
types.ToResponseInitChain(res),
)
app.mtx.Unlock()
return reqRes
}
func (app *localClient) BeginBlockAsync(req types.RequestBeginBlock) *ReqRes {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.BeginBlock(req)
app.mtx.Unlock()
return app.callback(
types.ToRequestBeginBlock(req),
types.ToResponseBeginBlock(res),
@@ -134,8 +149,9 @@ func (app *localClient) BeginBlockAsync(req types.RequestBeginBlock) *ReqRes {
func (app *localClient) EndBlockAsync(req types.RequestEndBlock) *ReqRes {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.EndBlock(req)
app.mtx.Unlock()
return app.callback(
types.ToRequestEndBlock(req),
types.ToResponseEndBlock(res),
@@ -149,69 +165,78 @@ func (app *localClient) FlushSync() error {
}
func (app *localClient) EchoSync(msg string) (*types.ResponseEcho, error) {
return &types.ResponseEcho{msg}, nil
return &types.ResponseEcho{Message: msg}, nil
}
func (app *localClient) InfoSync(req types.RequestInfo) (*types.ResponseInfo, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.Info(req)
app.mtx.Unlock()
return &res, nil
}
func (app *localClient) SetOptionSync(req types.RequestSetOption) (*types.ResponseSetOption, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.SetOption(req)
app.mtx.Unlock()
return &res, nil
}
func (app *localClient) DeliverTxSync(tx []byte) (*types.ResponseDeliverTx, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.DeliverTx(tx)
app.mtx.Unlock()
return &res, nil
}
func (app *localClient) CheckTxSync(tx []byte) (*types.ResponseCheckTx, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.CheckTx(tx)
app.mtx.Unlock()
return &res, nil
}
func (app *localClient) QuerySync(req types.RequestQuery) (*types.ResponseQuery, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.Query(req)
app.mtx.Unlock()
return &res, nil
}
func (app *localClient) CommitSync() (*types.ResponseCommit, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.Commit()
app.mtx.Unlock()
return &res, nil
}
func (app *localClient) InitChainSync(req types.RequestInitChain) (*types.ResponseInitChain, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.InitChain(req)
app.mtx.Unlock()
return &res, nil
}
func (app *localClient) BeginBlockSync(req types.RequestBeginBlock) (*types.ResponseBeginBlock, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.BeginBlock(req)
app.mtx.Unlock()
return &res, nil
}
func (app *localClient) EndBlockSync(req types.RequestEndBlock) (*types.ResponseEndBlock, error) {
app.mtx.Lock()
defer app.mtx.Unlock()
res := app.Application.EndBlock(req)
app.mtx.Unlock()
return &res, nil
}

View File

@@ -118,8 +118,8 @@ func (cli *socketClient) Error() error {
// NOTE: callback may get internally generated flush responses.
func (cli *socketClient) SetResponseCallback(resCb Callback) {
cli.mtx.Lock()
defer cli.mtx.Unlock()
cli.resCb = resCb
cli.mtx.Unlock()
}
//----------------------------------------

View File

@@ -22,6 +22,7 @@ import (
servertest "github.com/tendermint/tendermint/abci/tests/server"
"github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/abci/version"
"github.com/tendermint/tendermint/crypto/merkle"
)
// client is a global variable so it can be reused by the console
@@ -100,7 +101,7 @@ type queryResponse struct {
Key []byte
Value []byte
Height int64
Proof []byte
Proof *merkle.Proof
}
func Execute() error {
@@ -477,11 +478,8 @@ func muxOnCommands(cmd *cobra.Command, pArgs []string) error {
}
func cmdUnimplemented(cmd *cobra.Command, args []string) error {
// TODO: Print out all the sub-commands available
msg := "unimplemented command"
if err := cmd.Help(); err != nil {
msg = err.Error()
}
if len(args) > 0 {
msg += fmt.Sprintf(" args: [%s]", strings.Join(args, " "))
}
@@ -489,6 +487,17 @@ func cmdUnimplemented(cmd *cobra.Command, args []string) error {
Code: codeBad,
Log: msg,
})
fmt.Println("Available commands:")
fmt.Printf("%s: %s\n", echoCmd.Use, echoCmd.Short)
fmt.Printf("%s: %s\n", infoCmd.Use, infoCmd.Short)
fmt.Printf("%s: %s\n", checkTxCmd.Use, checkTxCmd.Short)
fmt.Printf("%s: %s\n", deliverTxCmd.Use, deliverTxCmd.Short)
fmt.Printf("%s: %s\n", queryCmd.Use, queryCmd.Short)
fmt.Printf("%s: %s\n", commitCmd.Use, commitCmd.Short)
fmt.Printf("%s: %s\n", setOptionCmd.Use, setOptionCmd.Short)
fmt.Println("Use \"[command] --help\" for more information about a command.")
return nil
}
@@ -514,7 +523,7 @@ func cmdInfo(cmd *cobra.Command, args []string) error {
if len(args) == 1 {
version = args[0]
}
res, err := client.InfoSync(types.RequestInfo{version})
res, err := client.InfoSync(types.RequestInfo{Version: version})
if err != nil {
return err
}
@@ -537,7 +546,7 @@ func cmdSetOption(cmd *cobra.Command, args []string) error {
}
key, val := args[0], args[1]
_, err := client.SetOptionSync(types.RequestSetOption{key, val})
_, err := client.SetOptionSync(types.RequestSetOption{Key: key, Value: val})
if err != nil {
return err
}
@@ -740,7 +749,7 @@ func printResponse(cmd *cobra.Command, args []string, rsp response) {
fmt.Printf("-> value.hex: %X\n", rsp.Query.Value)
}
if rsp.Query.Proof != nil {
fmt.Printf("-> proof: %X\n", rsp.Query.Proof)
fmt.Printf("-> proof: %#v\n", rsp.Query.Proof)
}
}
}

View File

@@ -6,4 +6,5 @@ const (
CodeTypeEncodingError uint32 = 1
CodeTypeBadNonce uint32 = 2
CodeTypeUnauthorized uint32 = 3
CodeTypeUnknownError uint32 = 4
)

View File

@@ -6,7 +6,6 @@ import (
"github.com/tendermint/tendermint/abci/example/code"
"github.com/tendermint/tendermint/abci/types"
cmn "github.com/tendermint/tendermint/libs/common"
)
type CounterApplication struct {
@@ -22,7 +21,7 @@ func NewCounterApplication(serial bool) *CounterApplication {
}
func (app *CounterApplication) Info(req types.RequestInfo) types.ResponseInfo {
return types.ResponseInfo{Data: cmn.Fmt("{\"hashes\":%v,\"txs\":%v}", app.hashCount, app.txCount)}
return types.ResponseInfo{Data: fmt.Sprintf("{\"hashes\":%v,\"txs\":%v}", app.hashCount, app.txCount)}
}
func (app *CounterApplication) SetOption(req types.RequestSetOption) types.ResponseSetOption {
@@ -34,7 +33,7 @@ func (app *CounterApplication) SetOption(req types.RequestSetOption) types.Respo
TODO Panic and have the ABCI server pass an exception.
The client can call SetOptionSync() and get an `error`.
return types.ResponseSetOption{
Error: cmn.Fmt("Unknown key (%s) or value (%s)", key, value),
Error: fmt.Sprintf("Unknown key (%s) or value (%s)", key, value),
}
*/
return types.ResponseSetOption{}
@@ -95,10 +94,10 @@ func (app *CounterApplication) Commit() (resp types.ResponseCommit) {
func (app *CounterApplication) Query(reqQuery types.RequestQuery) types.ResponseQuery {
switch reqQuery.Path {
case "hash":
return types.ResponseQuery{Value: []byte(cmn.Fmt("%v", app.hashCount))}
return types.ResponseQuery{Value: []byte(fmt.Sprintf("%v", app.hashCount))}
case "tx":
return types.ResponseQuery{Value: []byte(cmn.Fmt("%v", app.txCount))}
return types.ResponseQuery{Value: []byte(fmt.Sprintf("%v", app.txCount))}
default:
return types.ResponseQuery{Log: cmn.Fmt("Invalid query path. Expected hash or tx, got %v", reqQuery.Path)}
return types.ResponseQuery{Log: fmt.Sprintf("Invalid query path. Expected hash or tx, got %v", reqQuery.Path)}
}
}

View File

@@ -7,6 +7,8 @@ import (
"testing"
"time"
"github.com/stretchr/testify/require"
"google.golang.org/grpc"
"golang.org/x/net/context"
@@ -37,13 +39,13 @@ func TestGRPC(t *testing.T) {
}
func testStream(t *testing.T, app types.Application) {
numDeliverTxs := 200000
numDeliverTxs := 20000
// Start the listener
server := abciserver.NewSocketServer("unix://test.sock", app)
server.SetLogger(log.TestingLogger().With("module", "abci-server"))
if err := server.Start(); err != nil {
t.Fatalf("Error starting socket server: %v", err.Error())
require.NoError(t, err, "Error starting socket server")
}
defer server.Stop()
@@ -70,7 +72,7 @@ func testStream(t *testing.T, app types.Application) {
}
if counter == numDeliverTxs {
go func() {
time.Sleep(time.Second * 2) // Wait for a bit to allow counter overflow
time.Sleep(time.Second * 1) // Wait for a bit to allow counter overflow
close(done)
}()
return
@@ -132,7 +134,7 @@ func testGRPCSync(t *testing.T, app *types.GRPCApplication) {
// Write requests
for counter := 0; counter < numDeliverTxs; counter++ {
// Send request
response, err := client.DeliverTx(context.Background(), &types.RequestDeliverTx{[]byte("test")})
response, err := client.DeliverTx(context.Background(), &types.RequestDeliverTx{Tx: []byte("test")})
if err != nil {
t.Fatalf("Error in GRPC DeliverTx: %v", err.Error())
}
@@ -146,7 +148,7 @@ func testGRPCSync(t *testing.T, app *types.GRPCApplication) {
t.Log("response", counter)
if counter == numDeliverTxs {
go func() {
time.Sleep(time.Second * 2) // Wait for a bit to allow counter overflow
time.Sleep(time.Second * 1) // Wait for a bit to allow counter overflow
}()
}

View File

@@ -7,12 +7,10 @@ import (
// RandVal creates one random validator, with a key derived
// from the input value
func RandVal(i int) types.Validator {
addr := cmn.RandBytes(20)
func RandVal(i int) types.ValidatorUpdate {
pubkey := cmn.RandBytes(32)
power := cmn.RandUint16() + 1
v := types.Ed25519Validator(pubkey, int64(power))
v.Address = addr
v := types.Ed25519ValidatorUpdate(pubkey, int64(power))
return v
}
@@ -20,8 +18,8 @@ func RandVal(i int) types.Validator {
// the application. Note that the keys are deterministically
// derived from the index in the array, while the power is
// random (Change this if not desired)
func RandVals(cnt int) []types.Validator {
res := make([]types.Validator, cnt)
func RandVals(cnt int) []types.ValidatorUpdate {
res := make([]types.ValidatorUpdate, cnt)
for i := 0; i < cnt; i++ {
res[i] = RandVal(i)
}

View File

@@ -10,11 +10,14 @@ import (
"github.com/tendermint/tendermint/abci/types"
cmn "github.com/tendermint/tendermint/libs/common"
dbm "github.com/tendermint/tendermint/libs/db"
"github.com/tendermint/tendermint/version"
)
var (
stateKey = []byte("stateKey")
kvPairPrefixKey = []byte("kvPairKey:")
ProtocolVersion version.Protocol = 0x1
)
type State struct {
@@ -65,7 +68,11 @@ func NewKVStoreApplication() *KVStoreApplication {
}
func (app *KVStoreApplication) Info(req types.RequestInfo) (resInfo types.ResponseInfo) {
return types.ResponseInfo{Data: fmt.Sprintf("{\"size\":%v}", app.state.Size)}
return types.ResponseInfo{
Data: fmt.Sprintf("{\"size\":%v}", app.state.Size),
Version: version.ABCIVersion,
AppVersion: ProtocolVersion.Uint64(),
}
}
// tx is either "key=value" or just arbitrary bytes
@@ -81,14 +88,14 @@ func (app *KVStoreApplication) DeliverTx(tx []byte) types.ResponseDeliverTx {
app.state.Size += 1
tags := []cmn.KVPair{
{[]byte("app.creator"), []byte("jae")},
{[]byte("app.key"), key},
{Key: []byte("app.creator"), Value: []byte("Cosmoshi Netowoko")},
{Key: []byte("app.key"), Value: key},
}
return types.ResponseDeliverTx{Code: code.CodeTypeOK, Tags: tags}
}
func (app *KVStoreApplication) CheckTx(tx []byte) types.ResponseCheckTx {
return types.ResponseCheckTx{Code: code.CodeTypeOK}
return types.ResponseCheckTx{Code: code.CodeTypeOK, GasWanted: 1}
}
func (app *KVStoreApplication) Commit() types.ResponseCommit {
@@ -114,6 +121,7 @@ func (app *KVStoreApplication) Query(reqQuery types.RequestQuery) (resQuery type
}
return
} else {
resQuery.Key = reqQuery.Data
value := app.state.db.Get(prefixKey(reqQuery.Data))
resQuery.Value = value
if value != nil {

View File

@@ -2,6 +2,7 @@ package kvstore
import (
"bytes"
"fmt"
"io/ioutil"
"sort"
"testing"
@@ -90,8 +91,8 @@ func TestPersistentKVStoreInfo(t *testing.T) {
header := types.Header{
Height: int64(height),
}
kvstore.BeginBlock(types.RequestBeginBlock{hash, header, nil, nil})
kvstore.EndBlock(types.RequestEndBlock{header.Height})
kvstore.BeginBlock(types.RequestBeginBlock{Hash: hash, Header: header})
kvstore.EndBlock(types.RequestEndBlock{Height: header.Height})
kvstore.Commit()
resInfo = kvstore.Info(types.RequestInfo{})
@@ -121,11 +122,11 @@ func TestValUpdates(t *testing.T) {
vals1, vals2 := vals[:nInit], kvstore.Validators()
valsEqual(t, vals1, vals2)
var v1, v2, v3 types.Validator
var v1, v2, v3 types.ValidatorUpdate
// add some validators
v1, v2 = vals[nInit], vals[nInit+1]
diff := []types.Validator{v1, v2}
diff := []types.ValidatorUpdate{v1, v2}
tx1 := MakeValSetChangeTx(v1.PubKey, v1.Power)
tx2 := MakeValSetChangeTx(v2.PubKey, v2.Power)
@@ -139,7 +140,7 @@ func TestValUpdates(t *testing.T) {
v1.Power = 0
v2.Power = 0
v3.Power = 0
diff = []types.Validator{v1, v2, v3}
diff = []types.ValidatorUpdate{v1, v2, v3}
tx1 = MakeValSetChangeTx(v1.PubKey, v1.Power)
tx2 = MakeValSetChangeTx(v2.PubKey, v2.Power)
tx3 := MakeValSetChangeTx(v3.PubKey, v3.Power)
@@ -157,18 +158,18 @@ func TestValUpdates(t *testing.T) {
} else {
v1.Power = 5
}
diff = []types.Validator{v1}
diff = []types.ValidatorUpdate{v1}
tx1 = MakeValSetChangeTx(v1.PubKey, v1.Power)
makeApplyBlock(t, kvstore, 3, diff, tx1)
vals1 = append([]types.Validator{v1}, vals1[1:]...)
vals1 = append([]types.ValidatorUpdate{v1}, vals1[1:]...)
vals2 = kvstore.Validators()
valsEqual(t, vals1, vals2)
}
func makeApplyBlock(t *testing.T, kvstore types.Application, heightInt int, diff []types.Validator, txs ...[]byte) {
func makeApplyBlock(t *testing.T, kvstore types.Application, heightInt int, diff []types.ValidatorUpdate, txs ...[]byte) {
// make and apply block
height := int64(heightInt)
hash := []byte("foo")
@@ -176,13 +177,13 @@ func makeApplyBlock(t *testing.T, kvstore types.Application, heightInt int, diff
Height: height,
}
kvstore.BeginBlock(types.RequestBeginBlock{hash, header, nil, nil})
kvstore.BeginBlock(types.RequestBeginBlock{Hash: hash, Header: header})
for _, tx := range txs {
if r := kvstore.DeliverTx(tx); r.IsErr() {
t.Fatal(r)
}
}
resEndBlock := kvstore.EndBlock(types.RequestEndBlock{header.Height})
resEndBlock := kvstore.EndBlock(types.RequestEndBlock{Height: header.Height})
kvstore.Commit()
valsEqual(t, diff, resEndBlock.ValidatorUpdates)
@@ -190,12 +191,12 @@ func makeApplyBlock(t *testing.T, kvstore types.Application, heightInt int, diff
}
// order doesn't matter
func valsEqual(t *testing.T, vals1, vals2 []types.Validator) {
func valsEqual(t *testing.T, vals1, vals2 []types.ValidatorUpdate) {
if len(vals1) != len(vals2) {
t.Fatalf("vals dont match in len. got %d, expected %d", len(vals2), len(vals1))
}
sort.Sort(types.Validators(vals1))
sort.Sort(types.Validators(vals2))
sort.Sort(types.ValidatorUpdates(vals1))
sort.Sort(types.ValidatorUpdates(vals2))
for i, v1 := range vals1 {
v2 := vals2[i]
if !bytes.Equal(v1.PubKey.Data, v2.PubKey.Data) ||
@@ -207,7 +208,7 @@ func valsEqual(t *testing.T, vals1, vals2 []types.Validator) {
func makeSocketClientServer(app types.Application, name string) (abcicli.Client, cmn.Service, error) {
// Start the listener
socket := cmn.Fmt("unix://%s.sock", name)
socket := fmt.Sprintf("unix://%s.sock", name)
logger := log.TestingLogger()
server := abciserver.NewSocketServer(socket, app)
@@ -229,7 +230,7 @@ func makeSocketClientServer(app types.Application, name string) (abcicli.Client,
func makeGRPCClientServer(app types.Application, name string) (abcicli.Client, cmn.Service, error) {
// Start the listener
socket := cmn.Fmt("unix://%s.sock", name)
socket := fmt.Sprintf("unix://%s.sock", name)
logger := log.TestingLogger()
gapp := types.NewGRPCApplication(app)

View File

@@ -9,7 +9,6 @@ import (
"github.com/tendermint/tendermint/abci/example/code"
"github.com/tendermint/tendermint/abci/types"
cmn "github.com/tendermint/tendermint/libs/common"
dbm "github.com/tendermint/tendermint/libs/db"
"github.com/tendermint/tendermint/libs/log"
)
@@ -26,7 +25,7 @@ type PersistentKVStoreApplication struct {
app *KVStoreApplication
// validator set
ValUpdates []types.Validator
ValUpdates []types.ValidatorUpdate
logger log.Logger
}
@@ -102,7 +101,7 @@ func (app *PersistentKVStoreApplication) InitChain(req types.RequestInitChain) t
// Track the block hash and header information
func (app *PersistentKVStoreApplication) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock {
// reset valset changes
app.ValUpdates = make([]types.Validator, 0)
app.ValUpdates = make([]types.ValidatorUpdate, 0)
return types.ResponseBeginBlock{}
}
@@ -114,11 +113,11 @@ func (app *PersistentKVStoreApplication) EndBlock(req types.RequestEndBlock) typ
//---------------------------------------------
// update validators
func (app *PersistentKVStoreApplication) Validators() (validators []types.Validator) {
func (app *PersistentKVStoreApplication) Validators() (validators []types.ValidatorUpdate) {
itr := app.app.state.db.Iterator(nil, nil)
for ; itr.Valid(); itr.Next() {
if isValidatorTx(itr.Key()) {
validator := new(types.Validator)
validator := new(types.ValidatorUpdate)
err := types.ReadMessage(bytes.NewBuffer(itr.Value()), validator)
if err != nil {
panic(err)
@@ -130,7 +129,7 @@ func (app *PersistentKVStoreApplication) Validators() (validators []types.Valida
}
func MakeValSetChangeTx(pubkey types.PubKey, power int64) []byte {
return []byte(cmn.Fmt("val:%X/%d", pubkey.Data, power))
return []byte(fmt.Sprintf("val:%X/%d", pubkey.Data, power))
}
func isValidatorTx(tx []byte) bool {
@@ -168,11 +167,11 @@ func (app *PersistentKVStoreApplication) execValidatorTx(tx []byte) types.Respon
}
// update
return app.updateValidator(types.Ed25519Validator(pubkey, int64(power)))
return app.updateValidator(types.Ed25519ValidatorUpdate(pubkey, int64(power)))
}
// add, update, or remove a validator
func (app *PersistentKVStoreApplication) updateValidator(v types.Validator) types.ResponseDeliverTx {
func (app *PersistentKVStoreApplication) updateValidator(v types.ValidatorUpdate) types.ResponseDeliverTx {
key := []byte("val:" + string(v.PubKey.Data))
if v.Power == 0 {
// remove validator

View File

@@ -12,11 +12,11 @@ import (
func InitChain(client abcicli.Client) error {
total := 10
vals := make([]types.Validator, total)
vals := make([]types.ValidatorUpdate, total)
for i := 0; i < total; i++ {
pubkey := cmn.RandBytes(33)
power := cmn.RandInt()
vals[i] = types.Ed25519Validator(pubkey, int64(power))
vals[i] = types.Ed25519ValidatorUpdate(pubkey, int64(power))
}
_, err := client.InitChainSync(types.RequestInitChain{
Validators: vals,

View File

@@ -26,7 +26,7 @@ func startClient(abciType string) abcicli.Client {
}
func setOption(client abcicli.Client, key, value string) {
_, err := client.SetOptionSync(types.RequestSetOption{key, value})
_, err := client.SetOptionSync(types.RequestSetOption{Key: key, Value: value})
if err != nil {
panicf("setting %v=%v: \nerr: %v", key, value, err)
}

View File

@@ -28,6 +28,8 @@
-> code: OK
-> log: exists
-> height: 0
-> key: abc
-> key.hex: 616263
-> value: abc
-> value.hex: 616263
@@ -42,6 +44,8 @@
-> code: OK
-> log: exists
-> height: 0
-> key: def
-> key.hex: 646566
-> value: xyz
-> value.hex: 78797A

View File

@@ -85,7 +85,7 @@ func NewGRPCApplication(app Application) *GRPCApplication {
}
func (app *GRPCApplication) Echo(ctx context.Context, req *RequestEcho) (*ResponseEcho, error) {
return &ResponseEcho{req.Message}, nil
return &ResponseEcho{Message: req.Message}, nil
}
func (app *GRPCApplication) Flush(ctx context.Context, req *RequestFlush) (*ResponseFlush, error) {

View File

@@ -71,7 +71,7 @@ func encodeVarint(w io.Writer, i int64) (err error) {
func ToRequestEcho(message string) *Request {
return &Request{
Value: &Request_Echo{&RequestEcho{message}},
Value: &Request_Echo{&RequestEcho{Message: message}},
}
}
@@ -95,13 +95,13 @@ func ToRequestSetOption(req RequestSetOption) *Request {
func ToRequestDeliverTx(tx []byte) *Request {
return &Request{
Value: &Request_DeliverTx{&RequestDeliverTx{tx}},
Value: &Request_DeliverTx{&RequestDeliverTx{Tx: tx}},
}
}
func ToRequestCheckTx(tx []byte) *Request {
return &Request{
Value: &Request_CheckTx{&RequestCheckTx{tx}},
Value: &Request_CheckTx{&RequestCheckTx{Tx: tx}},
}
}
@@ -139,13 +139,13 @@ func ToRequestEndBlock(req RequestEndBlock) *Request {
func ToResponseException(errStr string) *Response {
return &Response{
Value: &Response_Exception{&ResponseException{errStr}},
Value: &Response_Exception{&ResponseException{Error: errStr}},
}
}
func ToResponseEcho(message string) *Response {
return &Response{
Value: &Response_Echo{&ResponseEcho{message}},
Value: &Response_Echo{&ResponseEcho{Message: message}},
}
}

View File

@@ -22,7 +22,7 @@ func TestMarshalJSON(t *testing.T) {
Data: []byte("hello"),
GasWanted: 43,
Tags: []cmn.KVPair{
{[]byte("pho"), []byte("bo")},
{Key: []byte("pho"), Value: []byte("bo")},
},
}
b, err = json.Marshal(&r1)
@@ -83,9 +83,8 @@ func TestWriteReadMessage2(t *testing.T) {
Log: phrase,
GasWanted: 10,
Tags: []cmn.KVPair{
cmn.KVPair{[]byte("abc"), []byte("def")},
cmn.KVPair{Key: []byte("abc"), Value: []byte("def")},
},
// Fee: cmn.KI64Pair{
},
// TODO: add the rest
}

View File

@@ -4,8 +4,8 @@ const (
PubKeyEd25519 = "ed25519"
)
func Ed25519Validator(pubkey []byte, power int64) Validator {
return Validator{
func Ed25519ValidatorUpdate(pubkey []byte, power int64) ValidatorUpdate {
return ValidatorUpdate{
// Address:
PubKey: PubKey{
Type: PubKeyEd25519,

File diff suppressed because it is too large Load Diff

View File

@@ -4,7 +4,9 @@ package types;
// For more information on gogo.proto, see:
// https://github.com/gogo/protobuf/blob/master/extensions.md
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
import "google/protobuf/timestamp.proto";
import "github.com/tendermint/tendermint/libs/common/types.proto";
import "github.com/tendermint/tendermint/crypto/merkle/merkle.proto";
// This file is copied from http://github.com/tendermint/abci
// NOTE: When using custom types, mind the warnings.
@@ -47,6 +49,8 @@ message RequestFlush {
message RequestInfo {
string version = 1;
uint64 block_version = 2;
uint64 p2p_version = 3;
}
// nondeterministic
@@ -56,10 +60,10 @@ message RequestSetOption {
}
message RequestInitChain {
int64 time = 1;
google.protobuf.Timestamp time = 1 [(gogoproto.nullable)=false, (gogoproto.stdtime)=true];
string chain_id = 2;
ConsensusParams consensus_params = 3;
repeated Validator validators = 4 [(gogoproto.nullable)=false];
repeated ValidatorUpdate validators = 4 [(gogoproto.nullable)=false];
bytes app_state_bytes = 5;
}
@@ -73,7 +77,7 @@ message RequestQuery {
message RequestBeginBlock {
bytes hash = 1;
Header header = 2 [(gogoproto.nullable)=false];
repeated SigningValidator validators = 3 [(gogoproto.nullable)=false];
LastCommitInfo last_commit_info = 3 [(gogoproto.nullable)=false];
repeated Evidence byzantine_validators = 4 [(gogoproto.nullable)=false];
}
@@ -126,9 +130,12 @@ message ResponseFlush {
message ResponseInfo {
string data = 1;
string version = 2;
int64 last_block_height = 3;
bytes last_block_app_hash = 4;
uint64 app_version = 3;
int64 last_block_height = 4;
bytes last_block_app_hash = 5;
}
// nondeterministic
@@ -141,7 +148,7 @@ message ResponseSetOption {
message ResponseInitChain {
ConsensusParams consensus_params = 1;
repeated Validator validators = 2 [(gogoproto.nullable)=false];
repeated ValidatorUpdate validators = 2 [(gogoproto.nullable)=false];
}
message ResponseQuery {
@@ -152,8 +159,9 @@ message ResponseQuery {
int64 index = 5;
bytes key = 6;
bytes value = 7;
bytes proof = 8;
merkle.Proof proof = 8;
int64 height = 9;
string codespace = 10;
}
message ResponseBeginBlock {
@@ -168,7 +176,7 @@ message ResponseCheckTx {
int64 gas_wanted = 5;
int64 gas_used = 6;
repeated common.KVPair tags = 7 [(gogoproto.nullable)=false, (gogoproto.jsontag)="tags,omitempty"];
common.KI64Pair fee = 8 [(gogoproto.nullable)=false];
string codespace = 8;
}
message ResponseDeliverTx {
@@ -179,11 +187,11 @@ message ResponseDeliverTx {
int64 gas_wanted = 5;
int64 gas_used = 6;
repeated common.KVPair tags = 7 [(gogoproto.nullable)=false, (gogoproto.jsontag)="tags,omitempty"];
common.KI64Pair fee = 8 [(gogoproto.nullable)=false];
string codespace = 8;
}
message ResponseEndBlock {
repeated Validator validator_updates = 1 [(gogoproto.nullable)=false];
repeated ValidatorUpdate validator_updates = 1 [(gogoproto.nullable)=false];
ConsensusParams consensus_param_updates = 2;
repeated common.KVPair tags = 3 [(gogoproto.nullable)=false, (gogoproto.jsontag)="tags,omitempty"];
}
@@ -199,63 +207,97 @@ message ResponseCommit {
// ConsensusParams contains all consensus-relevant parameters
// that can be adjusted by the abci app
message ConsensusParams {
BlockSize block_size = 1;
TxSize tx_size = 2;
BlockGossip block_gossip = 3;
BlockSizeParams block_size = 1;
EvidenceParams evidence = 2;
ValidatorParams validator = 3;
}
// BlockSize contain limits on the block size.
message BlockSize {
int32 max_bytes = 1;
int32 max_txs = 2;
int64 max_gas = 3;
}
// TxSize contain limits on the tx size.
message TxSize {
int32 max_bytes = 1;
// BlockSize contains limits on the block size.
message BlockSizeParams {
// Note: must be greater than 0
int64 max_bytes = 1;
// Note: must be greater or equal to -1
int64 max_gas = 2;
}
// BlockGossip determine consensus critical
// elements of how blocks are gossiped
message BlockGossip {
// Note: must not be 0
int32 block_part_size_bytes = 1;
// EvidenceParams contains limits on the evidence.
message EvidenceParams {
// Note: must be greater than 0
int64 max_age = 1;
}
// ValidatorParams contains limits on validators.
message ValidatorParams {
repeated string pub_key_types = 1;
}
message LastCommitInfo {
int32 round = 1;
repeated VoteInfo votes = 2 [(gogoproto.nullable)=false];
}
//----------------------------------------
// Blockchain Types
// just the minimum the app might need
message Header {
// basics
string chain_id = 1 [(gogoproto.customname)="ChainID"];
int64 height = 2;
int64 time = 3;
// basic block info
Version version = 1 [(gogoproto.nullable)=false];
string chain_id = 2 [(gogoproto.customname)="ChainID"];
int64 height = 3;
google.protobuf.Timestamp time = 4 [(gogoproto.nullable)=false, (gogoproto.stdtime)=true];
int64 num_txs = 5;
int64 total_txs = 6;
// txs
int32 num_txs = 4;
int64 total_txs = 5;
// prev block info
BlockID last_block_id = 7 [(gogoproto.nullable)=false];
// hashes
bytes last_block_hash = 6;
bytes validators_hash = 7;
bytes app_hash = 8;
// hashes of block data
bytes last_commit_hash = 8; // commit from validators from the last block
bytes data_hash = 9; // transactions
// consensus
Validator proposer = 9 [(gogoproto.nullable)=false];
// hashes from the app output from the prev block
bytes validators_hash = 10; // validators for the current block
bytes next_validators_hash = 11; // validators for the next block
bytes consensus_hash = 12; // consensus params for current block
bytes app_hash = 13; // state after txs from the previous block
bytes last_results_hash = 14;// root hash of all results from the txs from the previous block
// consensus info
bytes evidence_hash = 15; // evidence included in the block
bytes proposer_address = 16; // original proposer of the block
}
message Version {
uint64 Block = 1;
uint64 App = 2;
}
message BlockID {
bytes hash = 1;
PartSetHeader parts_header = 2 [(gogoproto.nullable)=false];
}
message PartSetHeader {
int32 total = 1;
bytes hash = 2;
}
// Validator
message Validator {
bytes address = 1;
PubKey pub_key = 2 [(gogoproto.nullable)=false];
//PubKey pub_key = 2 [(gogoproto.nullable)=false];
int64 power = 3;
}
// Validator with an extra bool
message SigningValidator {
// ValidatorUpdate
message ValidatorUpdate {
PubKey pub_key = 1 [(gogoproto.nullable)=false];
int64 power = 2;
}
// VoteInfo
message VoteInfo {
Validator validator = 1 [(gogoproto.nullable)=false];
bool signed_last_block = 2;
}
@@ -269,7 +311,7 @@ message Evidence {
string type = 1;
Validator validator = 2 [(gogoproto.nullable)=false];
int64 height = 3;
int64 time = 4;
google.protobuf.Timestamp time = 4 [(gogoproto.nullable)=false, (gogoproto.stdtime)=true];
int64 total_voting_power = 5;
}

View File

@@ -1,31 +0,0 @@
package types
import (
"testing"
asrt "github.com/stretchr/testify/assert"
)
func TestConsensusParams(t *testing.T) {
assert := asrt.New(t)
params := &ConsensusParams{
BlockSize: &BlockSize{MaxGas: 12345},
BlockGossip: &BlockGossip{BlockPartSizeBytes: 54321},
}
var noParams *ConsensusParams // nil
// no error with nil fields
assert.Nil(noParams.GetBlockSize())
assert.EqualValues(noParams.GetBlockSize().GetMaxGas(), 0)
// get values with real fields
assert.NotNil(params.GetBlockSize())
assert.EqualValues(params.GetBlockSize().GetMaxTxs(), 0)
assert.EqualValues(params.GetBlockSize().GetMaxGas(), 12345)
assert.NotNil(params.GetBlockGossip())
assert.EqualValues(params.GetBlockGossip().GetBlockPartSizeBytes(), 54321)
assert.Nil(params.GetTxSize())
assert.EqualValues(params.GetTxSize().GetMaxBytes(), 0)
}

File diff suppressed because it is too large Load Diff

View File

@@ -2,58 +2,33 @@ package types
import (
"bytes"
"encoding/json"
"sort"
cmn "github.com/tendermint/tendermint/libs/common"
)
//------------------------------------------------------------------------------
// Validators is a list of validators that implements the Sort interface
type Validators []Validator
// ValidatorUpdates is a list of validators that implements the Sort interface
type ValidatorUpdates []ValidatorUpdate
var _ sort.Interface = (Validators)(nil)
var _ sort.Interface = (ValidatorUpdates)(nil)
// All these methods for Validators:
// All these methods for ValidatorUpdates:
// Len, Less and Swap
// are for Validators to implement sort.Interface
// are for ValidatorUpdates to implement sort.Interface
// which will be used by the sort package.
// See Issue https://github.com/tendermint/abci/issues/212
func (v Validators) Len() int {
func (v ValidatorUpdates) Len() int {
return len(v)
}
// XXX: doesn't distinguish same validator with different power
func (v Validators) Less(i, j int) bool {
func (v ValidatorUpdates) Less(i, j int) bool {
return bytes.Compare(v[i].PubKey.Data, v[j].PubKey.Data) <= 0
}
func (v Validators) Swap(i, j int) {
func (v ValidatorUpdates) Swap(i, j int) {
v1 := v[i]
v[i] = v[j]
v[j] = v1
}
func ValidatorsString(vs Validators) string {
s := make([]validatorPretty, len(vs))
for i, v := range vs {
s[i] = validatorPretty{
Address: v.Address,
PubKey: v.PubKey.Data,
Power: v.Power,
}
}
b, err := json.Marshal(s)
if err != nil {
panic(err.Error())
}
return string(b)
}
type validatorPretty struct {
Address cmn.HexBytes `json:"address"`
PubKey []byte `json:"pub_key"`
Power int64 `json:"power"`
}

View File

@@ -1,9 +1,9 @@
package version
// NOTE: we should probably be versioning the ABCI and the abci-cli separately
import (
"github.com/tendermint/tendermint/version"
)
const Maj = "0"
const Min = "12"
const Fix = "0"
// TODO: eliminate this after some version refactor
const Version = "0.12.0"
const Version = version.ABCIVersion

View File

@@ -12,20 +12,28 @@ import (
ctypes "github.com/tendermint/tendermint/rpc/core/types"
)
func testNodeInfo(id p2p.ID) p2p.DefaultNodeInfo {
return p2p.DefaultNodeInfo{
ProtocolVersion: p2p.ProtocolVersion{1, 2, 3},
ID_: id,
Moniker: "SOMENAME",
Network: "SOMENAME",
ListenAddr: "SOMEADDR",
Version: "SOMEVER",
Other: p2p.DefaultNodeInfoOther{
TxIndex: "on",
RPCAddress: "0.0.0.0:26657",
},
}
}
func BenchmarkEncodeStatusWire(b *testing.B) {
b.StopTimer()
cdc := amino.NewCodec()
ctypes.RegisterAmino(cdc)
nodeKey := p2p.NodeKey{PrivKey: ed25519.GenPrivKey()}
status := &ctypes.ResultStatus{
NodeInfo: p2p.NodeInfo{
ID: nodeKey.ID(),
Moniker: "SOMENAME",
Network: "SOMENAME",
ListenAddr: "SOMEADDR",
Version: "SOMEVER",
Other: []string{"SOMESTRING", "OTHERSTRING"},
},
NodeInfo: testNodeInfo(nodeKey.ID()),
SyncInfo: ctypes.SyncInfo{
LatestBlockHash: []byte("SOMEBYTES"),
LatestBlockHeight: 123,
@@ -53,14 +61,7 @@ func BenchmarkEncodeNodeInfoWire(b *testing.B) {
cdc := amino.NewCodec()
ctypes.RegisterAmino(cdc)
nodeKey := p2p.NodeKey{PrivKey: ed25519.GenPrivKey()}
nodeInfo := p2p.NodeInfo{
ID: nodeKey.ID(),
Moniker: "SOMENAME",
Network: "SOMENAME",
ListenAddr: "SOMEADDR",
Version: "SOMEVER",
Other: []string{"SOMESTRING", "OTHERSTRING"},
}
nodeInfo := testNodeInfo(nodeKey.ID())
b.StartTimer()
counter := 0
@@ -78,14 +79,7 @@ func BenchmarkEncodeNodeInfoBinary(b *testing.B) {
cdc := amino.NewCodec()
ctypes.RegisterAmino(cdc)
nodeKey := p2p.NodeKey{PrivKey: ed25519.GenPrivKey()}
nodeInfo := p2p.NodeInfo{
ID: nodeKey.ID(),
Moniker: "SOMENAME",
Network: "SOMENAME",
ListenAddr: "SOMEADDR",
Version: "SOMEVER",
Other: []string{"SOMESTRING", "OTHERSTRING"},
}
nodeInfo := testNodeInfo(nodeKey.ID())
b.StartTimer()
counter := 0

View File

@@ -6,8 +6,8 @@ import (
"fmt"
"time"
rpcclient "github.com/tendermint/tendermint/rpc/lib/client"
cmn "github.com/tendermint/tendermint/libs/common"
rpcclient "github.com/tendermint/tendermint/rpc/lib/client"
)
func main() {

View File

@@ -29,10 +29,10 @@ eg, L = latency = 0.1s
*/
const (
requestIntervalMS = 100
maxTotalRequesters = 1000
requestIntervalMS = 2
maxTotalRequesters = 600
maxPendingRequests = maxTotalRequesters
maxPendingRequestsPerPeer = 50
maxPendingRequestsPerPeer = 20
// Minimum recv rate to ensure we're receiving blocks from a peer fast
// enough. If a peer is not sending us data at at least that rate, we
@@ -219,14 +219,12 @@ func (pool *BlockPool) RedoRequest(height int64) p2p.ID {
defer pool.mtx.Unlock()
request := pool.requesters[height]
if request.block == nil {
panic("Expected block to be non-nil")
peerID := request.getPeerID()
if peerID != p2p.ID("") {
// RemovePeer will redo all requesters associated with this peer.
pool.removePeer(peerID)
}
// RemovePeer will redo all requesters associated with this peer.
pool.removePeer(request.peerID)
return request.peerID
return peerID
}
// TODO: ensure that blocks come in order for each peer.
@@ -367,10 +365,10 @@ func (pool *BlockPool) debug() string {
nextHeight := pool.height + pool.requestersLen()
for h := pool.height; h < nextHeight; h++ {
if pool.requesters[h] == nil {
str += cmn.Fmt("H(%v):X ", h)
str += fmt.Sprintf("H(%v):X ", h)
} else {
str += cmn.Fmt("H(%v):", h)
str += cmn.Fmt("B?(%v) ", pool.requesters[h].block != nil)
str += fmt.Sprintf("H(%v):", h)
str += fmt.Sprintf("B?(%v) ", pool.requesters[h].block != nil)
}
}
return str


@@ -1,6 +1,7 @@
package blockchain
import (
"errors"
"fmt"
"reflect"
"time"
@@ -18,7 +19,8 @@ const (
// BlockchainChannel is a channel for blocks and status updates (`BlockStore` height)
BlockchainChannel = byte(0x40)
trySyncIntervalMS = 50
trySyncIntervalMS = 10
// stop syncing when last block's time is
// within this much of the system time.
// stopSyncingDurationMinutes = 10
@@ -76,8 +78,9 @@ func NewBlockchainReactor(state sm.State, blockExec *sm.BlockExecutor, store *Bl
store.Height()))
}
const capacity = 1000 // must be bigger than peers count
requestsCh := make(chan BlockRequest, capacity)
requestsCh := make(chan BlockRequest, maxTotalRequesters)
const capacity = 1000 // must be bigger than peers count
errorsCh := make(chan peerError, capacity) // so we don't block in #Receive#pool.AddBlock
pool := NewBlockPool(
@@ -107,9 +110,6 @@ func (bcR *BlockchainReactor) SetLogger(l log.Logger) {
// OnStart implements cmn.Service.
func (bcR *BlockchainReactor) OnStart() error {
if err := bcR.BaseReactor.OnStart(); err != nil {
return err
}
if bcR.fastSync {
err := bcR.pool.Start()
if err != nil {
@@ -122,7 +122,6 @@ func (bcR *BlockchainReactor) OnStart() error {
// OnStop implements cmn.Service.
func (bcR *BlockchainReactor) OnStop() {
bcR.BaseReactor.OnStop()
bcR.pool.Stop()
}
@@ -182,6 +181,12 @@ func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
return
}
if err = msg.ValidateBasic(); err != nil {
bcR.Logger.Error("Peer sent us invalid msg", "peer", src, "msg", msg, "err", err)
bcR.Switch.StopPeerForError(src, err)
return
}
bcR.Logger.Debug("Receive", "src", src, "chID", chID, "msg", msg)
switch msg := msg.(type) {
@@ -190,7 +195,6 @@ func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
// Unfortunately not queued since the queue is full.
}
case *bcBlockResponseMessage:
// Got a block.
bcR.pool.AddBlock(src.ID(), msg.Block, len(msgBytes))
case *bcStatusRequestMessage:
// Send peer our state.
@@ -203,13 +207,12 @@ func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
// Got a peer status. Unverified.
bcR.pool.SetPeerHeight(src.ID(), msg.Height)
default:
bcR.Logger.Error(cmn.Fmt("Unknown message type %v", reflect.TypeOf(msg)))
bcR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
}
}
// Handle messages from the poolReactor telling the reactor what to do.
// NOTE: Don't sleep in the FOR_LOOP or otherwise slow it down!
// (Except for the SYNC_LOOP, which is the primary purpose and must be synchronous.)
func (bcR *BlockchainReactor) poolRoutine() {
trySyncTicker := time.NewTicker(trySyncIntervalMS * time.Millisecond)
@@ -224,6 +227,8 @@ func (bcR *BlockchainReactor) poolRoutine() {
lastHundred := time.Now()
lastRate := 0.0
didProcessCh := make(chan struct{}, 1)
FOR_LOOP:
for {
select {
@@ -239,14 +244,17 @@ FOR_LOOP:
// The pool handles timeouts, just let it go.
continue FOR_LOOP
}
case err := <-bcR.errorsCh:
peer := bcR.Switch.Peers().Get(err.peerID)
if peer != nil {
bcR.Switch.StopPeerForError(peer, err)
}
case <-statusUpdateTicker.C:
// ask for status updates
go bcR.BroadcastStatusRequest() // nolint: errcheck
case <-switchToConsensusTicker.C:
height, numPending, lenRequesters := bcR.pool.GetStatus()
outbound, inbound, _ := bcR.Switch.NumPeers()
@@ -261,60 +269,78 @@ FOR_LOOP:
break FOR_LOOP
}
case <-trySyncTicker.C: // chan time
// This loop can be slow as long as it's doing syncing work.
SYNC_LOOP:
for i := 0; i < 10; i++ {
// See if there are any blocks to sync.
first, second := bcR.pool.PeekTwoBlocks()
//bcR.Logger.Info("TrySync peeked", "first", first, "second", second)
if first == nil || second == nil {
// We need both to sync the first block.
break SYNC_LOOP
select {
case didProcessCh <- struct{}{}:
default:
}
case <-didProcessCh:
// NOTE: It is a subtle mistake to process more than a single block
// at a time (e.g. 10) here, because we only TrySend 1 request per
// loop. The ratio mismatch can result in starving of blocks, a
// sudden burst of requests and responses, and repeat.
// Consequently, it is better to split these routines rather than
// coupling them as it's written here. TODO uncouple from request
// routine.
// See if there are any blocks to sync.
first, second := bcR.pool.PeekTwoBlocks()
//bcR.Logger.Info("TrySync peeked", "first", first, "second", second)
if first == nil || second == nil {
// We need both to sync the first block.
continue FOR_LOOP
} else {
// Try again quickly next loop.
didProcessCh <- struct{}{}
}
firstParts := first.MakePartSet(types.BlockPartSizeBytes)
firstPartsHeader := firstParts.Header()
firstID := types.BlockID{first.Hash(), firstPartsHeader}
// Finally, verify the first block using the second's commit
// NOTE: we can probably make this more efficient, but note that calling
// first.Hash() doesn't verify the tx contents, so MakePartSet() is
// currently necessary.
err := state.Validators.VerifyCommit(
chainID, firstID, first.Height, second.LastCommit)
if err != nil {
bcR.Logger.Error("Error in validation", "err", err)
peerID := bcR.pool.RedoRequest(first.Height)
peer := bcR.Switch.Peers().Get(peerID)
if peer != nil {
// NOTE: we've already removed the peer's request, but we
// still need to clean up the rest.
bcR.Switch.StopPeerForError(peer, fmt.Errorf("BlockchainReactor validation error: %v", err))
}
firstParts := first.MakePartSet(state.ConsensusParams.BlockPartSizeBytes)
firstPartsHeader := firstParts.Header()
firstID := types.BlockID{first.Hash(), firstPartsHeader}
// Finally, verify the first block using the second's commit
// NOTE: we can probably make this more efficient, but note that calling
// first.Hash() doesn't verify the tx contents, so MakePartSet() is
// currently necessary.
err := state.Validators.VerifyCommit(
chainID, firstID, first.Height, second.LastCommit)
continue FOR_LOOP
} else {
bcR.pool.PopRequest()
// TODO: batch saves so we don't persist to disk every block
bcR.store.SaveBlock(first, firstParts, second.LastCommit)
// TODO: same thing for app - but we would need a way to
// get the hash without persisting the state
var err error
state, err = bcR.blockExec.ApplyBlock(state, firstID, first)
if err != nil {
bcR.Logger.Error("Error in validation", "err", err)
peerID := bcR.pool.RedoRequest(first.Height)
peer := bcR.Switch.Peers().Get(peerID)
if peer != nil {
bcR.Switch.StopPeerForError(peer, fmt.Errorf("BlockchainReactor validation error: %v", err))
}
break SYNC_LOOP
} else {
bcR.pool.PopRequest()
// TODO This is bad, are we zombie?
cmn.PanicQ(fmt.Sprintf("Failed to process committed block (%d:%X): %v",
first.Height, first.Hash(), err))
}
blocksSynced++
// TODO: batch saves so we don't persist to disk every block
bcR.store.SaveBlock(first, firstParts, second.LastCommit)
// TODO: same thing for app - but we would need a way to
// get the hash without persisting the state
var err error
state, err = bcR.blockExec.ApplyBlock(state, firstID, first)
if err != nil {
// TODO This is bad, are we zombie?
cmn.PanicQ(cmn.Fmt("Failed to process committed block (%d:%X): %v",
first.Height, first.Hash(), err))
}
blocksSynced++
if blocksSynced%100 == 0 {
lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds())
bcR.Logger.Info("Fast Sync Rate", "height", bcR.pool.height,
"max_peer_height", bcR.pool.MaxPeerHeight(), "blocks/s", lastRate)
lastHundred = time.Now()
}
if blocksSynced%100 == 0 {
lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds())
bcR.Logger.Info("Fast Sync Rate", "height", bcR.pool.height,
"max_peer_height", bcR.pool.MaxPeerHeight(), "blocks/s", lastRate)
lastHundred = time.Now()
}
}
continue FOR_LOOP
case <-bcR.Quit():
break FOR_LOOP
}
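
The NOTE in the hunk above explains why the reworked loop processes exactly one block per pass and re-arms itself through didProcessCh, rather than draining up to ten blocks per tick. A self-contained sketch of that one-item-per-iteration pattern, using a toy work queue instead of the real BlockPool:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	work := []string{"block-1", "block-2", "block-3"}

	// Buffered with capacity 1, like didProcessCh above: a non-blocking send
	// re-arms the loop, and duplicate signals are simply dropped.
	didProcess := make(chan struct{}, 1)
	tick := time.NewTicker(10 * time.Millisecond)
	defer tick.Stop()

	for {
		select {
		case <-tick.C:
			// The ticker only nudges the loop; the work happens below.
			select {
			case didProcess <- struct{}{}:
			default:
			}
		case <-didProcess:
			if len(work) == 0 {
				fmt.Println("done")
				return
			}
			// Process exactly one item per pass...
			fmt.Println("processed", work[0])
			work = work[1:]
			// ...then immediately schedule another pass.
			select {
			case didProcess <- struct{}{}:
			default:
			}
		}
	}
}
```
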
@@ -332,15 +358,17 @@ func (bcR *BlockchainReactor) BroadcastStatusRequest() error {
// Messages
// BlockchainMessage is a generic message for this reactor.
type BlockchainMessage interface{}
type BlockchainMessage interface {
ValidateBasic() error
}
func RegisterBlockchainMessages(cdc *amino.Codec) {
cdc.RegisterInterface((*BlockchainMessage)(nil), nil)
cdc.RegisterConcrete(&bcBlockRequestMessage{}, "tendermint/mempool/BlockRequest", nil)
cdc.RegisterConcrete(&bcBlockResponseMessage{}, "tendermint/mempool/BlockResponse", nil)
cdc.RegisterConcrete(&bcNoBlockResponseMessage{}, "tendermint/mempool/NoBlockResponse", nil)
cdc.RegisterConcrete(&bcStatusResponseMessage{}, "tendermint/mempool/StatusResponse", nil)
cdc.RegisterConcrete(&bcStatusRequestMessage{}, "tendermint/mempool/StatusRequest", nil)
cdc.RegisterConcrete(&bcBlockRequestMessage{}, "tendermint/blockchain/BlockRequest", nil)
cdc.RegisterConcrete(&bcBlockResponseMessage{}, "tendermint/blockchain/BlockResponse", nil)
cdc.RegisterConcrete(&bcNoBlockResponseMessage{}, "tendermint/blockchain/NoBlockResponse", nil)
cdc.RegisterConcrete(&bcStatusResponseMessage{}, "tendermint/blockchain/StatusResponse", nil)
cdc.RegisterConcrete(&bcStatusRequestMessage{}, "tendermint/blockchain/StatusRequest", nil)
}
func decodeMsg(bz []byte) (msg BlockchainMessage, err error) {
@@ -357,16 +385,32 @@ type bcBlockRequestMessage struct {
Height int64
}
// ValidateBasic performs basic validation.
func (m *bcBlockRequestMessage) ValidateBasic() error {
if m.Height < 0 {
return errors.New("Negative Height")
}
return nil
}
func (m *bcBlockRequestMessage) String() string {
return cmn.Fmt("[bcBlockRequestMessage %v]", m.Height)
return fmt.Sprintf("[bcBlockRequestMessage %v]", m.Height)
}
type bcNoBlockResponseMessage struct {
Height int64
}
// ValidateBasic performs basic validation.
func (m *bcNoBlockResponseMessage) ValidateBasic() error {
if m.Height < 0 {
return errors.New("Negative Height")
}
return nil
}
func (brm *bcNoBlockResponseMessage) String() string {
return cmn.Fmt("[bcNoBlockResponseMessage %d]", brm.Height)
return fmt.Sprintf("[bcNoBlockResponseMessage %d]", brm.Height)
}
//-------------------------------------
@@ -375,8 +419,17 @@ type bcBlockResponseMessage struct {
Block *types.Block
}
// ValidateBasic performs basic validation.
func (m *bcBlockResponseMessage) ValidateBasic() error {
if err := m.Block.ValidateBasic(); err != nil {
return err
}
return nil
}
func (m *bcBlockResponseMessage) String() string {
return cmn.Fmt("[bcBlockResponseMessage %v]", m.Block.Height)
return fmt.Sprintf("[bcBlockResponseMessage %v]", m.Block.Height)
}
//-------------------------------------
@@ -385,8 +438,16 @@ type bcStatusRequestMessage struct {
Height int64
}
// ValidateBasic performs basic validation.
func (m *bcStatusRequestMessage) ValidateBasic() error {
if m.Height < 0 {
return errors.New("Negative Height")
}
return nil
}
func (m *bcStatusRequestMessage) String() string {
return cmn.Fmt("[bcStatusRequestMessage %v]", m.Height)
return fmt.Sprintf("[bcStatusRequestMessage %v]", m.Height)
}
//-------------------------------------
@@ -395,6 +456,14 @@ type bcStatusResponseMessage struct {
Height int64
}
func (m *bcStatusResponseMessage) String() string {
return cmn.Fmt("[bcStatusResponseMessage %v]", m.Height)
// ValidateBasic performs basic validation.
func (m *bcStatusResponseMessage) ValidateBasic() error {
if m.Height < 0 {
return errors.New("Negative Height")
}
return nil
}
func (m *bcStatusResponseMessage) String() string {
return fmt.Sprintf("[bcStatusResponseMessage %v]", m.Height)
}
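
Every blockchain wire message above now implements ValidateBasic, and Receive rejects (and stops the peer for) anything that fails it. A compact sketch of that decode-then-validate shape, using toy types rather than the real amino-encoded messages:

```go
package main

import (
	"errors"
	"fmt"
)

// blockchainMessage mirrors the interface above: every wire message must be
// able to perform stateless self-validation before the reactor acts on it.
type blockchainMessage interface {
	ValidateBasic() error
}

type statusRequest struct{ Height int64 }

func (m statusRequest) ValidateBasic() error {
	if m.Height < 0 {
		return errors.New("Negative Height")
	}
	return nil
}

// handle is a toy stand-in for Receive: reject the message (and, in the real
// reactor, stop the peer) if basic validation fails.
func handle(msg blockchainMessage) error {
	if err := msg.ValidateBasic(); err != nil {
		return fmt.Errorf("peer sent us invalid msg: %v", err)
	}
	return nil
}

func main() {
	fmt.Println(handle(statusRequest{Height: 42})) // <nil>
	fmt.Println(handle(statusRequest{Height: -1})) // peer sent us invalid msg: Negative Height
}
```
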


@@ -42,13 +42,13 @@ func newBlockchainReactor(logger log.Logger, maxBlockHeight int64) *BlockchainRe
bcReactor.SetLogger(logger.With("module", "blockchain"))
// Next: we need to set a switch in order for peers to be added in
bcReactor.Switch = p2p.NewSwitch(cfg.DefaultP2PConfig())
bcReactor.Switch = p2p.NewSwitch(cfg.DefaultP2PConfig(), nil)
// Lastly: let's add some blocks in
for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ {
firstBlock := makeBlock(blockHeight, state)
secondBlock := makeBlock(blockHeight+1, state)
firstParts := firstBlock.MakePartSet(state.ConsensusParams.BlockGossip.BlockPartSizeBytes)
firstParts := firstBlock.MakePartSet(types.BlockPartSizeBytes)
blockStore.SaveBlock(firstBlock, firstParts, secondBlock.LastCommit)
}
@@ -156,7 +156,7 @@ func makeTxs(height int64) (txs []types.Tx) {
}
func makeBlock(height int64, state sm.State) *types.Block {
block, _ := state.MakeBlock(height, makeTxs(height), new(types.Commit), nil)
block, _ := state.MakeBlock(height, makeTxs(height), new(types.Commit), nil, state.Validators.GetProposer().Address)
return block
}
@@ -198,7 +198,7 @@ func (tp *bcrTestPeer) TrySend(chID byte, msgBytes []byte) bool {
}
func (tp *bcrTestPeer) Send(chID byte, msgBytes []byte) bool { return tp.TrySend(chID, msgBytes) }
func (tp *bcrTestPeer) NodeInfo() p2p.NodeInfo { return p2p.NodeInfo{} }
func (tp *bcrTestPeer) NodeInfo() p2p.NodeInfo { return p2p.DefaultNodeInfo{} }
func (tp *bcrTestPeer) Status() p2p.ConnectionStatus { return p2p.ConnectionStatus{} }
func (tp *bcrTestPeer) ID() p2p.ID { return tp.id }
func (tp *bcrTestPeer) IsOutbound() bool { return false }


@@ -63,7 +63,7 @@ func (bs *BlockStore) LoadBlock(height int64) *types.Block {
part := bs.LoadBlockPart(height, i)
buf = append(buf, part.Bytes...)
}
err := cdc.UnmarshalBinary(buf, block)
err := cdc.UnmarshalBinaryLengthPrefixed(buf, block)
if err != nil {
// NOTE: The existence of meta should imply the existence of the
// block. So, make sure meta is only saved after blocks are saved.
@@ -148,10 +148,10 @@ func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, s
}
height := block.Height
if g, w := height, bs.Height()+1; g != w {
cmn.PanicSanity(cmn.Fmt("BlockStore can only save contiguous blocks. Wanted %v, got %v", w, g))
cmn.PanicSanity(fmt.Sprintf("BlockStore can only save contiguous blocks. Wanted %v, got %v", w, g))
}
if !blockParts.IsComplete() {
cmn.PanicSanity(cmn.Fmt("BlockStore can only save complete block part sets"))
cmn.PanicSanity(fmt.Sprintf("BlockStore can only save complete block part sets"))
}
// Save block meta
@@ -188,7 +188,7 @@ func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, s
func (bs *BlockStore) saveBlockPart(height int64, index int, part *types.Part) {
if height != bs.Height()+1 {
cmn.PanicSanity(cmn.Fmt("BlockStore can only save contiguous blocks. Wanted %v, got %v", bs.Height()+1, height))
cmn.PanicSanity(fmt.Sprintf("BlockStore can only save contiguous blocks. Wanted %v, got %v", bs.Height()+1, height))
}
partBytes := cdc.MustMarshalBinaryBare(part)
bs.db.Set(calcBlockPartKey(height, index), partBytes)
@@ -224,7 +224,7 @@ type BlockStoreStateJSON struct {
func (bsj BlockStoreStateJSON) Save(db dbm.DB) {
bytes, err := cdc.MarshalJSON(bsj)
if err != nil {
cmn.PanicSanity(cmn.Fmt("Could not marshal state bytes: %v", err))
cmn.PanicSanity(fmt.Sprintf("Could not marshal state bytes: %v", err))
}
db.SetSync(blockStoreKey, bytes)
}


@@ -6,7 +6,6 @@ import (
"runtime/debug"
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -14,6 +13,7 @@ import (
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/types"
tmtime "github.com/tendermint/tendermint/types/time"
)
func TestLoadBlockStoreStateJSON(t *testing.T) {
@@ -49,7 +49,7 @@ func TestNewBlockStore(t *testing.T) {
return nil, nil
})
require.NotNil(t, panicErr, "#%d panicCauser: %q expected a panic", i, tt.data)
assert.Contains(t, panicErr.Error(), tt.wantErr, "#%d data: %q", i, tt.data)
assert.Contains(t, fmt.Sprintf("%#v", panicErr), tt.wantErr, "#%d data: %q", i, tt.data)
}
db.Set(blockStoreKey, nil)
@@ -70,7 +70,7 @@ var (
part1 = partSet.GetPart(0)
part2 = partSet.GetPart(1)
seenCommit1 = &types.Commit{Precommits: []*types.Vote{{Height: 10,
Timestamp: time.Now().UTC()}}}
Timestamp: tmtime.Now()}}}
)
// TODO: This test should be simplified ...
@@ -91,7 +91,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
block := makeBlock(bs.Height()+1, state)
validPartSet := block.MakePartSet(2)
seenCommit := &types.Commit{Precommits: []*types.Vote{{Height: 10,
Timestamp: time.Now().UTC()}}}
Timestamp: tmtime.Now()}}}
bs.SaveBlock(block, partSet, seenCommit)
require.Equal(t, bs.Height(), block.Header.Height, "expecting the new height to be changed")
@@ -103,7 +103,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
Height: 1,
NumTxs: 100,
ChainID: "block_test",
Time: time.Now(),
Time: tmtime.Now(),
}
header2 := header1
header2.Height = 4
@@ -111,7 +111,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
// End of setup, test data
commitAtH10 := &types.Commit{Precommits: []*types.Vote{{Height: 10,
Timestamp: time.Now().UTC()}}}
Timestamp: tmtime.Now()}}}
tuples := []struct {
block *types.Block
parts *types.PartSet
@@ -238,7 +238,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
if subStr := tuple.wantPanic; subStr != "" {
if panicErr == nil {
t.Errorf("#%d: want a non-nil panic", i)
} else if got := panicErr.Error(); !strings.Contains(got, subStr) {
} else if got := fmt.Sprintf("%#v", panicErr); !strings.Contains(got, subStr) {
t.Errorf("#%d:\n\tgotErr: %q\nwant substring: %q", i, got, subStr)
}
continue
@@ -335,7 +335,7 @@ func TestBlockFetchAtHeight(t *testing.T) {
partSet := block.MakePartSet(2)
seenCommit := &types.Commit{Precommits: []*types.Vote{{Height: 10,
Timestamp: time.Now().UTC()}}}
Timestamp: tmtime.Now()}}}
bs.SaveBlock(block, partSet, seenCommit)
require.Equal(t, bs.Height(), block.Header.Height, "expecting the new height to be changed")


@@ -2,7 +2,6 @@ package blockchain
import (
"github.com/tendermint/go-amino"
cryptoAmino "github.com/tendermint/tendermint/crypto/encoding/amino"
"github.com/tendermint/tendermint/types"
)
@@ -10,6 +9,5 @@ var cdc = amino.NewCodec()
func init() {
RegisterBlockchainMessages(cdc)
cryptoAmino.RegisterAmino(cdc)
types.RegisterEvidences(cdc)
types.RegisterBlockAmino(cdc)
}


@@ -5,8 +5,8 @@ import (
"github.com/spf13/cobra"
"github.com/tendermint/tendermint/p2p"
cmn "github.com/tendermint/tendermint/libs/common"
"github.com/tendermint/tendermint/p2p"
)
// GenNodeKeyCmd allows the generation of a node key. It prints node's ID to


@@ -1,15 +1,16 @@
package commands
import (
"time"
"fmt"
"github.com/spf13/cobra"
cfg "github.com/tendermint/tendermint/config"
cmn "github.com/tendermint/tendermint/libs/common"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/privval"
"github.com/tendermint/tendermint/types"
cmn "github.com/tendermint/tendermint/libs/common"
tmtime "github.com/tendermint/tendermint/types/time"
)
// InitFilesCmd initialises a fresh Tendermint Core instance.
@@ -52,13 +53,14 @@ func initFilesWithConfig(config *cfg.Config) error {
logger.Info("Found genesis file", "path", genFile)
} else {
genDoc := types.GenesisDoc{
ChainID: cmn.Fmt("test-chain-%v", cmn.RandStr(6)),
GenesisTime: time.Now(),
ChainID: fmt.Sprintf("test-chain-%v", cmn.RandStr(6)),
GenesisTime: tmtime.Now(),
ConsensusParams: types.DefaultConsensusParams(),
}
genDoc.Validators = []types.GenesisValidator{{
PubKey: pv.GetPubKey(),
Power: 10,
Address: pv.GetPubKey().Address(),
PubKey: pv.GetPubKey(),
Power: 10,
}}
if err := genDoc.SaveAs(genFile); err != nil {


@@ -7,7 +7,6 @@ import (
"github.com/spf13/cobra"
cmn "github.com/tendermint/tendermint/libs/common"
"github.com/tendermint/tendermint/lite/proxy"
rpcclient "github.com/tendermint/tendermint/rpc/client"
)
@@ -27,10 +26,12 @@ just with added trust and running locally.`,
}
var (
listenAddr string
nodeAddr string
chainID string
home string
listenAddr string
nodeAddr string
chainID string
home string
maxOpenConnections int
cacheSize int
)
func init() {
@@ -38,6 +39,8 @@ func init() {
LiteCmd.Flags().StringVar(&nodeAddr, "node", "tcp://localhost:26657", "Connect to a Tendermint node at this address")
LiteCmd.Flags().StringVar(&chainID, "chain-id", "tendermint", "Specify the Tendermint chain ID")
LiteCmd.Flags().StringVar(&home, "home-dir", ".tendermint-lite", "Specify the home directory")
LiteCmd.Flags().IntVar(&maxOpenConnections, "max-open-connections", 900, "Maximum number of simultaneous connections (including WebSocket).")
LiteCmd.Flags().IntVar(&cacheSize, "cache-size", 10, "Specify the memory trust store cache size")
}
func ensureAddrHasSchemeOrDefaultToTCP(addr string) (string, error) {
@@ -66,17 +69,21 @@ func runProxy(cmd *cobra.Command, args []string) error {
}
// First, connect a client
logger.Info("Connecting to source HTTP client...")
node := rpcclient.NewHTTP(nodeAddr, "/websocket")
cert, err := proxy.GetCertifier(chainID, home, nodeAddr)
logger.Info("Constructing Verifier...")
cert, err := proxy.NewVerifier(chainID, home, node, logger, cacheSize)
if err != nil {
return err
return cmn.ErrorWrap(err, "constructing Verifier")
}
cert.SetLogger(logger)
sc := proxy.SecureClient(node, cert)
err = proxy.StartProxy(sc, listenAddr, logger)
logger.Info("Starting proxy...")
err = proxy.StartProxy(sc, listenAddr, logger, maxOpenConnections)
if err != nil {
return err
return cmn.ErrorWrap(err, "starting proxy")
}
cmn.TrapSignal(func() {


@@ -5,8 +5,8 @@ import (
"github.com/spf13/cobra"
"github.com/tendermint/tendermint/privval"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/privval"
)
// ResetAllCmd removes the database of this Tendermint core


@@ -1,6 +1,7 @@
package commands
import (
"fmt"
"os"
"github.com/spf13/cobra"
@@ -35,6 +36,9 @@ func ParseConfig() (*cfg.Config, error) {
}
conf.SetRoot(conf.RootDir)
cfg.EnsureRoot(conf.RootDir)
if err = conf.ValidateBasic(); err != nil {
return nil, fmt.Errorf("Error in config file: %v", err)
}
return conf, err
}


@@ -2,6 +2,9 @@ package commands
import (
"fmt"
"os"
"os/signal"
"syscall"
"github.com/spf13/cobra"
@@ -49,19 +52,31 @@ func NewRunNodeCmd(nodeProvider nm.NodeProvider) *cobra.Command {
Use: "node",
Short: "Run the tendermint node",
RunE: func(cmd *cobra.Command, args []string) error {
// Create & start node
n, err := nodeProvider(config, logger)
if err != nil {
return fmt.Errorf("Failed to create node: %v", err)
}
// Stop upon receiving SIGTERM or CTRL-C
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt, syscall.SIGTERM)
go func() {
for sig := range c {
logger.Error(fmt.Sprintf("captured %v, exiting...", sig))
if n.IsRunning() {
n.Stop()
}
os.Exit(1)
}
}()
if err := n.Start(); err != nil {
return fmt.Errorf("Failed to start node: %v", err)
}
logger.Info("Started node", "nodeInfo", n.Switch().NodeInfo())
// Trap signal, run forever.
n.RunForever()
// Run forever
select {}
return nil
},


@@ -6,15 +6,15 @@ import (
"os"
"path/filepath"
"strings"
"time"
"github.com/spf13/cobra"
cfg "github.com/tendermint/tendermint/config"
cmn "github.com/tendermint/tendermint/libs/common"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/privval"
"github.com/tendermint/tendermint/types"
cmn "github.com/tendermint/tendermint/libs/common"
tmtime "github.com/tendermint/tendermint/types/time"
)
var (
@@ -76,7 +76,7 @@ func testnetFiles(cmd *cobra.Command, args []string) error {
genVals := make([]types.GenesisValidator, nValidators)
for i := 0; i < nValidators; i++ {
nodeDirName := cmn.Fmt("%s%d", nodeDirPrefix, i)
nodeDirName := fmt.Sprintf("%s%d", nodeDirPrefix, i)
nodeDir := filepath.Join(outputDir, nodeDirName)
config.SetRoot(nodeDir)
@@ -91,14 +91,15 @@ func testnetFiles(cmd *cobra.Command, args []string) error {
pvFile := filepath.Join(nodeDir, config.BaseConfig.PrivValidator)
pv := privval.LoadFilePV(pvFile)
genVals[i] = types.GenesisValidator{
PubKey: pv.GetPubKey(),
Power: 1,
Name: nodeDirName,
Address: pv.GetPubKey().Address(),
PubKey: pv.GetPubKey(),
Power: 1,
Name: nodeDirName,
}
}
for i := 0; i < nNonValidators; i++ {
nodeDir := filepath.Join(outputDir, cmn.Fmt("%s%d", nodeDirPrefix, i+nValidators))
nodeDir := filepath.Join(outputDir, fmt.Sprintf("%s%d", nodeDirPrefix, i+nValidators))
config.SetRoot(nodeDir)
err := os.MkdirAll(filepath.Join(nodeDir, "config"), nodeDirPerm)
@@ -112,14 +113,14 @@ func testnetFiles(cmd *cobra.Command, args []string) error {
// Generate genesis doc from generated validators
genDoc := &types.GenesisDoc{
GenesisTime: time.Now(),
GenesisTime: tmtime.Now(),
ChainID: "chain-" + cmn.RandStr(6),
Validators: genVals,
}
// Write genesis file.
for i := 0; i < nValidators+nNonValidators; i++ {
nodeDir := filepath.Join(outputDir, cmn.Fmt("%s%d", nodeDirPrefix, i))
nodeDir := filepath.Join(outputDir, fmt.Sprintf("%s%d", nodeDirPrefix, i))
if err := genDoc.SaveAs(filepath.Join(nodeDir, config.BaseConfig.Genesis)); err != nil {
_ = os.RemoveAll(outputDir)
return err
@@ -159,7 +160,7 @@ func hostnameOrIP(i int) string {
func populatePersistentPeersInConfigAndWriteIt(config *cfg.Config) error {
persistentPeers := make([]string, nValidators+nNonValidators)
for i := 0; i < nValidators+nNonValidators; i++ {
nodeDir := filepath.Join(outputDir, cmn.Fmt("%s%d", nodeDirPrefix, i))
nodeDir := filepath.Join(outputDir, fmt.Sprintf("%s%d", nodeDirPrefix, i))
config.SetRoot(nodeDir)
nodeKey, err := p2p.LoadNodeKey(config.NodeKeyFile())
if err != nil {
@@ -170,7 +171,7 @@ func populatePersistentPeersInConfigAndWriteIt(config *cfg.Config) error {
persistentPeersList := strings.Join(persistentPeers, ",")
for i := 0; i < nValidators+nNonValidators; i++ {
nodeDir := filepath.Join(outputDir, cmn.Fmt("%s%d", nodeDirPrefix, i))
nodeDir := filepath.Join(outputDir, fmt.Sprintf("%s%d", nodeDirPrefix, i))
config.SetRoot(nodeDir)
config.P2P.PersistentPeers = persistentPeersList
config.P2P.AddrBookStrict = false


@@ -5,6 +5,8 @@ import (
"os"
"path/filepath"
"time"
"github.com/pkg/errors"
)
const (
@@ -19,7 +21,7 @@ const (
// generate the config.toml. Please reflect any changes
// made here in the defaultConfigTemplate constant in
// config/toml.go
// NOTE: tmlibs/cli must know to look in the config dir!
// NOTE: libs/cli must know to look in the config dir!
var (
DefaultTendermintDir = ".tendermint"
defaultConfigDir = "config"
@@ -89,12 +91,32 @@ func (cfg *Config) SetRoot(root string) *Config {
return cfg
}
// ValidateBasic performs basic validation (checking param bounds, etc.) and
// returns an error if any check fails.
func (cfg *Config) ValidateBasic() error {
if err := cfg.RPC.ValidateBasic(); err != nil {
return errors.Wrap(err, "Error in [rpc] section")
}
if err := cfg.P2P.ValidateBasic(); err != nil {
return errors.Wrap(err, "Error in [p2p] section")
}
if err := cfg.Mempool.ValidateBasic(); err != nil {
return errors.Wrap(err, "Error in [mempool] section")
}
if err := cfg.Consensus.ValidateBasic(); err != nil {
return errors.Wrap(err, "Error in [consensus] section")
}
return errors.Wrap(
cfg.Instrumentation.ValidateBasic(),
"Error in [instrumentation] section",
)
}
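
The new top-level ValidateBasic simply delegates to each section and wraps the failure so the offending [section] is visible in the error message. A toy sketch of that composition, with simplified stand-in structs (not the real config types) but the same github.com/pkg/errors package the hunk imports:

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// sectionConfig is a toy stand-in for one config section with its own ValidateBasic.
type sectionConfig struct{ MaxOpenConnections int }

func (c sectionConfig) ValidateBasic() error {
	if c.MaxOpenConnections < 0 {
		return errors.New("max_open_connections can't be negative")
	}
	return nil
}

// rootConfig mirrors the top-level pattern: delegate to each section and wrap
// the error so the failing [section] shows up in the message.
type rootConfig struct{ RPC sectionConfig }

func (c rootConfig) ValidateBasic() error {
	if err := c.RPC.ValidateBasic(); err != nil {
		return errors.Wrap(err, "Error in [rpc] section")
	}
	return nil
}

func main() {
	bad := rootConfig{RPC: sectionConfig{MaxOpenConnections: -1}}
	fmt.Println(bad.ValidateBasic())
	// Error in [rpc] section: max_open_connections can't be negative
}
```

This is the same check ParseConfig now runs right after EnsureRoot, so a bad value fails fast at startup instead of surfacing later at runtime.
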
//-----------------------------------------------------------------------------
// BaseConfig
// BaseConfig defines the base configuration for a Tendermint node
type BaseConfig struct {
// chainID is unexposed and immutable but here for convenience
chainID string
@@ -102,49 +124,49 @@ type BaseConfig struct {
// This should be set in viper so it can unmarshal into this struct
RootDir string `mapstructure:"home"`
// Path to the JSON file containing the initial validator set and other meta data
Genesis string `mapstructure:"genesis_file"`
// Path to the JSON file containing the private key to use as a validator in the consensus protocol
PrivValidator string `mapstructure:"priv_validator_file"`
// A JSON file containing the private key to use for p2p authenticated encryption
NodeKey string `mapstructure:"node_key_file"`
// A custom human readable name for this node
Moniker string `mapstructure:"moniker"`
// TCP or UNIX socket address for Tendermint to listen on for
// connections from an external PrivValidator process
PrivValidatorListenAddr string `mapstructure:"priv_validator_laddr"`
// TCP or UNIX socket address of the ABCI application,
// or the name of an ABCI application compiled in with the Tendermint binary
ProxyApp string `mapstructure:"proxy_app"`
// Mechanism to connect to the ABCI application: socket | grpc
ABCI string `mapstructure:"abci"`
// Output level for logging
LogLevel string `mapstructure:"log_level"`
// TCP or UNIX socket address for the profiling server to listen on
ProfListenAddress string `mapstructure:"prof_laddr"`
// A custom human readable name for this node
Moniker string `mapstructure:"moniker"`
// If this node is many blocks behind the tip of the chain, FastSync
// allows them to catchup quickly by downloading blocks in parallel
// and verifying their commits
FastSync bool `mapstructure:"fast_sync"`
// If true, query the ABCI app on connecting to a new peer
// so the app can decide if we should keep the connection or not
FilterPeers bool `mapstructure:"filter_peers"` // false
// Database backend: leveldb | memdb
// Database backend: leveldb | memdb | cleveldb
DBBackend string `mapstructure:"db_backend"`
// Database directory
DBPath string `mapstructure:"db_dir"`
// Output level for logging
LogLevel string `mapstructure:"log_level"`
// Path to the JSON file containing the initial validator set and other meta data
Genesis string `mapstructure:"genesis_file"`
// Path to the JSON file containing the private key to use as a validator in the consensus protocol
PrivValidator string `mapstructure:"priv_validator_file"`
// TCP or UNIX socket address for Tendermint to listen on for
// connections from an external PrivValidator process
PrivValidatorListenAddr string `mapstructure:"priv_validator_laddr"`
// A JSON file containing the private key to use for p2p authenticated encryption
NodeKey string `mapstructure:"node_key_file"`
// Mechanism to connect to the ABCI application: socket | grpc
ABCI string `mapstructure:"abci"`
// TCP or UNIX socket address for the profiling server to listen on
ProfListenAddress string `mapstructure:"prof_laddr"`
// If true, query the ABCI app on connecting to a new peer
// so the app can decide if we should keep the connection or not
FilterPeers bool `mapstructure:"filter_peers"` // false
}
// DefaultBaseConfig returns a default base configuration for a Tendermint node
@@ -220,6 +242,18 @@ type RPCConfig struct {
// TCP or UNIX socket address for the RPC server to listen on
ListenAddress string `mapstructure:"laddr"`
// A list of origins a cross-domain request can be executed from.
// If the special '*' value is present in the list, all origins will be allowed.
// An origin may contain a wildcard (*) to replace 0 or more characters (i.e.: http://*.domain.com).
// Only one wildcard can be used per origin.
CORSAllowedOrigins []string `mapstructure:"cors_allowed_origins"`
// A list of methods the client is allowed to use with cross-domain requests.
CORSAllowedMethods []string `mapstructure:"cors_allowed_methods"`
// A list of non simple headers the client is allowed to use with cross-domain requests.
CORSAllowedHeaders []string `mapstructure:"cors_allowed_headers"`
// TCP or UNIX socket address for the gRPC server to listen on
// NOTE: This server only supports /broadcast_tx_commit
GRPCListenAddress string `mapstructure:"grpc_laddr"`
@@ -239,20 +273,22 @@ type RPCConfig struct {
// If you want to accept more significant number than the default, make sure
// you increase your OS limits.
// 0 - unlimited.
// Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
// 1024 - 40 - 10 - 50 = 924 = ~900
MaxOpenConnections int `mapstructure:"max_open_connections"`
}
// DefaultRPCConfig returns a default configuration for the RPC server
func DefaultRPCConfig() *RPCConfig {
return &RPCConfig{
ListenAddress: "tcp://0.0.0.0:26657",
ListenAddress: "tcp://0.0.0.0:26657",
CORSAllowedOrigins: []string{},
CORSAllowedMethods: []string{"HEAD", "GET", "POST"},
CORSAllowedHeaders: []string{"Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time"},
GRPCListenAddress: "",
GRPCMaxOpenConnections: 900, // no ipv4
GRPCMaxOpenConnections: 900,
Unsafe: false,
// should be < {ulimit -Sn} - {MaxNumPeers} - {N of wal, db and other open files}
// 1024 - 50 - 50 = 924 = ~900
Unsafe: false,
MaxOpenConnections: 900,
}
}
@@ -266,6 +302,23 @@ func TestRPCConfig() *RPCConfig {
return cfg
}
// ValidateBasic performs basic validation (checking param bounds, etc.) and
// returns an error if any check fails.
func (cfg *RPCConfig) ValidateBasic() error {
if cfg.GRPCMaxOpenConnections < 0 {
return errors.New("grpc_max_open_connections can't be negative")
}
if cfg.MaxOpenConnections < 0 {
return errors.New("max_open_connections can't be negative")
}
return nil
}
// IsCorsEnabled returns true if cross-origin resource sharing is enabled.
func (cfg *RPCConfig) IsCorsEnabled() bool {
return len(cfg.CORSAllowedOrigins) != 0
}
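
With the new CORS fields, cross-origin support is opt-in: it counts as enabled only when at least one allowed origin is configured. A trimmed sketch of that behaviour, using a stand-in struct and a hypothetical origin value:

```go
package main

import "fmt"

// rpcConfig is a trimmed stand-in for the RPC section above; only the
// CORS-related fields are reproduced here.
type rpcConfig struct {
	CORSAllowedOrigins []string
	CORSAllowedMethods []string
	CORSAllowedHeaders []string
}

// IsCorsEnabled mirrors the method in the hunk above: CORS is on iff at
// least one allowed origin is configured.
func (c rpcConfig) IsCorsEnabled() bool { return len(c.CORSAllowedOrigins) != 0 }

func main() {
	off := rpcConfig{} // default: empty origins list, CORS disabled
	on := rpcConfig{
		CORSAllowedOrigins: []string{"https://example.com"}, // hypothetical origin
		CORSAllowedMethods: []string{"HEAD", "GET", "POST"},
		CORSAllowedHeaders: []string{"Origin", "Accept", "Content-Type"},
	}
	fmt.Println(off.IsCorsEnabled(), on.IsCorsEnabled()) // false true
}
```
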
//-----------------------------------------------------------------------------
// P2PConfig
@@ -293,13 +346,17 @@ type P2PConfig struct {
AddrBook string `mapstructure:"addr_book_file"`
// Set true for strict address routability rules
// Set false for private or local networks
AddrBookStrict bool `mapstructure:"addr_book_strict"`
// Maximum number of peers to connect to
MaxNumPeers int `mapstructure:"max_num_peers"`
// Maximum number of inbound peers
MaxNumInboundPeers int `mapstructure:"max_num_inbound_peers"`
// Time to wait before flushing messages out on the connection, in ms
FlushThrottleTimeout int `mapstructure:"flush_throttle_timeout"`
// Maximum number of outbound peers to connect to, excluding persistent peers
MaxNumOutboundPeers int `mapstructure:"max_num_outbound_peers"`
// Time to wait before flushing messages out on the connection
FlushThrottleTimeout time.Duration `mapstructure:"flush_throttle_timeout"`
// Maximum size of a message packet payload, in bytes
MaxPacketMsgPayloadSize int `mapstructure:"max_packet_msg_payload_size"`
@@ -346,8 +403,9 @@ func DefaultP2PConfig() *P2PConfig {
UPNP: false,
AddrBook: defaultAddrBookPath,
AddrBookStrict: true,
MaxNumPeers: 50,
FlushThrottleTimeout: 100,
MaxNumInboundPeers: 40,
MaxNumOutboundPeers: 10,
FlushThrottleTimeout: 100 * time.Millisecond,
MaxPacketMsgPayloadSize: 1024, // 1 kB
SendRate: 5120000, // 5 mB/s
RecvRate: 5120000, // 5 mB/s
@@ -366,7 +424,7 @@ func DefaultP2PConfig() *P2PConfig {
func TestP2PConfig() *P2PConfig {
cfg := DefaultP2PConfig()
cfg.ListenAddress = "tcp://0.0.0.0:36656"
cfg.FlushThrottleTimeout = 10
cfg.FlushThrottleTimeout = 10 * time.Millisecond
cfg.AllowDuplicateIP = true
return cfg
}
@@ -376,6 +434,30 @@ func (cfg *P2PConfig) AddrBookFile() string {
return rootify(cfg.AddrBook, cfg.RootDir)
}
// ValidateBasic performs basic validation (checking param bounds, etc.) and
// returns an error if any check fails.
func (cfg *P2PConfig) ValidateBasic() error {
if cfg.MaxNumInboundPeers < 0 {
return errors.New("max_num_inbound_peers can't be negative")
}
if cfg.MaxNumOutboundPeers < 0 {
return errors.New("max_num_outbound_peers can't be negative")
}
if cfg.FlushThrottleTimeout < 0 {
return errors.New("flush_throttle_timeout can't be negative")
}
if cfg.MaxPacketMsgPayloadSize < 0 {
return errors.New("max_packet_msg_payload_size can't be negative")
}
if cfg.SendRate < 0 {
return errors.New("send_rate can't be negative")
}
if cfg.RecvRate < 0 {
return errors.New("recv_rate can't be negative")
}
return nil
}
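
FlushThrottleTimeout (like the handshake and dial timeouts around it) is now a time.Duration rather than a bare millisecond integer, which is why the config template quotes these values. A small sketch showing how a duration round-trips through its string form, standard library only; no claim is made here about how the config loader itself parses the file:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// A time.Duration prints as a unit-suffixed string, so the template can
	// emit e.g. flush_throttle_timeout = "100ms" instead of a bare 100.
	flushThrottle := 100 * time.Millisecond
	fmt.Printf("flush_throttle_timeout = %q\n", fmt.Sprintf("%v", flushThrottle))

	// Durations written that way can be recovered with time.ParseDuration.
	d, err := time.ParseDuration("100ms")
	if err != nil {
		panic(err)
	}
	fmt.Println(d == flushThrottle) // true
}
```
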
// FuzzConnConfig is a FuzzedConnection configuration.
type FuzzConnConfig struct {
Mode int
@@ -401,24 +483,24 @@ func DefaultFuzzConnConfig() *FuzzConnConfig {
// MempoolConfig defines the configuration options for the Tendermint mempool
type MempoolConfig struct {
RootDir string `mapstructure:"home"`
Recheck bool `mapstructure:"recheck"`
RecheckEmpty bool `mapstructure:"recheck_empty"`
Broadcast bool `mapstructure:"broadcast"`
WalPath string `mapstructure:"wal_dir"`
Size int `mapstructure:"size"`
CacheSize int `mapstructure:"cache_size"`
RootDir string `mapstructure:"home"`
Recheck bool `mapstructure:"recheck"`
Broadcast bool `mapstructure:"broadcast"`
WalPath string `mapstructure:"wal_dir"`
Size int `mapstructure:"size"`
CacheSize int `mapstructure:"cache_size"`
}
// DefaultMempoolConfig returns a default configuration for the Tendermint mempool
func DefaultMempoolConfig() *MempoolConfig {
return &MempoolConfig{
Recheck: true,
RecheckEmpty: true,
Broadcast: true,
WalPath: filepath.Join(defaultDataDir, "mempool.wal"),
Size: 100000,
CacheSize: 100000,
Recheck: true,
Broadcast: true,
WalPath: "",
// Each signature verification takes .5ms, size reduced until we implement
// ABCI Recheck
Size: 5000,
CacheSize: 10000,
}
}
@@ -434,6 +516,23 @@ func (cfg *MempoolConfig) WalDir() string {
return rootify(cfg.WalPath, cfg.RootDir)
}
// WalEnabled returns true if the WAL is enabled.
func (cfg *MempoolConfig) WalEnabled() bool {
return cfg.WalPath != ""
}
// ValidateBasic performs basic validation (checking param bounds, etc.) and
// returns an error if any check fails.
func (cfg *MempoolConfig) ValidateBasic() error {
if cfg.Size < 0 {
return errors.New("size can't be negative")
}
if cfg.CacheSize < 0 {
return errors.New("cache_size can't be negative")
}
return nil
}
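
With WalPath defaulting to empty, the mempool WAL is now opt-in and WalEnabled is just a non-empty-path check. A tiny stand-in sketch:

```go
package main

import "fmt"

// mempoolConfig is a minimal stand-in for the mempool section above.
type mempoolConfig struct{ WalPath string }

// WalEnabled mirrors the method in the hunk: an empty path disables the WAL.
func (c mempoolConfig) WalEnabled() bool { return c.WalPath != "" }

func main() {
	fmt.Println(mempoolConfig{WalPath: ""}.WalEnabled())                 // false: default, WAL off
	fmt.Println(mempoolConfig{WalPath: "data/mempool.wal"}.WalEnabled()) // true
}
```
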
//-----------------------------------------------------------------------------
// ConsensusConfig
@@ -444,100 +543,101 @@ type ConsensusConfig struct {
WalPath string `mapstructure:"wal_file"`
walFile string // overrides WalPath if set
// All timeouts are in milliseconds
TimeoutPropose int `mapstructure:"timeout_propose"`
TimeoutProposeDelta int `mapstructure:"timeout_propose_delta"`
TimeoutPrevote int `mapstructure:"timeout_prevote"`
TimeoutPrevoteDelta int `mapstructure:"timeout_prevote_delta"`
TimeoutPrecommit int `mapstructure:"timeout_precommit"`
TimeoutPrecommitDelta int `mapstructure:"timeout_precommit_delta"`
TimeoutCommit int `mapstructure:"timeout_commit"`
TimeoutPropose time.Duration `mapstructure:"timeout_propose"`
TimeoutProposeDelta time.Duration `mapstructure:"timeout_propose_delta"`
TimeoutPrevote time.Duration `mapstructure:"timeout_prevote"`
TimeoutPrevoteDelta time.Duration `mapstructure:"timeout_prevote_delta"`
TimeoutPrecommit time.Duration `mapstructure:"timeout_precommit"`
TimeoutPrecommitDelta time.Duration `mapstructure:"timeout_precommit_delta"`
TimeoutCommit time.Duration `mapstructure:"timeout_commit"`
// Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
SkipTimeoutCommit bool `mapstructure:"skip_timeout_commit"`
// EmptyBlocks mode and possible interval between empty blocks in seconds
CreateEmptyBlocks bool `mapstructure:"create_empty_blocks"`
CreateEmptyBlocksInterval int `mapstructure:"create_empty_blocks_interval"`
// EmptyBlocks mode and possible interval between empty blocks
CreateEmptyBlocks bool `mapstructure:"create_empty_blocks"`
CreateEmptyBlocksInterval time.Duration `mapstructure:"create_empty_blocks_interval"`
// Reactor sleep duration parameters are in milliseconds
PeerGossipSleepDuration int `mapstructure:"peer_gossip_sleep_duration"`
PeerQueryMaj23SleepDuration int `mapstructure:"peer_query_maj23_sleep_duration"`
// Reactor sleep duration parameters
PeerGossipSleepDuration time.Duration `mapstructure:"peer_gossip_sleep_duration"`
PeerQueryMaj23SleepDuration time.Duration `mapstructure:"peer_query_maj23_sleep_duration"`
// Block time parameters. Corresponds to the minimum time increment between consecutive blocks.
BlockTimeIota time.Duration `mapstructure:"blocktime_iota"`
}
// DefaultConsensusConfig returns a default configuration for the consensus service
func DefaultConsensusConfig() *ConsensusConfig {
return &ConsensusConfig{
WalPath: filepath.Join(defaultDataDir, "cs.wal", "wal"),
TimeoutPropose: 3000,
TimeoutProposeDelta: 500,
TimeoutPrevote: 1000,
TimeoutPrevoteDelta: 500,
TimeoutPrecommit: 1000,
TimeoutPrecommitDelta: 500,
TimeoutCommit: 1000,
TimeoutPropose: 3000 * time.Millisecond,
TimeoutProposeDelta: 500 * time.Millisecond,
TimeoutPrevote: 1000 * time.Millisecond,
TimeoutPrevoteDelta: 500 * time.Millisecond,
TimeoutPrecommit: 1000 * time.Millisecond,
TimeoutPrecommitDelta: 500 * time.Millisecond,
TimeoutCommit: 1000 * time.Millisecond,
SkipTimeoutCommit: false,
CreateEmptyBlocks: true,
CreateEmptyBlocksInterval: 0,
PeerGossipSleepDuration: 100,
PeerQueryMaj23SleepDuration: 2000,
CreateEmptyBlocksInterval: 0 * time.Second,
PeerGossipSleepDuration: 100 * time.Millisecond,
PeerQueryMaj23SleepDuration: 2000 * time.Millisecond,
BlockTimeIota: 1000 * time.Millisecond,
}
}
// TestConsensusConfig returns a configuration for testing the consensus service
func TestConsensusConfig() *ConsensusConfig {
cfg := DefaultConsensusConfig()
cfg.TimeoutPropose = 100
cfg.TimeoutProposeDelta = 1
cfg.TimeoutPrevote = 10
cfg.TimeoutPrevoteDelta = 1
cfg.TimeoutPrecommit = 10
cfg.TimeoutPrecommitDelta = 1
cfg.TimeoutCommit = 10
cfg.TimeoutPropose = 40 * time.Millisecond
cfg.TimeoutProposeDelta = 1 * time.Millisecond
cfg.TimeoutPrevote = 10 * time.Millisecond
cfg.TimeoutPrevoteDelta = 1 * time.Millisecond
cfg.TimeoutPrecommit = 10 * time.Millisecond
cfg.TimeoutPrecommitDelta = 1 * time.Millisecond
cfg.TimeoutCommit = 10 * time.Millisecond
cfg.SkipTimeoutCommit = true
cfg.PeerGossipSleepDuration = 5
cfg.PeerQueryMaj23SleepDuration = 250
cfg.PeerGossipSleepDuration = 5 * time.Millisecond
cfg.PeerQueryMaj23SleepDuration = 250 * time.Millisecond
cfg.BlockTimeIota = 10 * time.Millisecond
return cfg
}
// MinValidVoteTime returns the minimum acceptable block time.
// See the [BFT time spec](https://godoc.org/github.com/tendermint/tendermint/docs/spec/consensus/bft-time.md).
func (cfg *ConsensusConfig) MinValidVoteTime(lastBlockTime time.Time) time.Time {
return lastBlockTime.Add(cfg.BlockTimeIota)
}
// WaitForTxs returns true if the consensus should wait for transactions before entering the propose step
func (cfg *ConsensusConfig) WaitForTxs() bool {
return !cfg.CreateEmptyBlocks || cfg.CreateEmptyBlocksInterval > 0
}
// EmptyBlocks returns the amount of time to wait before proposing an empty block or starting the propose timer if there are no txs available
func (cfg *ConsensusConfig) EmptyBlocksInterval() time.Duration {
return time.Duration(cfg.CreateEmptyBlocksInterval) * time.Second
}
// Propose returns the amount of time to wait for a proposal
func (cfg *ConsensusConfig) Propose(round int) time.Duration {
return time.Duration(cfg.TimeoutPropose+cfg.TimeoutProposeDelta*round) * time.Millisecond
return time.Duration(
cfg.TimeoutPropose.Nanoseconds()+cfg.TimeoutProposeDelta.Nanoseconds()*int64(round),
) * time.Nanosecond
}
// Prevote returns the amount of time to wait for straggler votes after receiving any +2/3 prevotes
func (cfg *ConsensusConfig) Prevote(round int) time.Duration {
return time.Duration(cfg.TimeoutPrevote+cfg.TimeoutPrevoteDelta*round) * time.Millisecond
return time.Duration(
cfg.TimeoutPrevote.Nanoseconds()+cfg.TimeoutPrevoteDelta.Nanoseconds()*int64(round),
) * time.Nanosecond
}
// Precommit returns the amount of time to wait for straggler votes after receiving any +2/3 precommits
func (cfg *ConsensusConfig) Precommit(round int) time.Duration {
return time.Duration(cfg.TimeoutPrecommit+cfg.TimeoutPrecommitDelta*round) * time.Millisecond
return time.Duration(
cfg.TimeoutPrecommit.Nanoseconds()+cfg.TimeoutPrecommitDelta.Nanoseconds()*int64(round),
) * time.Nanosecond
}
// Commit returns the amount of time to wait for straggler votes after receiving +2/3 precommits for a single block (ie. a commit).
func (cfg *ConsensusConfig) Commit(t time.Time) time.Time {
return t.Add(time.Duration(cfg.TimeoutCommit) * time.Millisecond)
}
// PeerGossipSleep returns the amount of time to sleep if there is nothing to send from the ConsensusReactor
func (cfg *ConsensusConfig) PeerGossipSleep() time.Duration {
return time.Duration(cfg.PeerGossipSleepDuration) * time.Millisecond
}
// PeerQueryMaj23Sleep returns the amount of time to sleep after each VoteSetMaj23Message is sent in the ConsensusReactor
func (cfg *ConsensusConfig) PeerQueryMaj23Sleep() time.Duration {
return time.Duration(cfg.PeerQueryMaj23SleepDuration) * time.Millisecond
return t.Add(cfg.TimeoutCommit)
}
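
Propose, Prevote, and Precommit keep the same linear round scaling as before; the arithmetic is just done in nanoseconds on the new time.Duration fields. A worked example using the default TimeoutPropose and TimeoutProposeDelta values from this diff:

```go
package main

import (
	"fmt"
	"time"
)

// propose mirrors the arithmetic in Propose above: the wait grows linearly
// with the round number, so later rounds give proposers more time.
func propose(timeoutPropose, timeoutProposeDelta time.Duration, round int) time.Duration {
	return time.Duration(
		timeoutPropose.Nanoseconds()+timeoutProposeDelta.Nanoseconds()*int64(round),
	) * time.Nanosecond
}

func main() {
	base := 3000 * time.Millisecond // default TimeoutPropose
	delta := 500 * time.Millisecond // default TimeoutProposeDelta
	for round := 0; round <= 3; round++ {
		fmt.Printf("round %d: wait %v\n", round, propose(base, delta, round))
	}
	// round 0: wait 3s
	// round 1: wait 3.5s
	// round 2: wait 4s
	// round 3: wait 4.5s
}
```
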
// WalFile returns the full path to the write-ahead log file
@@ -553,11 +653,50 @@ func (cfg *ConsensusConfig) SetWalFile(walFile string) {
cfg.walFile = walFile
}
// ValidateBasic performs basic validation (checking param bounds, etc.) and
// returns an error if any check fails.
func (cfg *ConsensusConfig) ValidateBasic() error {
if cfg.TimeoutPropose < 0 {
return errors.New("timeout_propose can't be negative")
}
if cfg.TimeoutProposeDelta < 0 {
return errors.New("timeout_propose_delta can't be negative")
}
if cfg.TimeoutPrevote < 0 {
return errors.New("timeout_prevote can't be negative")
}
if cfg.TimeoutPrevoteDelta < 0 {
return errors.New("timeout_prevote_delta can't be negative")
}
if cfg.TimeoutPrecommit < 0 {
return errors.New("timeout_precommit can't be negative")
}
if cfg.TimeoutPrecommitDelta < 0 {
return errors.New("timeout_precommit_delta can't be negative")
}
if cfg.TimeoutCommit < 0 {
return errors.New("timeout_commit can't be negative")
}
if cfg.CreateEmptyBlocksInterval < 0 {
return errors.New("create_empty_blocks_interval can't be negative")
}
if cfg.PeerGossipSleepDuration < 0 {
return errors.New("peer_gossip_sleep_duration can't be negative")
}
if cfg.PeerQueryMaj23SleepDuration < 0 {
return errors.New("peer_query_maj23_sleep_duration can't be negative")
}
if cfg.BlockTimeIota < 0 {
return errors.New("blocktime_iota can't be negative")
}
return nil
}
//-----------------------------------------------------------------------------
// TxIndexConfig
// TxIndexConfig defines the configuration for the transaction
// indexer, including tags to index.
// TxIndexConfig defines the configuration for the transaction indexer,
// including tags to index.
type TxIndexConfig struct {
// What indexer to use for transactions
//
@@ -566,16 +705,21 @@ type TxIndexConfig struct {
// 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
Indexer string `mapstructure:"indexer"`
// Comma-separated list of tags to index (by default the only tag is tx hash)
// Comma-separated list of tags to index (by default the only tag is "tx.hash")
//
// You can also index transactions by height by adding "tx.height" tag here.
//
// It's recommended to index only a subset of tags due to possible memory
// bloat. This, of course, depends on the indexer's DB and the volume of
// transactions.
IndexTags string `mapstructure:"index_tags"`
// When set to true, tells indexer to index all tags. Note this may not be
// desirable (see the comment above). IndexTags has a precedence over
// IndexAllTags (i.e. when given both, IndexTags will be indexed).
// When set to true, tells indexer to index all tags (predefined tags:
// "tx.hash", "tx.height" and all tags from DeliverTx responses).
//
// Note this may not be desirable (see the comment above). IndexTags has a
// precedence over IndexAllTags (i.e. when given both, IndexTags will be
// indexed).
IndexAllTags bool `mapstructure:"index_all_tags"`
}
@@ -611,6 +755,9 @@ type InstrumentationConfig struct {
// you increase your OS limits.
// 0 - unlimited.
MaxOpenConnections int `mapstructure:"max_open_connections"`
// Tendermint instrumentation namespace.
Namespace string `mapstructure:"namespace"`
}
// DefaultInstrumentationConfig returns a default configuration for metrics
@@ -620,6 +767,7 @@ func DefaultInstrumentationConfig() *InstrumentationConfig {
Prometheus: false,
PrometheusListenAddr: ":26660",
MaxOpenConnections: 3,
Namespace: "tendermint",
}
}
@@ -629,6 +777,15 @@ func TestInstrumentationConfig() *InstrumentationConfig {
return DefaultInstrumentationConfig()
}
// ValidateBasic performs basic validation (checking param bounds, etc.) and
// returns an error if any check fails.
func (cfg *InstrumentationConfig) ValidateBasic() error {
if cfg.MaxOpenConnections < 0 {
return errors.New("max_open_connections can't be negative")
}
return nil
}
//-----------------------------------------------------------------------------
// Utils


@@ -2,6 +2,7 @@ package config
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
)
@@ -26,3 +27,12 @@ func TestDefaultConfig(t *testing.T) {
assert.Equal("/foo/wal/mem", cfg.Mempool.WalDir())
}
func TestConfigValidateBasic(t *testing.T) {
cfg := DefaultConfig()
assert.NoError(t, cfg.ValidateBasic())
// tamper with timeout_propose
cfg.Consensus.TimeoutPropose = -10 * time.Second
assert.Error(t, cfg.ValidateBasic())
}


@@ -77,11 +77,11 @@ moniker = "{{ .BaseConfig.Moniker }}"
# and verifying their commits
fast_sync = {{ .BaseConfig.FastSync }}
# Database backend: leveldb | memdb
# Database backend: leveldb | memdb | cleveldb
db_backend = "{{ .BaseConfig.DBBackend }}"
# Database directory
db_path = "{{ js .BaseConfig.DBPath }}"
db_dir = "{{ js .BaseConfig.DBPath }}"
# Output level for logging, including package level options
log_level = "{{ .BaseConfig.LogLevel }}"
@@ -94,8 +94,12 @@ genesis_file = "{{ js .BaseConfig.Genesis }}"
# Path to the JSON file containing the private key to use as a validator in the consensus protocol
priv_validator_file = "{{ js .BaseConfig.PrivValidator }}"
# TCP or UNIX socket address for Tendermint to listen on for
# connections from an external PrivValidator process
priv_validator_laddr = "{{ .BaseConfig.PrivValidatorListenAddr }}"
# Path to the JSON file containing the private key to use for node authentication in the p2p protocol
node_key_file = "{{ js .BaseConfig.NodeKey}}"
node_key_file = "{{ js .BaseConfig.NodeKey }}"
# Mechanism to connect to the ABCI application: socket | grpc
abci = "{{ .BaseConfig.ABCI }}"
@@ -115,6 +119,17 @@ filter_peers = {{ .BaseConfig.FilterPeers }}
# TCP or UNIX socket address for the RPC server to listen on
laddr = "{{ .RPC.ListenAddress }}"
# A list of origins a cross-domain request can be executed from
# Default value '[]' disables cors support
# Use '["*"]' to allow any origin
cors_allowed_origins = "{{ .RPC.CORSAllowedOrigins }}"
# A list of methods the client is allowed to use with cross-domain requests
cors_allowed_methods = "{{ .RPC.CORSAllowedMethods }}"
# A list of non simple headers the client is allowed to use with cross-domain requests
cors_allowed_headers = "{{ .RPC.CORSAllowedHeaders }}"
# TCP or UNIX socket address for the gRPC server to listen on
# NOTE: This server only supports /broadcast_tx_commit
grpc_laddr = "{{ .RPC.GRPCListenAddress }}"
@@ -124,6 +139,8 @@ grpc_laddr = "{{ .RPC.GRPCListenAddress }}"
# If you want to accept more significant number than the default, make sure
# you increase your OS limits.
# 0 - unlimited.
# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
# 1024 - 40 - 10 - 50 = 924 = ~900
grpc_max_open_connections = {{ .RPC.GRPCMaxOpenConnections }}
# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool
@@ -134,6 +151,8 @@ unsafe = {{ .RPC.Unsafe }}
# If you want to accept more significant number than the default, make sure
# you increase your OS limits.
# 0 - unlimited.
# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
# 1024 - 40 - 10 - 50 = 924 = ~900
max_open_connections = {{ .RPC.MaxOpenConnections }}
##### peer to peer configuration options #####
@@ -161,13 +180,17 @@ upnp = {{ .P2P.UPNP }}
addr_book_file = "{{ js .P2P.AddrBook }}"
# Set true for strict address routability rules
# Set false for private or local networks
addr_book_strict = {{ .P2P.AddrBookStrict }}
# Time to wait before flushing messages out on the connection, in ms
flush_throttle_timeout = {{ .P2P.FlushThrottleTimeout }}
# Maximum number of inbound peers
max_num_inbound_peers = {{ .P2P.MaxNumInboundPeers }}
# Maximum number of peers to connect to
max_num_peers = {{ .P2P.MaxNumPeers }}
# Maximum number of outbound peers to connect to, excluding persistent peers
max_num_outbound_peers = {{ .P2P.MaxNumOutboundPeers }}
# Time to wait before flushing messages out on the connection
flush_throttle_timeout = "{{ .P2P.FlushThrottleTimeout }}"
# Maximum size of a message packet payload, in bytes
max_packet_msg_payload_size = {{ .P2P.MaxPacketMsgPayloadSize }}
@@ -190,11 +213,17 @@ seed_mode = {{ .P2P.SeedMode }}
# Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
private_peer_ids = "{{ .P2P.PrivatePeerIDs }}"
# Toggle to disable guard against peers connecting from the same ip.
allow_duplicate_ip = {{ .P2P.AllowDuplicateIP }}
# Peer connection configuration.
handshake_timeout = "{{ .P2P.HandshakeTimeout }}"
dial_timeout = "{{ .P2P.DialTimeout }}"
##### mempool configuration options #####
[mempool]
recheck = {{ .Mempool.Recheck }}
recheck_empty = {{ .Mempool.RecheckEmpty }}
broadcast = {{ .Mempool.Broadcast }}
wal_dir = "{{ js .Mempool.WalPath }}"
@@ -209,25 +238,24 @@ cache_size = {{ .Mempool.CacheSize }}
wal_file = "{{ js .Consensus.WalPath }}"
# All timeouts are in milliseconds
timeout_propose = {{ .Consensus.TimeoutPropose }}
timeout_propose_delta = {{ .Consensus.TimeoutProposeDelta }}
timeout_prevote = {{ .Consensus.TimeoutPrevote }}
timeout_prevote_delta = {{ .Consensus.TimeoutPrevoteDelta }}
timeout_precommit = {{ .Consensus.TimeoutPrecommit }}
timeout_precommit_delta = {{ .Consensus.TimeoutPrecommitDelta }}
timeout_commit = {{ .Consensus.TimeoutCommit }}
timeout_propose = "{{ .Consensus.TimeoutPropose }}"
timeout_propose_delta = "{{ .Consensus.TimeoutProposeDelta }}"
timeout_prevote = "{{ .Consensus.TimeoutPrevote }}"
timeout_prevote_delta = "{{ .Consensus.TimeoutPrevoteDelta }}"
timeout_precommit = "{{ .Consensus.TimeoutPrecommit }}"
timeout_precommit_delta = "{{ .Consensus.TimeoutPrecommitDelta }}"
timeout_commit = "{{ .Consensus.TimeoutCommit }}"
# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
skip_timeout_commit = {{ .Consensus.SkipTimeoutCommit }}
# EmptyBlocks mode and possible interval between empty blocks in seconds
# EmptyBlocks mode and possible interval between empty blocks
create_empty_blocks = {{ .Consensus.CreateEmptyBlocks }}
create_empty_blocks_interval = {{ .Consensus.CreateEmptyBlocksInterval }}
create_empty_blocks_interval = "{{ .Consensus.CreateEmptyBlocksInterval }}"
# Reactor sleep duration parameters are in milliseconds
peer_gossip_sleep_duration = {{ .Consensus.PeerGossipSleepDuration }}
peer_query_maj23_sleep_duration = {{ .Consensus.PeerQueryMaj23SleepDuration }}
# Reactor sleep duration parameters
peer_gossip_sleep_duration = "{{ .Consensus.PeerGossipSleepDuration }}"
peer_query_maj23_sleep_duration = "{{ .Consensus.PeerQueryMaj23SleepDuration }}"
##### transactions indexer configuration options #####
[tx_index]
@@ -239,16 +267,21 @@ peer_query_maj23_sleep_duration = {{ .Consensus.PeerQueryMaj23SleepDuration }}
# 2) "kv" - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
indexer = "{{ .TxIndex.Indexer }}"
# Comma-separated list of tags to index (by default the only tag is tx hash)
# Comma-separated list of tags to index (by default the only tag is "tx.hash")
#
# You can also index transactions by height by adding "tx.height" tag here.
#
# It's recommended to index only a subset of tags due to possible memory
# bloat. This, of course, depends on the indexer's DB and the volume of
# transactions.
index_tags = "{{ .TxIndex.IndexTags }}"
# When set to true, tells indexer to index all tags. Note this may not be
# desirable (see the comment above). IndexTags has a precedence over
# IndexAllTags (i.e. when given both, IndexTags will be indexed).
# When set to true, tells indexer to index all tags (predefined tags:
# "tx.hash", "tx.height" and all tags from DeliverTx responses).
#
# Note this may not be desirable (see the comment above). IndexTags has a
# precedence over IndexAllTags (i.e. when given both, IndexTags will be
# indexed).
index_all_tags = {{ .TxIndex.IndexAllTags }}
##### instrumentation configuration options #####
@@ -267,6 +300,9 @@ prometheus_listen_addr = "{{ .Instrumentation.PrometheusListenAddr }}"
# you increase your OS limits.
# 0 - unlimited.
max_open_connections = {{ .Instrumentation.MaxOpenConnections }}
# Instrumentation namespace
namespace = "{{ .Instrumentation.Namespace }}"
`
/****** these are for test settings ***********/
@@ -317,7 +353,7 @@ func ResetTestRoot(testName string) *Config {
}
var testGenesis = `{
"genesis_time": "0001-01-01T00:00:00.000Z",
"genesis_time": "2018-10-10T08:20:13.695936996Z",
"chain_id": "tendermint_test",
"validators": [
{


@@ -2,14 +2,15 @@ package consensus
import (
"context"
"fmt"
"sync"
"testing"
"time"
"github.com/stretchr/testify/require"
cmn "github.com/tendermint/tendermint/libs/common"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/types"
cmn "github.com/tendermint/tendermint/libs/common"
)
func init() {
@@ -38,7 +39,13 @@ func TestByzantine(t *testing.T) {
switches := make([]*p2p.Switch, N)
p2pLogger := logger.With("module", "p2p")
for i := 0; i < N; i++ {
switches[i] = p2p.NewSwitch(config.P2P)
switches[i] = p2p.MakeSwitch(
config.P2P,
i,
"foo", "1.0.0",
func(i int, sw *p2p.Switch) *p2p.Switch {
return sw
})
switches[i].SetLogger(p2pLogger.With("validator", i))
}
@@ -156,8 +163,8 @@ func TestByzantine(t *testing.T) {
case <-done:
case <-tick.C:
for i, reactor := range reactors {
t.Log(cmn.Fmt("Consensus Reactor %v", i))
t.Log(cmn.Fmt("%v", reactor))
t.Log(fmt.Sprintf("Consensus Reactor %v", i))
t.Log(fmt.Sprintf("%v", reactor))
}
t.Fatalf("Timed out waiting for all validators to commit first block")
}
@@ -172,16 +179,16 @@ func byzantineDecideProposalFunc(t *testing.T, height int64, round int, cs *Cons
// Create a new proposal block from state/txs from the mempool.
block1, blockParts1 := cs.createProposalBlock()
polRound, polBlockID := cs.Votes.POLInfo()
proposal1 := types.NewProposal(height, round, blockParts1.Header(), polRound, polBlockID)
polRound, propBlockID := cs.ValidRound, types.BlockID{block1.Hash(), blockParts1.Header()}
proposal1 := types.NewProposal(height, round, polRound, propBlockID)
if err := cs.privValidator.SignProposal(cs.state.ChainID, proposal1); err != nil {
t.Error(err)
}
// Create a new proposal block from state/txs from the mempool.
block2, blockParts2 := cs.createProposalBlock()
polRound, polBlockID = cs.Votes.POLInfo()
proposal2 := types.NewProposal(height, round, blockParts2.Header(), polRound, polBlockID)
polRound, propBlockID = cs.ValidRound, types.BlockID{block2.Hash(), blockParts2.Header()}
proposal2 := types.NewProposal(height, round, polRound, propBlockID)
if err := cs.privValidator.SignProposal(cs.state.ChainID, proposal2); err != nil {
t.Error(err)
}
@@ -219,8 +226,8 @@ func sendProposalAndParts(height int64, round int, cs *ConsensusState, peer p2p.
// votes
cs.mtx.Lock()
prevote, _ := cs.signVote(types.VoteTypePrevote, blockHash, parts.Header())
precommit, _ := cs.signVote(types.VoteTypePrecommit, blockHash, parts.Header())
prevote, _ := cs.signVote(types.PrevoteType, blockHash, parts.Header())
precommit, _ := cs.signVote(types.PrecommitType, blockHash, parts.Header())
cs.mtx.Unlock()
peer.Send(VoteChannel, cdc.MustMarshalBinaryBare(&VoteMessage{prevote}))
@@ -256,7 +263,7 @@ func (br *ByzantineReactor) AddPeer(peer p2p.Peer) {
// Send our state to peer.
// If we're fast_syncing, broadcast a RoundStepMessage later upon SwitchToConsensus().
if !br.reactor.fastSync {
br.reactor.sendNewRoundStepMessages(peer)
br.reactor.sendNewRoundStepMessage(peer)
}
}
func (br *ByzantineReactor) RemovePeer(peer p2p.Peer, reason interface{}) {


@@ -7,6 +7,7 @@ import (
"io/ioutil"
"os"
"path"
"reflect"
"sort"
"sync"
"testing"
@@ -17,14 +18,15 @@ import (
bc "github.com/tendermint/tendermint/blockchain"
cfg "github.com/tendermint/tendermint/config"
cstypes "github.com/tendermint/tendermint/consensus/types"
cmn "github.com/tendermint/tendermint/libs/common"
dbm "github.com/tendermint/tendermint/libs/db"
"github.com/tendermint/tendermint/libs/log"
mempl "github.com/tendermint/tendermint/mempool"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/privval"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
cmn "github.com/tendermint/tendermint/libs/common"
dbm "github.com/tendermint/tendermint/libs/db"
"github.com/tendermint/tendermint/libs/log"
tmtime "github.com/tendermint/tendermint/types/time"
"github.com/tendermint/tendermint/abci/example/counter"
"github.com/tendermint/tendermint/abci/example/kvstore"
@@ -37,8 +39,8 @@ const (
)
// genesis, chain_id, priv_val
var config *cfg.Config // NOTE: must be reset for each _test.go file
var ensureTimeout = time.Second * 1 // must be in seconds because CreateEmptyBlocksInterval is
var config *cfg.Config // NOTE: must be reset for each _test.go file
var ensureTimeout = time.Millisecond * 100
func ensureDir(dir string, mode os.FileMode) {
if err := cmn.EnsureDir(dir, mode); err != nil {
@@ -69,13 +71,13 @@ func NewValidatorStub(privValidator types.PrivValidator, valIndex int) *validato
}
}
func (vs *validatorStub) signVote(voteType byte, hash []byte, header types.PartSetHeader) (*types.Vote, error) {
func (vs *validatorStub) signVote(voteType types.SignedMsgType, hash []byte, header types.PartSetHeader) (*types.Vote, error) {
vote := &types.Vote{
ValidatorIndex: vs.Index,
ValidatorAddress: vs.PrivValidator.GetAddress(),
Height: vs.Height,
Round: vs.Round,
Timestamp: time.Now().UTC(),
Timestamp: tmtime.Now(),
Type: voteType,
BlockID: types.BlockID{hash, header},
}
@@ -84,7 +86,7 @@ func (vs *validatorStub) signVote(voteType byte, hash []byte, header types.PartS
}
// Sign vote for type/hash/header
func signVote(vs *validatorStub, voteType byte, hash []byte, header types.PartSetHeader) *types.Vote {
func signVote(vs *validatorStub, voteType types.SignedMsgType, hash []byte, header types.PartSetHeader) *types.Vote {
v, err := vs.signVote(voteType, hash, header)
if err != nil {
panic(fmt.Errorf("failed to sign vote: %v", err))
@@ -92,7 +94,7 @@ func signVote(vs *validatorStub, voteType byte, hash []byte, header types.PartSe
return v
}
func signVotes(voteType byte, hash []byte, header types.PartSetHeader, vss ...*validatorStub) []*types.Vote {
func signVotes(voteType types.SignedMsgType, hash []byte, header types.PartSetHeader, vss ...*validatorStub) []*types.Vote {
votes := make([]*types.Vote, len(vss))
for i, vs := range vss {
votes[i] = signVote(vs, voteType, hash, header)
@@ -128,8 +130,8 @@ func decideProposal(cs1 *ConsensusState, vs *validatorStub, height int64, round
}
// Make proposal
polRound, polBlockID := cs1.Votes.POLInfo()
proposal = types.NewProposal(height, round, blockParts.Header(), polRound, polBlockID)
polRound, propBlockID := cs1.ValidRound, types.BlockID{block.Hash(), blockParts.Header()}
proposal = types.NewProposal(height, round, polRound, propBlockID)
if err := vs.SignProposal(cs1.state.ChainID, proposal); err != nil {
panic(err)
}
@@ -142,7 +144,7 @@ func addVotes(to *ConsensusState, votes ...*types.Vote) {
}
}
func signAddVotes(to *ConsensusState, voteType byte, hash []byte, header types.PartSetHeader, vss ...*validatorStub) {
func signAddVotes(to *ConsensusState, voteType types.SignedMsgType, hash []byte, header types.PartSetHeader, vss ...*validatorStub) {
votes := signVotes(voteType, hash, header, vss...)
addVotes(to, votes...)
}
@@ -305,23 +307,204 @@ func randConsensusState(nValidators int) (*ConsensusState, []*validatorStub) {
//-------------------------------------------------------------------------------
func ensureNoNewStep(stepCh <-chan interface{}) {
timer := time.NewTimer(ensureTimeout)
func ensureNoNewEvent(ch <-chan interface{}, timeout time.Duration,
errorMessage string) {
select {
case <-timer.C:
case <-time.After(timeout):
break
case <-stepCh:
panic("We should be stuck waiting, not moving to the next step")
case <-ch:
panic(errorMessage)
}
}
func ensureNewStep(stepCh <-chan interface{}) {
timer := time.NewTimer(ensureTimeout)
func ensureNoNewEventOnChannel(ch <-chan interface{}) {
ensureNoNewEvent(
ch,
ensureTimeout,
"We should be stuck waiting, not receiving new event on the channel")
}
func ensureNoNewRoundStep(stepCh <-chan interface{}) {
ensureNoNewEvent(
stepCh,
ensureTimeout,
"We should be stuck waiting, not receiving NewRoundStep event")
}
func ensureNoNewUnlock(unlockCh <-chan interface{}) {
ensureNoNewEvent(
unlockCh,
ensureTimeout,
"We should be stuck waiting, not receiving Unlock event")
}
func ensureNoNewTimeout(stepCh <-chan interface{}, timeout int64) {
timeoutDuration := time.Duration(timeout*5) * time.Nanosecond
ensureNoNewEvent(
stepCh,
timeoutDuration,
"We should be stuck waiting, not receiving NewTimeout event")
}
func ensureNewEvent(
ch <-chan interface{},
height int64,
round int,
timeout time.Duration,
errorMessage string) {
select {
case <-timer.C:
panic("We shouldnt be stuck waiting")
case <-stepCh:
case <-time.After(timeout):
panic(errorMessage)
case ev := <-ch:
rs, ok := ev.(types.EventDataRoundState)
if !ok {
panic(
fmt.Sprintf(
"expected a EventDataRoundState, got %v.Wrong subscription channel?",
reflect.TypeOf(rs)))
}
if rs.Height != height {
panic(fmt.Sprintf("expected height %v, got %v", height, rs.Height))
}
if rs.Round != round {
panic(fmt.Sprintf("expected round %v, got %v", round, rs.Round))
}
// TODO: We could check also for a step at this point!
}
}
func ensureNewRoundStep(stepCh <-chan interface{}, height int64, round int) {
ensureNewEvent(
stepCh,
height,
round,
ensureTimeout,
"Timeout expired while waiting for NewStep event")
}
func ensureNewVote(voteCh <-chan interface{}, height int64, round int) {
select {
case <-time.After(ensureTimeout):
break
case v := <-voteCh:
edv, ok := v.(types.EventDataVote)
if !ok {
panic(fmt.Sprintf("expected a *types.Vote, "+
"got %v. wrong subscription channel?",
reflect.TypeOf(v)))
}
vote := edv.Vote
if vote.Height != height {
panic(fmt.Sprintf("expected height %v, got %v", height, vote.Height))
}
if vote.Round != round {
panic(fmt.Sprintf("expected round %v, got %v", round, vote.Round))
}
}
}
func ensureNewRound(roundCh <-chan interface{}, height int64, round int) {
ensureNewEvent(roundCh, height, round, ensureTimeout,
"Timeout expired while waiting for NewRound event")
}
func ensureNewTimeout(timeoutCh <-chan interface{}, height int64, round int, timeout int64) {
timeoutDuration := time.Duration(timeout*3) * time.Nanosecond
ensureNewEvent(timeoutCh, height, round, timeoutDuration,
"Timeout expired while waiting for NewTimeout event")
}
func ensureNewProposal(proposalCh <-chan interface{}, height int64, round int) {
ensureNewEvent(proposalCh, height, round, ensureTimeout,
"Timeout expired while waiting for NewProposal event")
}
func ensureNewValidBlock(validBlockCh <-chan interface{}, height int64, round int) {
ensureNewEvent(validBlockCh, height, round, ensureTimeout,
"Timeout expired while waiting for NewValidBlock event")
}
func ensureNewBlock(blockCh <-chan interface{}, height int64) {
select {
case <-time.After(ensureTimeout):
panic("Timeout expired while waiting for NewBlock event")
case ev := <-blockCh:
block, ok := ev.(types.EventDataNewBlock)
if !ok {
panic(fmt.Sprintf("expected a *types.EventDataNewBlock, "+
"got %v. wrong subscription channel?",
reflect.TypeOf(block)))
}
if block.Block.Height != height {
panic(fmt.Sprintf("expected height %v, got %v", height, block.Block.Height))
}
}
}
func ensureNewBlockHeader(blockCh <-chan interface{}, height int64, blockHash cmn.HexBytes) {
select {
case <-time.After(ensureTimeout):
panic("Timeout expired while waiting for NewBlockHeader event")
case ev := <-blockCh:
blockHeader, ok := ev.(types.EventDataNewBlockHeader)
if !ok {
panic(fmt.Sprintf("expected a *types.EventDataNewBlockHeader, "+
"got %v. wrong subscription channel?",
reflect.TypeOf(blockHeader)))
}
if blockHeader.Header.Height != height {
panic(fmt.Sprintf("expected height %v, got %v", height, blockHeader.Header.Height))
}
if !bytes.Equal(blockHeader.Header.Hash(), blockHash) {
panic(fmt.Sprintf("expected header %X, got %X", blockHash, blockHeader.Header.Hash()))
}
}
}
func ensureNewUnlock(unlockCh <-chan interface{}, height int64, round int) {
ensureNewEvent(unlockCh, height, round, ensureTimeout,
"Timeout expired while waiting for NewUnlock event")
}
func ensureVote(voteCh <-chan interface{}, height int64, round int,
voteType types.SignedMsgType) {
select {
case <-time.After(ensureTimeout):
panic("Timeout expired while waiting for NewVote event")
case v := <-voteCh:
edv, ok := v.(types.EventDataVote)
if !ok {
panic(fmt.Sprintf("expected a *types.Vote, "+
"got %v. wrong subscription channel?",
reflect.TypeOf(v)))
}
vote := edv.Vote
if vote.Height != height {
panic(fmt.Sprintf("expected height %v, got %v", height, vote.Height))
}
if vote.Round != round {
panic(fmt.Sprintf("expected round %v, got %v", round, vote.Round))
}
if vote.Type != voteType {
panic(fmt.Sprintf("expected type %v, got %v", voteType, vote.Type))
}
}
}
func ensurePrecommit(voteCh <-chan interface{}, height int64, round int) {
ensureVote(voteCh, height, round, types.PrecommitType)
}
func ensurePrevote(voteCh <-chan interface{}, height int64, round int) {
ensureVote(voteCh, height, round, types.PrevoteType)
}
func ensureNewEventOnChannel(ch <-chan interface{}) {
select {
case <-time.After(ensureTimeout):
panic("Timeout expired while waiting for new activity on the channel")
case <-ch:
}
}
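These helpers replace the old ensureNewStep/ensureNoNewStep pair with event-specific variants that also assert height and round. A hypothetical single-validator test fragment showing how they combine; it assumes it lives alongside the helpers in the consensus package's test files, and the subscription queries follow the patterns used elsewhere in this diff:

```
func TestSingleValidatorFullRound(t *testing.T) {
	cs, _ := randConsensusState(1)
	height, round := cs.Height, cs.Round

	newRoundCh := subscribe(cs.eventBus, types.EventQueryNewRound)
	proposalCh := subscribe(cs.eventBus, types.EventQueryCompleteProposal)
	voteCh := subscribe(cs.eventBus, types.EventQueryVote)

	startTestRound(cs, height, round)

	ensureNewRound(newRoundCh, height, round)    // round 0 begins
	ensureNewProposal(proposalCh, height, round) // our own proposal completes
	ensurePrevote(voteCh, height, round)         // we prevote it
	ensurePrecommit(voteCh, height, round)       // ...and precommit, being the only validator
	ensureNewRound(newRoundCh, height+1, 0)      // the block commits and the next height starts
}
```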
@@ -348,13 +531,13 @@ func randConsensusNet(nValidators int, testName string, tickerFunc func() Timeou
for i := 0; i < nValidators; i++ {
stateDB := dbm.NewMemDB() // each state needs its own db
state, _ := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc)
thisConfig := ResetConfig(cmn.Fmt("%s_%d", testName, i))
thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
for _, opt := range configOpts {
opt(thisConfig)
}
ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
app := appFunc()
vals := types.TM2PB.Validators(state.Validators)
vals := types.TM2PB.ValidatorUpdates(state.Validators)
app.InitChain(abci.RequestInitChain{Validators: vals})
css[i] = newConsensusStateWithConfig(thisConfig, state, privVals[i], app)
@@ -372,18 +555,21 @@ func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerF
for i := 0; i < nPeers; i++ {
stateDB := dbm.NewMemDB() // each state needs its own db
state, _ := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc)
thisConfig := ResetConfig(cmn.Fmt("%s_%d", testName, i))
thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
var privVal types.PrivValidator
if i < nValidators {
privVal = privVals[i]
} else {
_, tempFilePath := cmn.Tempfile("priv_validator_")
privVal = privval.GenFilePV(tempFilePath)
tempFile, err := ioutil.TempFile("", "priv_validator_")
if err != nil {
panic(err)
}
privVal = privval.GenFilePV(tempFile.Name())
}
app := appFunc()
vals := types.TM2PB.Validators(state.Validators)
vals := types.TM2PB.ValidatorUpdates(state.Validators)
app.InitChain(abci.RequestInitChain{Validators: vals})
css[i] = newConsensusStateWithConfig(thisConfig, state, privVal, app)
@@ -395,7 +581,7 @@ func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerF
func getSwitchIndex(switches []*p2p.Switch, peer p2p.Peer) int {
for i, s := range switches {
if peer.NodeInfo().ID == s.NodeInfo().ID {
if peer.NodeInfo().ID() == s.NodeInfo().ID() {
return i
}
}
@@ -420,7 +606,7 @@ func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.G
sort.Sort(types.PrivValidatorsByAddress(privValidators))
return &types.GenesisDoc{
GenesisTime: time.Now(),
GenesisTime: tmtime.Now(),
ChainID: config.ChainID(),
Validators: validators,
}, privValidators
@@ -429,8 +615,6 @@ func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.G
func randGenesisState(numValidators int, randPower bool, minPower int64) (sm.State, []types.PrivValidator) {
genDoc, privValidators := randGenesisDoc(numValidators, randPower, minPower)
s0, _ := sm.MakeGenesisState(genDoc)
db := dbm.NewMemDB()
sm.SaveState(db, s0)
return s0, privValidators
}


@@ -10,8 +10,6 @@ import (
"github.com/tendermint/tendermint/abci/example/code"
abci "github.com/tendermint/tendermint/abci/types"
cmn "github.com/tendermint/tendermint/libs/common"
"github.com/tendermint/tendermint/types"
)
@@ -29,17 +27,17 @@ func TestMempoolNoProgressUntilTxsAvailable(t *testing.T) {
newBlockCh := subscribe(cs.eventBus, types.EventQueryNewBlock)
startTestRound(cs, height, round)
ensureNewStep(newBlockCh) // first block gets committed
ensureNoNewStep(newBlockCh)
ensureNewEventOnChannel(newBlockCh) // first block gets committed
ensureNoNewEventOnChannel(newBlockCh)
deliverTxsRange(cs, 0, 1)
ensureNewStep(newBlockCh) // commit txs
ensureNewStep(newBlockCh) // commit updated app hash
ensureNoNewStep(newBlockCh)
ensureNewEventOnChannel(newBlockCh) // commit txs
ensureNewEventOnChannel(newBlockCh) // commit updated app hash
ensureNoNewEventOnChannel(newBlockCh)
}
func TestMempoolProgressAfterCreateEmptyBlocksInterval(t *testing.T) {
config := ResetConfig("consensus_mempool_txs_available_test")
config.Consensus.CreateEmptyBlocksInterval = int(ensureTimeout.Seconds())
config.Consensus.CreateEmptyBlocksInterval = ensureTimeout
state, privVals := randGenesisState(1, false, 10)
cs := newConsensusStateWithConfig(config, state, privVals[0], NewCounterApplication())
cs.mempool.EnableTxsAvailable()
@@ -47,9 +45,9 @@ func TestMempoolProgressAfterCreateEmptyBlocksInterval(t *testing.T) {
newBlockCh := subscribe(cs.eventBus, types.EventQueryNewBlock)
startTestRound(cs, height, round)
ensureNewStep(newBlockCh) // first block gets committed
ensureNoNewStep(newBlockCh) // then we dont make a block ...
ensureNewStep(newBlockCh) // until the CreateEmptyBlocksInterval has passed
ensureNewEventOnChannel(newBlockCh) // first block gets committed
ensureNoNewEventOnChannel(newBlockCh) // then we dont make a block ...
ensureNewEventOnChannel(newBlockCh) // until the CreateEmptyBlocksInterval has passed
}
func TestMempoolProgressInHigherRound(t *testing.T) {
@@ -73,13 +71,19 @@ func TestMempoolProgressInHigherRound(t *testing.T) {
}
startTestRound(cs, height, round)
ensureNewStep(newRoundCh) // first round at first height
ensureNewStep(newBlockCh) // first block gets committed
ensureNewStep(newRoundCh) // first round at next height
deliverTxsRange(cs, 0, 1) // we deliver txs, but dont set a proposal so we get the next round
<-timeoutCh
ensureNewStep(newRoundCh) // wait for the next round
ensureNewStep(newBlockCh) // now we can commit the block
ensureNewRoundStep(newRoundCh, height, round) // first round at first height
ensureNewEventOnChannel(newBlockCh) // first block gets committed
height = height + 1 // moving to the next height
round = 0
ensureNewRoundStep(newRoundCh, height, round) // first round at next height
deliverTxsRange(cs, 0, 1) // we deliver txs, but dont set a proposal so we get the next round
ensureNewTimeout(timeoutCh, height, round, cs.config.TimeoutPropose.Nanoseconds())
round = round + 1 // moving to the next round
ensureNewRoundStep(newRoundCh, height, round) // wait for the next round
ensureNewEventOnChannel(newBlockCh) // now we can commit the block
}
func deliverTxsRange(cs *ConsensusState, start, end int) {
@@ -89,7 +93,7 @@ func deliverTxsRange(cs *ConsensusState, start, end int) {
binary.BigEndian.PutUint64(txBytes, uint64(i))
err := cs.mempool.CheckTx(txBytes, nil)
if err != nil {
panic(cmn.Fmt("Error after CheckTx: %v", err))
panic(fmt.Sprintf("Error after CheckTx: %v", err))
}
}
}
@@ -100,7 +104,7 @@ func TestMempoolTxConcurrentWithCommit(t *testing.T) {
height, round := cs.Height, cs.Round
newBlockCh := subscribe(cs.eventBus, types.EventQueryNewBlock)
NTxs := 10000
NTxs := 3000
go deliverTxsRange(cs, 0, NTxs)
startTestRound(cs, height, round)
@@ -126,7 +130,7 @@ func TestMempoolRmBadTx(t *testing.T) {
binary.BigEndian.PutUint64(txBytes, uint64(0))
resDeliver := app.DeliverTx(txBytes)
assert.False(t, resDeliver.IsErr(), cmn.Fmt("expected no error. got %v", resDeliver))
assert.False(t, resDeliver.IsErr(), fmt.Sprintf("expected no error. got %v", resDeliver))
resCommit := app.Commit()
assert.True(t, len(resCommit.Data) > 0)
@@ -149,7 +153,7 @@ func TestMempoolRmBadTx(t *testing.T) {
// check for the tx
for {
txs := cs.mempool.Reap(1)
txs := cs.mempool.ReapMaxBytesMaxGas(int64(len(txBytes)), -1)
if len(txs) == 0 {
emptyMempoolCh <- struct{}{}
return
@@ -190,7 +194,7 @@ func NewCounterApplication() *CounterApplication {
}
func (app *CounterApplication) Info(req abci.RequestInfo) abci.ResponseInfo {
return abci.ResponseInfo{Data: cmn.Fmt("txs:%v", app.txCount)}
return abci.ResponseInfo{Data: fmt.Sprintf("txs:%v", app.txCount)}
}
func (app *CounterApplication) DeliverTx(tx []byte) abci.ResponseDeliverTx {


@@ -8,6 +8,8 @@ import (
stdprometheus "github.com/prometheus/client_golang/prometheus"
)
const MetricsSubsystem = "consensus"
// Metrics contains metrics exposed by this package.
type Metrics struct {
// Height of the chain.
@@ -30,7 +32,7 @@ type Metrics struct {
ByzantineValidatorsPower metrics.Gauge
// Time between this and the last block.
BlockIntervalSeconds metrics.Histogram
BlockIntervalSeconds metrics.Gauge
// Number of transactions.
NumTxs metrics.Gauge
@@ -38,75 +40,111 @@ type Metrics struct {
BlockSizeBytes metrics.Gauge
// Total number of transactions.
TotalTxs metrics.Gauge
// The latest block height.
CommittedHeight metrics.Gauge
// Whether or not a node is fast syncing. 1 if yes, 0 if no.
FastSyncing metrics.Gauge
// Number of blockparts transmitted by peer.
BlockParts metrics.Counter
}
// PrometheusMetrics returns Metrics build using Prometheus client library.
func PrometheusMetrics() *Metrics {
func PrometheusMetrics(namespace string) *Metrics {
return &Metrics{
Height: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Subsystem: "consensus",
Namespace: namespace,
Subsystem: MetricsSubsystem,
Name: "height",
Help: "Height of the chain.",
}, []string{}),
Rounds: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Subsystem: "consensus",
Namespace: namespace,
Subsystem: MetricsSubsystem,
Name: "rounds",
Help: "Number of rounds.",
}, []string{}),
Validators: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Subsystem: "consensus",
Namespace: namespace,
Subsystem: MetricsSubsystem,
Name: "validators",
Help: "Number of validators.",
}, []string{}),
ValidatorsPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Subsystem: "consensus",
Namespace: namespace,
Subsystem: MetricsSubsystem,
Name: "validators_power",
Help: "Total power of all validators.",
}, []string{}),
MissingValidators: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Subsystem: "consensus",
Namespace: namespace,
Subsystem: MetricsSubsystem,
Name: "missing_validators",
Help: "Number of validators who did not sign.",
}, []string{}),
MissingValidatorsPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Subsystem: "consensus",
Namespace: namespace,
Subsystem: MetricsSubsystem,
Name: "missing_validators_power",
Help: "Total power of the missing validators.",
}, []string{}),
ByzantineValidators: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Subsystem: "consensus",
Namespace: namespace,
Subsystem: MetricsSubsystem,
Name: "byzantine_validators",
Help: "Number of validators who tried to double sign.",
}, []string{}),
ByzantineValidatorsPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Subsystem: "consensus",
Namespace: namespace,
Subsystem: MetricsSubsystem,
Name: "byzantine_validators_power",
Help: "Total power of the byzantine validators.",
}, []string{}),
BlockIntervalSeconds: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{
Subsystem: "consensus",
BlockIntervalSeconds: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: namespace,
Subsystem: MetricsSubsystem,
Name: "block_interval_seconds",
Help: "Time between this and the last block.",
Buckets: []float64{1, 2.5, 5, 10, 60},
}, []string{}),
NumTxs: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Subsystem: "consensus",
Namespace: namespace,
Subsystem: MetricsSubsystem,
Name: "num_txs",
Help: "Number of transactions.",
}, []string{}),
BlockSizeBytes: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Subsystem: "consensus",
Namespace: namespace,
Subsystem: MetricsSubsystem,
Name: "block_size_bytes",
Help: "Size of the block.",
}, []string{}),
TotalTxs: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Subsystem: "consensus",
Namespace: namespace,
Subsystem: MetricsSubsystem,
Name: "total_txs",
Help: "Total number of transactions.",
}, []string{}),
CommittedHeight: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: namespace,
Subsystem: MetricsSubsystem,
Name: "latest_block_height",
Help: "The latest block height.",
}, []string{}),
FastSyncing: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: namespace,
Subsystem: MetricsSubsystem,
Name: "fast_syncing",
Help: "Whether or not a node is fast syncing. 1 if yes, 0 if no.",
}, []string{}),
BlockParts: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
Namespace: namespace,
Subsystem: MetricsSubsystem,
Name: "block_parts",
Help: "Number of blockparts transmitted by peer.",
}, []string{"peer_id"}),
}
}
@@ -124,10 +162,13 @@ func NopMetrics() *Metrics {
ByzantineValidators: discard.NewGauge(),
ByzantineValidatorsPower: discard.NewGauge(),
BlockIntervalSeconds: discard.NewHistogram(),
BlockIntervalSeconds: discard.NewGauge(),
NumTxs: discard.NewGauge(),
BlockSizeBytes: discard.NewGauge(),
TotalTxs: discard.NewGauge(),
NumTxs: discard.NewGauge(),
BlockSizeBytes: discard.NewGauge(),
TotalTxs: discard.NewGauge(),
CommittedHeight: discard.NewGauge(),
FastSyncing: discard.NewGauge(),
BlockParts: discard.NewCounter(),
}
}
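PrometheusMetrics now takes a namespace and the reactor accepts functional options, so metrics can be injected at construction time. A minimal wiring sketch based only on the signatures shown in this diff; cs and fastSync stand in for values produced by normal node setup, and this is not how the node package itself wires things:

```
package main

import (
	"github.com/tendermint/tendermint/consensus"
)

// newInstrumentedReactor shows how the namespace-aware metrics constructor
// and the ReactorMetrics option fit together.
func newInstrumentedReactor(cs *consensus.ConsensusState, fastSync bool, namespace string) *consensus.ConsensusReactor {
	m := consensus.PrometheusMetrics(namespace) // e.g. namespace = "tendermint"
	return consensus.NewConsensusReactor(cs, fastSync, consensus.ReactorMetrics(m))
}

func main() {}
```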


@@ -9,7 +9,6 @@ import (
"github.com/pkg/errors"
amino "github.com/tendermint/go-amino"
cstypes "github.com/tendermint/tendermint/consensus/types"
cmn "github.com/tendermint/tendermint/libs/common"
tmevents "github.com/tendermint/tendermint/libs/events"
@@ -17,6 +16,7 @@ import (
"github.com/tendermint/tendermint/p2p"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
tmtime "github.com/tendermint/tendermint/types/time"
)
const (
@@ -28,6 +28,7 @@ const (
maxMsgSize = 1048576 // 1MB; NOTE/TODO: keep in sync with types.PartSet sizes.
blocksToContributeToBecomeGoodPeer = 10000
votesToContributeToBecomeGoodPeer = 10000
)
//-----------------------------------------------------------------------------
@@ -41,16 +42,27 @@ type ConsensusReactor struct {
mtx sync.RWMutex
fastSync bool
eventBus *types.EventBus
metrics *Metrics
}
type ReactorOption func(*ConsensusReactor)
// NewConsensusReactor returns a new ConsensusReactor with the given
// consensusState.
func NewConsensusReactor(consensusState *ConsensusState, fastSync bool) *ConsensusReactor {
func NewConsensusReactor(consensusState *ConsensusState, fastSync bool, options ...ReactorOption) *ConsensusReactor {
conR := &ConsensusReactor{
conS: consensusState,
fastSync: fastSync,
metrics: NopMetrics(),
}
conR.updateFastSyncingMetric()
conR.BaseReactor = *p2p.NewBaseReactor("ConsensusReactor", conR)
for _, option := range options {
option(conR)
}
return conR
}
@@ -58,9 +70,9 @@ func NewConsensusReactor(consensusState *ConsensusState, fastSync bool) *Consens
// broadcasted to other peers and starting state if we're not in fast sync.
func (conR *ConsensusReactor) OnStart() error {
conR.Logger.Info("ConsensusReactor ", "fastSync", conR.FastSync())
if err := conR.BaseReactor.OnStart(); err != nil {
return err
}
// start routine that computes peer statistics for evaluating peer quality
go conR.peerStatsRoutine()
conR.subscribeToBroadcastEvents()
@@ -77,7 +89,6 @@ func (conR *ConsensusReactor) OnStart() error {
// OnStop implements BaseService by unsubscribing from events and stopping
// state.
func (conR *ConsensusReactor) OnStop() {
conR.BaseReactor.OnStop()
conR.unsubscribeFromBroadcastEvents()
conR.conS.Stop()
if !conR.FastSync() {
@@ -97,6 +108,7 @@ func (conR *ConsensusReactor) SwitchToConsensus(state sm.State, blocksSynced int
conR.mtx.Lock()
conR.fastSync = false
conR.mtx.Unlock()
conR.metrics.FastSyncing.Set(0)
if blocksSynced > 0 {
// dont bother with the WAL if we fast synced
@@ -161,7 +173,7 @@ func (conR *ConsensusReactor) AddPeer(peer p2p.Peer) {
// Send our state to peer.
// If we're fast_syncing, broadcast a RoundStepMessage later upon SwitchToConsensus().
if !conR.FastSync() {
conR.sendNewRoundStepMessages(peer)
conR.sendNewRoundStepMessage(peer)
}
}
@@ -171,7 +183,11 @@ func (conR *ConsensusReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
return
}
// TODO
//peer.Get(PeerStateKey).(*PeerState).Disconnect()
// ps, ok := peer.Get(PeerStateKey).(*PeerState)
// if !ok {
// panic(fmt.Sprintf("Peer %v has no state", peer))
// }
// ps.Disconnect()
}
// Receive implements Reactor
@@ -192,18 +208,28 @@ func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
conR.Switch.StopPeerForError(src, err)
return
}
if err = msg.ValidateBasic(); err != nil {
conR.Logger.Error("Peer sent us invalid msg", "peer", src, "msg", msg, "err", err)
conR.Switch.StopPeerForError(src, err)
return
}
conR.Logger.Debug("Receive", "src", src, "chId", chID, "msg", msg)
// Get peer states
ps := src.Get(types.PeerStateKey).(*PeerState)
ps, ok := src.Get(types.PeerStateKey).(*PeerState)
if !ok {
panic(fmt.Sprintf("Peer %v has no state", src))
}
switch chID {
case StateChannel:
switch msg := msg.(type) {
case *NewRoundStepMessage:
ps.ApplyNewRoundStepMessage(msg)
case *CommitStepMessage:
ps.ApplyCommitStepMessage(msg)
case *NewValidBlockMessage:
ps.ApplyNewValidBlockMessage(msg)
case *HasVoteMessage:
ps.ApplyHasVoteMessage(msg)
case *VoteSetMaj23Message:
@@ -224,13 +250,12 @@ func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
// (and consequently shows which we don't have)
var ourVotes *cmn.BitArray
switch msg.Type {
case types.VoteTypePrevote:
case types.PrevoteType:
ourVotes = votes.Prevotes(msg.Round).BitArrayByBlockID(msg.BlockID)
case types.VoteTypePrecommit:
case types.PrecommitType:
ourVotes = votes.Precommits(msg.Round).BitArrayByBlockID(msg.BlockID)
default:
conR.Logger.Error("Bad VoteSetBitsMessage field Type")
return
panic("Bad VoteSetBitsMessage field Type. Forgot to add a check in ValidateBasic?")
}
src.TrySend(VoteSetBitsChannel, cdc.MustMarshalBinaryBare(&VoteSetBitsMessage{
Height: msg.Height,
@@ -245,7 +270,7 @@ func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
"height", hb.Height, "round", hb.Round, "sequence", hb.Sequence,
"valIdx", hb.ValidatorIndex, "valAddr", hb.ValidatorAddress)
default:
conR.Logger.Error(cmn.Fmt("Unknown message type %v", reflect.TypeOf(msg)))
conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
}
case DataChannel:
@@ -261,12 +286,10 @@ func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
ps.ApplyProposalPOLMessage(msg)
case *BlockPartMessage:
ps.SetHasProposalBlockPart(msg.Height, msg.Round, msg.Part.Index)
if numBlocks := ps.RecordBlockPart(msg); numBlocks%blocksToContributeToBecomeGoodPeer == 0 {
conR.Switch.MarkPeerAsGood(src)
}
conR.metrics.BlockParts.With("peer_id", string(src.ID())).Add(1)
conR.conS.peerMsgQueue <- msgInfo{msg, src.ID()}
default:
conR.Logger.Error(cmn.Fmt("Unknown message type %v", reflect.TypeOf(msg)))
conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
}
case VoteChannel:
@@ -277,21 +300,18 @@ func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
switch msg := msg.(type) {
case *VoteMessage:
cs := conR.conS
cs.mtx.Lock()
cs.mtx.RLock()
height, valSize, lastCommitSize := cs.Height, cs.Validators.Size(), cs.LastCommit.Size()
cs.mtx.Unlock()
cs.mtx.RUnlock()
ps.EnsureVoteBitArrays(height, valSize)
ps.EnsureVoteBitArrays(height-1, lastCommitSize)
ps.SetHasVote(msg.Vote)
if blocks := ps.RecordVote(msg.Vote); blocks%blocksToContributeToBecomeGoodPeer == 0 {
conR.Switch.MarkPeerAsGood(src)
}
cs.peerMsgQueue <- msgInfo{msg, src.ID()}
default:
// don't punish (leave room for soft upgrades)
conR.Logger.Error(cmn.Fmt("Unknown message type %v", reflect.TypeOf(msg)))
conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
}
case VoteSetBitsChannel:
@@ -309,13 +329,12 @@ func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
if height == msg.Height {
var ourVotes *cmn.BitArray
switch msg.Type {
case types.VoteTypePrevote:
case types.PrevoteType:
ourVotes = votes.Prevotes(msg.Round).BitArrayByBlockID(msg.BlockID)
case types.VoteTypePrecommit:
case types.PrecommitType:
ourVotes = votes.Precommits(msg.Round).BitArrayByBlockID(msg.BlockID)
default:
conR.Logger.Error("Bad VoteSetBitsMessage field Type")
return
panic("Bad VoteSetBitsMessage field Type. Forgot to add a check in ValidateBasic?")
}
ps.ApplyVoteSetBitsMessage(msg, ourVotes)
} else {
@@ -323,11 +342,11 @@ func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
}
default:
// don't punish (leave room for soft upgrades)
conR.Logger.Error(cmn.Fmt("Unknown message type %v", reflect.TypeOf(msg)))
conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
}
default:
conR.Logger.Error(cmn.Fmt("Unknown chId %X", chID))
conR.Logger.Error(fmt.Sprintf("Unknown chId %X", chID))
}
if err != nil {
@@ -357,7 +376,12 @@ func (conR *ConsensusReactor) subscribeToBroadcastEvents() {
const subscriber = "consensus-reactor"
conR.conS.evsw.AddListenerForEvent(subscriber, types.EventNewRoundStep,
func(data tmevents.EventData) {
conR.broadcastNewRoundStepMessages(data.(*cstypes.RoundState))
conR.broadcastNewRoundStepMessage(data.(*cstypes.RoundState))
})
conR.conS.evsw.AddListenerForEvent(subscriber, types.EventValidBlock,
func(data tmevents.EventData) {
conR.broadcastNewValidBlockMessage(data.(*cstypes.RoundState))
})
conR.conS.evsw.AddListenerForEvent(subscriber, types.EventVote,
@@ -383,14 +407,20 @@ func (conR *ConsensusReactor) broadcastProposalHeartbeatMessage(hb *types.Heartb
conR.Switch.Broadcast(StateChannel, cdc.MustMarshalBinaryBare(msg))
}
func (conR *ConsensusReactor) broadcastNewRoundStepMessages(rs *cstypes.RoundState) {
nrsMsg, csMsg := makeRoundStepMessages(rs)
if nrsMsg != nil {
conR.Switch.Broadcast(StateChannel, cdc.MustMarshalBinaryBare(nrsMsg))
}
if csMsg != nil {
conR.Switch.Broadcast(StateChannel, cdc.MustMarshalBinaryBare(csMsg))
func (conR *ConsensusReactor) broadcastNewRoundStepMessage(rs *cstypes.RoundState) {
nrsMsg := makeRoundStepMessage(rs)
conR.Switch.Broadcast(StateChannel, cdc.MustMarshalBinaryBare(nrsMsg))
}
func (conR *ConsensusReactor) broadcastNewValidBlockMessage(rs *cstypes.RoundState) {
csMsg := &NewValidBlockMessage{
Height: rs.Height,
Round: rs.Round,
BlockPartsHeader: rs.ProposalBlockParts.Header(),
BlockParts: rs.ProposalBlockParts.BitArray(),
IsCommit: rs.Step == cstypes.RoundStepCommit,
}
conR.Switch.Broadcast(StateChannel, cdc.MustMarshalBinaryBare(csMsg))
}
// Broadcasts HasVoteMessage to peers that care.
@@ -405,7 +435,10 @@ func (conR *ConsensusReactor) broadcastHasVoteMessage(vote *types.Vote) {
/*
// TODO: Make this broadcast more selective.
for _, peer := range conR.Switch.Peers().List() {
ps := peer.Get(PeerStateKey).(*PeerState)
ps, ok := peer.Get(PeerStateKey).(*PeerState)
if !ok {
panic(fmt.Sprintf("Peer %v has no state", peer))
}
prs := ps.GetRoundState()
if prs.Height == vote.Height {
// TODO: Also filter on round?
@@ -419,7 +452,7 @@ func (conR *ConsensusReactor) broadcastHasVoteMessage(vote *types.Vote) {
*/
}
func makeRoundStepMessages(rs *cstypes.RoundState) (nrsMsg *NewRoundStepMessage, csMsg *CommitStepMessage) {
func makeRoundStepMessage(rs *cstypes.RoundState) (nrsMsg *NewRoundStepMessage) {
nrsMsg = &NewRoundStepMessage{
Height: rs.Height,
Round: rs.Round,
@@ -427,25 +460,13 @@ func makeRoundStepMessages(rs *cstypes.RoundState) (nrsMsg *NewRoundStepMessage,
SecondsSinceStartTime: int(time.Since(rs.StartTime).Seconds()),
LastCommitRound: rs.LastCommit.Round(),
}
if rs.Step == cstypes.RoundStepCommit {
csMsg = &CommitStepMessage{
Height: rs.Height,
BlockPartsHeader: rs.ProposalBlockParts.Header(),
BlockParts: rs.ProposalBlockParts.BitArray(),
}
}
return
}
func (conR *ConsensusReactor) sendNewRoundStepMessages(peer p2p.Peer) {
func (conR *ConsensusReactor) sendNewRoundStepMessage(peer p2p.Peer) {
rs := conR.conS.GetRoundState()
nrsMsg, csMsg := makeRoundStepMessages(rs)
if nrsMsg != nil {
peer.Send(StateChannel, cdc.MustMarshalBinaryBare(nrsMsg))
}
if csMsg != nil {
peer.Send(StateChannel, cdc.MustMarshalBinaryBare(csMsg))
}
nrsMsg := makeRoundStepMessage(rs)
peer.Send(StateChannel, cdc.MustMarshalBinaryBare(nrsMsg))
}
func (conR *ConsensusReactor) gossipDataRoutine(peer p2p.Peer, ps *PeerState) {
@@ -486,7 +507,7 @@ OUTER_LOOP:
if prs.ProposalBlockParts == nil {
blockMeta := conR.conS.blockStore.LoadBlockMeta(prs.Height)
if blockMeta == nil {
cmn.PanicCrisis(cmn.Fmt("Failed to load block %d when blockStore is at %d",
cmn.PanicCrisis(fmt.Sprintf("Failed to load block %d when blockStore is at %d",
prs.Height, conR.conS.blockStore.Height()))
}
ps.InitProposalBlockParts(blockMeta.BlockID.PartsHeader)
@@ -500,7 +521,7 @@ OUTER_LOOP:
// If height and round don't match, sleep.
if (rs.Height != prs.Height) || (rs.Round != prs.Round) {
//logger.Info("Peer Height|Round mismatch, sleeping", "peerHeight", prs.Height, "peerRound", prs.Round, "peer", peer)
time.Sleep(conR.conS.config.PeerGossipSleep())
time.Sleep(conR.conS.config.PeerGossipSleepDuration)
continue OUTER_LOOP
}
@@ -516,6 +537,7 @@ OUTER_LOOP:
msg := &ProposalMessage{Proposal: rs.Proposal}
logger.Debug("Sending proposal", "height", prs.Height, "round", prs.Round)
if peer.Send(DataChannel, cdc.MustMarshalBinaryBare(msg)) {
// NOTE[ZM]: A peer might have received different proposal msg so this Proposal msg will be rejected!
ps.SetHasProposal(rs.Proposal)
}
}
@@ -536,7 +558,7 @@ OUTER_LOOP:
}
// Nothing to do. Sleep.
time.Sleep(conR.conS.config.PeerGossipSleep())
time.Sleep(conR.conS.config.PeerGossipSleepDuration)
continue OUTER_LOOP
}
}
@@ -550,12 +572,12 @@ func (conR *ConsensusReactor) gossipDataForCatchup(logger log.Logger, rs *cstype
if blockMeta == nil {
logger.Error("Failed to load block meta",
"ourHeight", rs.Height, "blockstoreHeight", conR.conS.blockStore.Height())
time.Sleep(conR.conS.config.PeerGossipSleep())
time.Sleep(conR.conS.config.PeerGossipSleepDuration)
return
} else if !blockMeta.BlockID.PartsHeader.Equals(prs.ProposalBlockPartsHeader) {
logger.Info("Peer ProposalBlockPartsHeader mismatch, sleeping",
"blockPartsHeader", blockMeta.BlockID.PartsHeader, "peerBlockPartsHeader", prs.ProposalBlockPartsHeader)
time.Sleep(conR.conS.config.PeerGossipSleep())
time.Sleep(conR.conS.config.PeerGossipSleepDuration)
return
}
// Load the part
@@ -563,7 +585,7 @@ func (conR *ConsensusReactor) gossipDataForCatchup(logger log.Logger, rs *cstype
if part == nil {
logger.Error("Could not load part", "index", index,
"blockPartsHeader", blockMeta.BlockID.PartsHeader, "peerBlockPartsHeader", prs.ProposalBlockPartsHeader)
time.Sleep(conR.conS.config.PeerGossipSleep())
time.Sleep(conR.conS.config.PeerGossipSleepDuration)
return
}
// Send the part
@@ -581,7 +603,7 @@ func (conR *ConsensusReactor) gossipDataForCatchup(logger log.Logger, rs *cstype
return
}
//logger.Info("No parts to send in catch-up, sleeping")
time.Sleep(conR.conS.config.PeerGossipSleep())
time.Sleep(conR.conS.config.PeerGossipSleepDuration)
}
func (conR *ConsensusReactor) gossipVotesRoutine(peer p2p.Peer, ps *PeerState) {
@@ -650,7 +672,7 @@ OUTER_LOOP:
sleeping = 1
}
time.Sleep(conR.conS.config.PeerGossipSleep())
time.Sleep(conR.conS.config.PeerGossipSleepDuration)
continue OUTER_LOOP
}
}
@@ -731,10 +753,10 @@ OUTER_LOOP:
peer.TrySend(StateChannel, cdc.MustMarshalBinaryBare(&VoteSetMaj23Message{
Height: prs.Height,
Round: prs.Round,
Type: types.VoteTypePrevote,
Type: types.PrevoteType,
BlockID: maj23,
}))
time.Sleep(conR.conS.config.PeerQueryMaj23Sleep())
time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)
}
}
}
@@ -748,10 +770,10 @@ OUTER_LOOP:
peer.TrySend(StateChannel, cdc.MustMarshalBinaryBare(&VoteSetMaj23Message{
Height: prs.Height,
Round: prs.Round,
Type: types.VoteTypePrecommit,
Type: types.PrecommitType,
BlockID: maj23,
}))
time.Sleep(conR.conS.config.PeerQueryMaj23Sleep())
time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)
}
}
}
@@ -765,10 +787,10 @@ OUTER_LOOP:
peer.TrySend(StateChannel, cdc.MustMarshalBinaryBare(&VoteSetMaj23Message{
Height: prs.Height,
Round: prs.ProposalPOLRound,
Type: types.VoteTypePrevote,
Type: types.PrevoteType,
BlockID: maj23,
}))
time.Sleep(conR.conS.config.PeerQueryMaj23Sleep())
time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)
}
}
}
@@ -784,19 +806,59 @@ OUTER_LOOP:
peer.TrySend(StateChannel, cdc.MustMarshalBinaryBare(&VoteSetMaj23Message{
Height: prs.Height,
Round: commit.Round(),
Type: types.VoteTypePrecommit,
Type: types.PrecommitType,
BlockID: commit.BlockID,
}))
time.Sleep(conR.conS.config.PeerQueryMaj23Sleep())
time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)
}
}
time.Sleep(conR.conS.config.PeerQueryMaj23Sleep())
time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)
continue OUTER_LOOP
}
}
func (conR *ConsensusReactor) peerStatsRoutine() {
for {
if !conR.IsRunning() {
conR.Logger.Info("Stopping peerStatsRoutine")
return
}
select {
case msg := <-conR.conS.statsMsgQueue:
// Get peer
peer := conR.Switch.Peers().Get(msg.PeerID)
if peer == nil {
conR.Logger.Debug("Attempt to update stats for non-existent peer",
"peer", msg.PeerID)
continue
}
// Get peer state
ps, ok := peer.Get(types.PeerStateKey).(*PeerState)
if !ok {
panic(fmt.Sprintf("Peer %v has no state", peer))
}
switch msg.Msg.(type) {
case *VoteMessage:
if numVotes := ps.RecordVote(); numVotes%votesToContributeToBecomeGoodPeer == 0 {
conR.Switch.MarkPeerAsGood(peer)
}
case *BlockPartMessage:
if numParts := ps.RecordBlockPart(); numParts%blocksToContributeToBecomeGoodPeer == 0 {
conR.Switch.MarkPeerAsGood(peer)
}
}
case <-conR.conS.Quit():
return
case <-conR.Quit():
return
}
}
}
// String returns a string representation of the ConsensusReactor.
// NOTE: For now, it is just a hard-coded string to avoid accessing unprotected shared variables.
// TODO: improve!
@@ -810,13 +872,31 @@ func (conR *ConsensusReactor) StringIndented(indent string) string {
s := "ConsensusReactor{\n"
s += indent + " " + conR.conS.StringIndented(indent+" ") + "\n"
for _, peer := range conR.Switch.Peers().List() {
ps := peer.Get(types.PeerStateKey).(*PeerState)
ps, ok := peer.Get(types.PeerStateKey).(*PeerState)
if !ok {
panic(fmt.Sprintf("Peer %v has no state", peer))
}
s += indent + " " + ps.StringIndented(indent+" ") + "\n"
}
s += indent + "}"
return s
}
func (conR *ConsensusReactor) updateFastSyncingMetric() {
var fastSyncing float64
if conR.fastSync {
fastSyncing = 1
} else {
fastSyncing = 0
}
conR.metrics.FastSyncing.Set(fastSyncing)
}
// ReactorMetrics sets the metrics
func ReactorMetrics(metrics *Metrics) ReactorOption {
return func(conR *ConsensusReactor) { conR.metrics = metrics }
}
//-----------------------------------------------------------------------------
var (
@@ -839,15 +919,13 @@ type PeerState struct {
// peerStateStats holds internal statistics for a peer.
type peerStateStats struct {
LastVoteHeight int64 `json:"last_vote_height"`
Votes int `json:"votes"`
LastBlockPartHeight int64 `json:"last_block_part_height"`
BlockParts int `json:"block_parts"`
Votes int `json:"votes"`
BlockParts int `json:"block_parts"`
}
func (pss peerStateStats) String() string {
return fmt.Sprintf("peerStateStats{lvh: %d, votes: %d, lbph: %d, blockParts: %d}",
pss.LastVoteHeight, pss.Votes, pss.LastBlockPartHeight, pss.BlockParts)
return fmt.Sprintf("peerStateStats{votes: %d, blockParts: %d}",
pss.Votes, pss.BlockParts)
}
// NewPeerState returns a new PeerState for the given Peer
@@ -906,13 +984,20 @@ func (ps *PeerState) SetHasProposal(proposal *types.Proposal) {
if ps.PRS.Height != proposal.Height || ps.PRS.Round != proposal.Round {
return
}
if ps.PRS.Proposal {
return
}
ps.PRS.Proposal = true
ps.PRS.ProposalBlockPartsHeader = proposal.BlockPartsHeader
ps.PRS.ProposalBlockParts = cmn.NewBitArray(proposal.BlockPartsHeader.Total)
// ps.PRS.ProposalBlockParts is set due to NewValidBlockMessage
if ps.PRS.ProposalBlockParts != nil {
return
}
ps.PRS.ProposalBlockPartsHeader = proposal.BlockID.PartsHeader
ps.PRS.ProposalBlockParts = cmn.NewBitArray(proposal.BlockID.PartsHeader.Total)
ps.PRS.ProposalPOLRound = proposal.POLRound
ps.PRS.ProposalPOL = nil // Nil until ProposalPOLMessage received.
}
@@ -948,7 +1033,11 @@ func (ps *PeerState) PickSendVote(votes types.VoteSetReader) bool {
if vote, ok := ps.PickVoteToSend(votes); ok {
msg := &VoteMessage{vote}
ps.logger.Debug("Sending vote message", "ps", ps, "vote", vote)
return ps.peer.Send(VoteChannel, cdc.MustMarshalBinaryBare(msg))
if ps.peer.Send(VoteChannel, cdc.MustMarshalBinaryBare(msg)) {
ps.SetHasVote(vote)
return true
}
return false
}
return false
}
@@ -964,7 +1053,7 @@ func (ps *PeerState) PickVoteToSend(votes types.VoteSetReader) (vote *types.Vote
return nil, false
}
height, round, type_, size := votes.Height(), votes.Round(), votes.Type(), votes.Size()
height, round, type_, size := votes.Height(), votes.Round(), types.SignedMsgType(votes.Type()), votes.Size()
// Lazily set data using 'votes'.
if votes.IsCommit() {
@@ -977,13 +1066,12 @@ func (ps *PeerState) PickVoteToSend(votes types.VoteSetReader) (vote *types.Vote
return nil, false // Not something worth sending
}
if index, ok := votes.BitArray().Sub(psVotes).PickRandom(); ok {
ps.setHasVote(height, round, type_, index)
return votes.GetByIndex(index), true
}
return nil, false
}
func (ps *PeerState) getVoteBitArray(height int64, round int, type_ byte) *cmn.BitArray {
func (ps *PeerState) getVoteBitArray(height int64, round int, type_ types.SignedMsgType) *cmn.BitArray {
if !types.IsVoteTypeValid(type_) {
return nil
}
@@ -991,25 +1079,25 @@ func (ps *PeerState) getVoteBitArray(height int64, round int, type_ byte) *cmn.B
if ps.PRS.Height == height {
if ps.PRS.Round == round {
switch type_ {
case types.VoteTypePrevote:
case types.PrevoteType:
return ps.PRS.Prevotes
case types.VoteTypePrecommit:
case types.PrecommitType:
return ps.PRS.Precommits
}
}
if ps.PRS.CatchupCommitRound == round {
switch type_ {
case types.VoteTypePrevote:
case types.PrevoteType:
return nil
case types.VoteTypePrecommit:
case types.PrecommitType:
return ps.PRS.CatchupCommit
}
}
if ps.PRS.ProposalPOLRound == round {
switch type_ {
case types.VoteTypePrevote:
case types.PrevoteType:
return ps.PRS.ProposalPOL
case types.VoteTypePrecommit:
case types.PrecommitType:
return nil
}
}
@@ -1018,9 +1106,9 @@ func (ps *PeerState) getVoteBitArray(height int64, round int, type_ byte) *cmn.B
if ps.PRS.Height == height+1 {
if ps.PRS.LastCommitRound == round {
switch type_ {
case types.VoteTypePrevote:
case types.PrevoteType:
return nil
case types.VoteTypePrecommit:
case types.PrecommitType:
return ps.PRS.LastCommit
}
}
@@ -1038,7 +1126,7 @@ func (ps *PeerState) ensureCatchupCommitRound(height int64, round int, numValida
NOTE: This is wrong, 'round' could change.
e.g. if orig round is not the same as block LastCommit round.
if ps.CatchupCommitRound != -1 && ps.CatchupCommitRound != round {
cmn.PanicSanity(cmn.Fmt("Conflicting CatchupCommitRound. Height: %v, Orig: %v, New: %v", height, ps.CatchupCommitRound, round))
cmn.PanicSanity(fmt.Sprintf("Conflicting CatchupCommitRound. Height: %v, Orig: %v, New: %v", height, ps.CatchupCommitRound, round))
}
*/
if ps.PRS.CatchupCommitRound == round {
@@ -1083,18 +1171,14 @@ func (ps *PeerState) ensureVoteBitArrays(height int64, numValidators int) {
}
}
// RecordVote updates internal statistics for this peer by recording the vote.
// It returns the total number of votes (1 per block). This essentially means
// the number of blocks for which peer has been sending us votes.
func (ps *PeerState) RecordVote(vote *types.Vote) int {
// RecordVote increments internal votes related statistics for this peer.
// It returns the total number of added votes.
func (ps *PeerState) RecordVote() int {
ps.mtx.Lock()
defer ps.mtx.Unlock()
if ps.Stats.LastVoteHeight >= vote.Height {
return ps.Stats.Votes
}
ps.Stats.LastVoteHeight = vote.Height
ps.Stats.Votes++
return ps.Stats.Votes
}
@@ -1107,25 +1191,17 @@ func (ps *PeerState) VotesSent() int {
return ps.Stats.Votes
}
// RecordBlockPart updates internal statistics for this peer by recording the
// block part. It returns the total number of block parts (1 per block). This
// essentially means the number of blocks for which peer has been sending us
// block parts.
func (ps *PeerState) RecordBlockPart(bp *BlockPartMessage) int {
// RecordBlockPart increments internal block part related statistics for this peer.
// It returns the total number of added block parts.
func (ps *PeerState) RecordBlockPart() int {
ps.mtx.Lock()
defer ps.mtx.Unlock()
if ps.Stats.LastBlockPartHeight >= bp.Height {
return ps.Stats.BlockParts
}
ps.Stats.LastBlockPartHeight = bp.Height
ps.Stats.BlockParts++
return ps.Stats.BlockParts
}
// BlockPartsSent returns the number of blocks for which peer has been sending
// us block parts.
// BlockPartsSent returns the number of useful block parts the peer has sent us.
func (ps *PeerState) BlockPartsSent() int {
ps.mtx.Lock()
defer ps.mtx.Unlock()
@@ -1141,8 +1217,8 @@ func (ps *PeerState) SetHasVote(vote *types.Vote) {
ps.setHasVote(vote.Height, vote.Round, vote.Type, vote.ValidatorIndex)
}
func (ps *PeerState) setHasVote(height int64, round int, type_ byte, index int) {
logger := ps.logger.With("peerH/R", cmn.Fmt("%d/%d", ps.PRS.Height, ps.PRS.Round), "H/R", cmn.Fmt("%d/%d", height, round))
func (ps *PeerState) setHasVote(height int64, round int, type_ types.SignedMsgType, index int) {
logger := ps.logger.With("peerH/R", fmt.Sprintf("%d/%d", ps.PRS.Height, ps.PRS.Round), "H/R", fmt.Sprintf("%d/%d", height, round))
logger.Debug("setHasVote", "type", type_, "index", index)
// NOTE: some may be nil BitArrays -> no side effects.
@@ -1165,11 +1241,10 @@ func (ps *PeerState) ApplyNewRoundStepMessage(msg *NewRoundStepMessage) {
// Just remember these values.
psHeight := ps.PRS.Height
psRound := ps.PRS.Round
//psStep := ps.PRS.Step
psCatchupCommitRound := ps.PRS.CatchupCommitRound
psCatchupCommit := ps.PRS.CatchupCommit
startTime := time.Now().Add(-1 * time.Duration(msg.SecondsSinceStartTime) * time.Second)
startTime := tmtime.Now().Add(-1 * time.Duration(msg.SecondsSinceStartTime) * time.Second)
ps.PRS.Height = msg.Height
ps.PRS.Round = msg.Round
ps.PRS.Step = msg.Step
@@ -1206,8 +1281,8 @@ func (ps *PeerState) ApplyNewRoundStepMessage(msg *NewRoundStepMessage) {
}
}
// ApplyCommitStepMessage updates the peer state for the new commit.
func (ps *PeerState) ApplyCommitStepMessage(msg *CommitStepMessage) {
// ApplyNewValidBlockMessage updates the peer state for the new valid block.
func (ps *PeerState) ApplyNewValidBlockMessage(msg *NewValidBlockMessage) {
ps.mtx.Lock()
defer ps.mtx.Unlock()
@@ -1215,6 +1290,10 @@ func (ps *PeerState) ApplyCommitStepMessage(msg *CommitStepMessage) {
return
}
if ps.PRS.Round != msg.Round && !msg.IsCommit {
return
}
ps.PRS.ProposalBlockPartsHeader = msg.BlockPartsHeader
ps.PRS.ProposalBlockParts = msg.BlockParts
}
@@ -1293,12 +1372,14 @@ func (ps *PeerState) StringIndented(indent string) string {
// Messages
// ConsensusMessage is a message that can be sent and received on the ConsensusReactor
type ConsensusMessage interface{}
type ConsensusMessage interface {
ValidateBasic() error
}
func RegisterConsensusMessages(cdc *amino.Codec) {
cdc.RegisterInterface((*ConsensusMessage)(nil), nil)
cdc.RegisterConcrete(&NewRoundStepMessage{}, "tendermint/NewRoundStepMessage", nil)
cdc.RegisterConcrete(&CommitStepMessage{}, "tendermint/CommitStep", nil)
cdc.RegisterConcrete(&NewValidBlockMessage{}, "tendermint/NewValidBlockMessage", nil)
cdc.RegisterConcrete(&ProposalMessage{}, "tendermint/Proposal", nil)
cdc.RegisterConcrete(&ProposalPOLMessage{}, "tendermint/ProposalPOL", nil)
cdc.RegisterConcrete(&BlockPartMessage{}, "tendermint/BlockPart", nil)
@@ -1329,6 +1410,27 @@ type NewRoundStepMessage struct {
LastCommitRound int
}
// ValidateBasic performs basic validation.
func (m *NewRoundStepMessage) ValidateBasic() error {
if m.Height < 0 {
return errors.New("Negative Height")
}
if m.Round < 0 {
return errors.New("Negative Round")
}
if !m.Step.IsValid() {
return errors.New("Invalid Step")
}
// NOTE: SecondsSinceStartTime may be negative
if (m.Height == 1 && m.LastCommitRound != -1) ||
(m.Height > 1 && m.LastCommitRound < -1) { // TODO: #2737 LastCommitRound should always be >= 0 for heights > 1
return errors.New("Invalid LastCommitRound (for 1st block: -1, for others: >= 0)")
}
return nil
}
// String returns a string representation.
func (m *NewRoundStepMessage) String() string {
return fmt.Sprintf("[NewRoundStep H:%v R:%v S:%v LCR:%v]",
@@ -1337,16 +1439,40 @@ func (m *NewRoundStepMessage) String() string {
//-------------------------------------
// CommitStepMessage is sent when a block is committed.
type CommitStepMessage struct {
// NewValidBlockMessage is sent when a validator observes a valid block B in some round r,
// i.e., there is a Proposal for block B and 2/3+ prevotes for block B in round r.
// In case the block is also committed, the IsCommit flag is set to true.
type NewValidBlockMessage struct {
Height int64
Round int
BlockPartsHeader types.PartSetHeader
BlockParts *cmn.BitArray
IsCommit bool
}
// ValidateBasic performs basic validation.
func (m *NewValidBlockMessage) ValidateBasic() error {
if m.Height < 0 {
return errors.New("Negative Height")
}
if m.Round < 0 {
return errors.New("Negative Round")
}
if err := m.BlockPartsHeader.ValidateBasic(); err != nil {
return fmt.Errorf("Wrong BlockPartsHeader: %v", err)
}
if m.BlockParts.Size() != m.BlockPartsHeader.Total {
return fmt.Errorf("BlockParts bit array size %d not equal to BlockPartsHeader.Total %d",
m.BlockParts.Size(),
m.BlockPartsHeader.Total)
}
return nil
}
// String returns a string representation.
func (m *CommitStepMessage) String() string {
return fmt.Sprintf("[CommitStep H:%v BP:%v BA:%v]", m.Height, m.BlockPartsHeader, m.BlockParts)
func (m *NewValidBlockMessage) String() string {
return fmt.Sprintf("[ValidBlockMessage H:%v R:%v BP:%v BA:%v IsCommit:%v]",
m.Height, m.Round, m.BlockPartsHeader, m.BlockParts, m.IsCommit)
}
//-------------------------------------
@@ -1356,6 +1482,11 @@ type ProposalMessage struct {
Proposal *types.Proposal
}
// ValidateBasic performs basic validation.
func (m *ProposalMessage) ValidateBasic() error {
return m.Proposal.ValidateBasic()
}
// String returns a string representation.
func (m *ProposalMessage) String() string {
return fmt.Sprintf("[Proposal %v]", m.Proposal)
@@ -1370,6 +1501,20 @@ type ProposalPOLMessage struct {
ProposalPOL *cmn.BitArray
}
// ValidateBasic performs basic validation.
func (m *ProposalPOLMessage) ValidateBasic() error {
if m.Height < 0 {
return errors.New("Negative Height")
}
if m.ProposalPOLRound < 0 {
return errors.New("Negative ProposalPOLRound")
}
if m.ProposalPOL.Size() == 0 {
return errors.New("Empty ProposalPOL bit array")
}
return nil
}
// String returns a string representation.
func (m *ProposalPOLMessage) String() string {
return fmt.Sprintf("[ProposalPOL H:%v POLR:%v POL:%v]", m.Height, m.ProposalPOLRound, m.ProposalPOL)
@@ -1384,6 +1529,20 @@ type BlockPartMessage struct {
Part *types.Part
}
// ValidateBasic performs basic validation.
func (m *BlockPartMessage) ValidateBasic() error {
if m.Height < 0 {
return errors.New("Negative Height")
}
if m.Round < 0 {
return errors.New("Negative Round")
}
if err := m.Part.ValidateBasic(); err != nil {
return fmt.Errorf("Wrong Part: %v", err)
}
return nil
}
// String returns a string representation.
func (m *BlockPartMessage) String() string {
return fmt.Sprintf("[BlockPart H:%v R:%v P:%v]", m.Height, m.Round, m.Part)
@@ -1396,6 +1555,11 @@ type VoteMessage struct {
Vote *types.Vote
}
// ValidateBasic performs basic validation.
func (m *VoteMessage) ValidateBasic() error {
return m.Vote.ValidateBasic()
}
// String returns a string representation.
func (m *VoteMessage) String() string {
return fmt.Sprintf("[Vote %v]", m.Vote)
@@ -1407,10 +1571,27 @@ func (m *VoteMessage) String() string {
type HasVoteMessage struct {
Height int64
Round int
Type byte
Type types.SignedMsgType
Index int
}
// ValidateBasic performs basic validation.
func (m *HasVoteMessage) ValidateBasic() error {
if m.Height < 0 {
return errors.New("Negative Height")
}
if m.Round < 0 {
return errors.New("Negative Round")
}
if !types.IsVoteTypeValid(m.Type) {
return errors.New("Invalid Type")
}
if m.Index < 0 {
return errors.New("Negative Index")
}
return nil
}
// String returns a string representation.
func (m *HasVoteMessage) String() string {
return fmt.Sprintf("[HasVote VI:%v V:{%v/%02d/%v}]", m.Index, m.Height, m.Round, m.Type)
@@ -1422,10 +1603,27 @@ func (m *HasVoteMessage) String() string {
type VoteSetMaj23Message struct {
Height int64
Round int
Type byte
Type types.SignedMsgType
BlockID types.BlockID
}
// ValidateBasic performs basic validation.
func (m *VoteSetMaj23Message) ValidateBasic() error {
if m.Height < 0 {
return errors.New("Negative Height")
}
if m.Round < 0 {
return errors.New("Negative Round")
}
if !types.IsVoteTypeValid(m.Type) {
return errors.New("Invalid Type")
}
if err := m.BlockID.ValidateBasic(); err != nil {
return fmt.Errorf("Wrong BlockID: %v", err)
}
return nil
}
// String returns a string representation.
func (m *VoteSetMaj23Message) String() string {
return fmt.Sprintf("[VSM23 %v/%02d/%v %v]", m.Height, m.Round, m.Type, m.BlockID)
@@ -1437,11 +1635,29 @@ func (m *VoteSetMaj23Message) String() string {
type VoteSetBitsMessage struct {
Height int64
Round int
Type byte
Type types.SignedMsgType
BlockID types.BlockID
Votes *cmn.BitArray
}
// ValidateBasic performs basic validation.
func (m *VoteSetBitsMessage) ValidateBasic() error {
if m.Height < 0 {
return errors.New("Negative Height")
}
if m.Round < 0 {
return errors.New("Negative Round")
}
if !types.IsVoteTypeValid(m.Type) {
return errors.New("Invalid Type")
}
if err := m.BlockID.ValidateBasic(); err != nil {
return fmt.Errorf("Wrong BlockID: %v", err)
}
// NOTE: Votes.Size() can be zero if the node does not have any votes.
return nil
}
// String returns a string representation.
func (m *VoteSetBitsMessage) String() string {
return fmt.Sprintf("[VSB %v/%02d/%v %v %v]", m.Height, m.Round, m.Type, m.BlockID, m.Votes)
@@ -1454,6 +1670,11 @@ type ProposalHeartbeatMessage struct {
Heartbeat *types.Heartbeat
}
// ValidateBasic performs basic validation.
func (m *ProposalHeartbeatMessage) ValidateBasic() error {
return m.Heartbeat.ValidateBasic()
}
// String returns a string representation.
func (m *ProposalHeartbeatMessage) String() string {
return fmt.Sprintf("[HEARTBEAT %v]", m.Heartbeat)

View File
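
The hunks above give every consensus wire message a ValidateBasic method that checks field invariants (non-negative Height/Round/Index, a valid vote type, bit-array sizes consistent with the part-set header) before the reactor acts on the message. Below is a minimal, self-contained sketch of that validate-before-processing pattern; the message and handler names are hypothetical stand-ins, not the reactor's real types.

```go
package main

import (
	"errors"
	"fmt"
)

// validator is the narrow interface this sketch relies on; in the diff,
// every consensus message (NewValidBlockMessage, VoteSetBitsMessage, ...)
// satisfies it via its ValidateBasic method.
type validator interface {
	ValidateBasic() error
}

// hasVoteMsg is a hypothetical stand-in with the same style of invariants
// as HasVoteMessage.
type hasVoteMsg struct {
	Height int64
	Round  int
	Index  int
}

func (m *hasVoteMsg) ValidateBasic() error {
	if m.Height < 0 {
		return errors.New("negative Height")
	}
	if m.Round < 0 {
		return errors.New("negative Round")
	}
	if m.Index < 0 {
		return errors.New("negative Index")
	}
	return nil
}

// handle mirrors the flow: reject structurally invalid messages before
// touching any peer or consensus state.
func handle(msg validator) error {
	if err := msg.ValidateBasic(); err != nil {
		return fmt.Errorf("invalid message: %v", err)
	}
	// ... process the message ...
	return nil
}

func main() {
	fmt.Println(handle(&hasVoteMsg{Height: 10, Round: 0, Index: 2})) // <nil>
	fmt.Println(handle(&hasVoteMsg{Height: -1}))                     // invalid message: negative Height
}
```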

@@ -4,24 +4,27 @@ import (
"context"
"fmt"
"os"
"path"
"runtime"
"runtime/pprof"
"sync"
"testing"
"time"
"github.com/tendermint/tendermint/abci/example/kvstore"
cmn "github.com/tendermint/tendermint/libs/common"
"github.com/tendermint/tendermint/libs/log"
sm "github.com/tendermint/tendermint/state"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/p2p"
p2pdummy "github.com/tendermint/tendermint/p2p/dummy"
"github.com/tendermint/tendermint/types"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/abci/client"
"github.com/tendermint/tendermint/abci/example/kvstore"
abci "github.com/tendermint/tendermint/abci/types"
bc "github.com/tendermint/tendermint/blockchain"
cfg "github.com/tendermint/tendermint/config"
dbm "github.com/tendermint/tendermint/libs/db"
"github.com/tendermint/tendermint/libs/log"
mempl "github.com/tendermint/tendermint/mempool"
"github.com/tendermint/tendermint/p2p"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
)
func init() {
@@ -94,49 +97,122 @@ func TestReactorBasic(t *testing.T) {
// Ensure we can process blocks with evidence
func TestReactorWithEvidence(t *testing.T) {
N := 4
css := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
evpool := mockEvidencePool{
t: t,
ev: []types.Evidence{types.NewMockGoodEvidence(1, 1, []byte("somone"))},
types.RegisterMockEvidences(cdc)
types.RegisterMockEvidences(types.GetCodec())
nValidators := 4
testName := "consensus_reactor_test"
tickerFunc := newMockTickerFunc(true)
appFunc := newCounter
// heed the advice from https://www.sandimetz.com/blog/2016/1/20/the-wrong-abstraction
// to unroll unwieldy abstractions. Here we duplicate the code from:
// css := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
genDoc, privVals := randGenesisDoc(nValidators, false, 30)
css := make([]*ConsensusState, nValidators)
logger := consensusLogger()
for i := 0; i < nValidators; i++ {
stateDB := dbm.NewMemDB() // each state needs its own db
state, _ := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc)
thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
app := appFunc()
vals := types.TM2PB.ValidatorUpdates(state.Validators)
app.InitChain(abci.RequestInitChain{Validators: vals})
pv := privVals[i]
// duplicate code from:
// css[i] = newConsensusStateWithConfig(thisConfig, state, privVals[i], app)
blockDB := dbm.NewMemDB()
blockStore := bc.NewBlockStore(blockDB)
// one for mempool, one for consensus
mtx := new(sync.Mutex)
proxyAppConnMem := abcicli.NewLocalClient(mtx, app)
proxyAppConnCon := abcicli.NewLocalClient(mtx, app)
// Make Mempool
mempool := mempl.NewMempool(thisConfig.Mempool, proxyAppConnMem, 0)
mempool.SetLogger(log.TestingLogger().With("module", "mempool"))
if thisConfig.Consensus.WaitForTxs() {
mempool.EnableTxsAvailable()
}
// mock the evidence pool
// everyone includes evidence of another double signing
vIdx := (i + 1) % nValidators
evpool := newMockEvidencePool(privVals[vIdx].GetAddress())
// Make ConsensusState
blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyAppConnCon, mempool, evpool)
cs := NewConsensusState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool)
cs.SetLogger(log.TestingLogger().With("module", "consensus"))
cs.SetPrivValidator(pv)
eventBus := types.NewEventBus()
eventBus.SetLogger(log.TestingLogger().With("module", "events"))
eventBus.Start()
cs.SetEventBus(eventBus)
cs.SetTimeoutTicker(tickerFunc())
cs.SetLogger(logger.With("validator", i, "module", "consensus"))
css[i] = cs
}
for i := 0; i < N; i++ {
css[i].evpool = evpool
}
reactors, eventChans, eventBuses := startConsensusNet(t, css, N)
reactors, eventChans, eventBuses := startConsensusNet(t, css, nValidators)
defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
// wait till everyone makes the first new block
timeoutWaitGroup(t, N, func(j int) {
<-eventChans[j]
// wait till everyone makes the first new block with no evidence
timeoutWaitGroup(t, nValidators, func(j int) {
blockI := <-eventChans[j]
block := blockI.(types.EventDataNewBlock).Block
assert.True(t, len(block.Evidence.Evidence) == 0)
}, css)
// second block should have evidence
timeoutWaitGroup(t, N, func(j int) {
<-eventChans[j]
timeoutWaitGroup(t, nValidators, func(j int) {
blockI := <-eventChans[j]
block := blockI.(types.EventDataNewBlock).Block
assert.True(t, len(block.Evidence.Evidence) > 0)
}, css)
}
// mock evidence pool returns no evidence for block 1,
// and returns one piece for all higher blocks. The one piece
// is for a given validator at block 1.
type mockEvidencePool struct {
height int
ev []types.Evidence
t *testing.T
}
func (m mockEvidencePool) PendingEvidence() []types.Evidence {
func newMockEvidencePool(val []byte) *mockEvidencePool {
return &mockEvidencePool{
ev: []types.Evidence{types.NewMockGoodEvidence(1, 1, val)},
}
}
// NOTE: maxBytes is ignored
func (m *mockEvidencePool) PendingEvidence(maxBytes int64) []types.Evidence {
if m.height > 0 {
return m.ev
}
return nil
}
func (m mockEvidencePool) AddEvidence(types.Evidence) error { return nil }
func (m mockEvidencePool) Update(block *types.Block, state sm.State) {
m.height += 1
func (m *mockEvidencePool) AddEvidence(types.Evidence) error { return nil }
func (m *mockEvidencePool) Update(block *types.Block, state sm.State) {
if m.height > 0 {
require.True(m.t, len(block.Evidence.Evidence) > 0)
if len(block.Evidence.Evidence) == 0 {
panic("block has no evidence")
}
}
m.height++
}
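The comment just above spells out the mock pool's contract: no pending evidence while its height is still 0, then one fixed piece for every later block. Here is a tiny sketch of the same height-gated behaviour using hypothetical types; it deliberately does not implement the real sm.EvidencePool interface.

```go
package main

import "fmt"

// fakeEvidence stands in for types.Evidence in this sketch.
type fakeEvidence string

// gatedPool mirrors the mock's behaviour: Update bumps the height, and
// PendingEvidence only returns evidence once at least one block has been
// processed.
type gatedPool struct {
	height int
	ev     []fakeEvidence
}

func (p *gatedPool) PendingEvidence() []fakeEvidence {
	if p.height > 0 {
		return p.ev
	}
	return nil
}

func (p *gatedPool) Update() { p.height++ }

func main() {
	p := &gatedPool{ev: []fakeEvidence{"double-sign@h1"}}
	fmt.Println(len(p.PendingEvidence())) // 0: nothing is proposed into block 1
	p.Update()                            // block 1 committed
	fmt.Println(len(p.PendingEvidence())) // 1: evidence appears from block 2 on
}
```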
//------------------------------------
// Ensure a testnet sends proposal heartbeats and makes blocks when there are txs
func TestReactorProposalHeartbeats(t *testing.T) {
N := 4
@@ -169,110 +245,25 @@ func TestReactorProposalHeartbeats(t *testing.T) {
}, css)
}
// Test we record block parts from other peers
func TestReactorRecordsBlockParts(t *testing.T) {
// create dummy peer
peer := p2pdummy.NewPeer()
ps := NewPeerState(peer).SetLogger(log.TestingLogger())
peer.Set(types.PeerStateKey, ps)
// Test we record stats about votes and block parts from other peers.
func TestReactorRecordsVotesAndBlockParts(t *testing.T) {
N := 4
css := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
reactors, eventChans, eventBuses := startConsensusNet(t, css, N)
defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
// create reactor
css := randConsensusNet(1, "consensus_reactor_records_block_parts_test", newMockTickerFunc(true), newPersistentKVStore)
reactor := NewConsensusReactor(css[0], false) // so we dont start the consensus states
reactor.SetEventBus(css[0].eventBus)
reactor.SetLogger(log.TestingLogger())
sw := p2p.MakeSwitch(cfg.DefaultP2PConfig(), 1, "testing", "123.123.123", func(i int, sw *p2p.Switch) *p2p.Switch { return sw })
reactor.SetSwitch(sw)
err := reactor.Start()
require.NoError(t, err)
defer reactor.Stop()
// wait till everyone makes the first new block
timeoutWaitGroup(t, N, func(j int) {
<-eventChans[j]
}, css)
// 1) new block part
parts := types.NewPartSetFromData(cmn.RandBytes(100), 10)
msg := &BlockPartMessage{
Height: 2,
Round: 0,
Part: parts.GetPart(0),
}
bz, err := cdc.MarshalBinaryBare(msg)
require.NoError(t, err)
// Get peer
peer := reactors[1].Switch.Peers().List()[0]
// Get peer state
ps := peer.Get(types.PeerStateKey).(*PeerState)
reactor.Receive(DataChannel, peer, bz)
require.Equal(t, 1, ps.BlockPartsSent(), "number of block parts sent should have increased by 1")
// 2) block part with the same height, but different round
msg.Round = 1
bz, err = cdc.MarshalBinaryBare(msg)
require.NoError(t, err)
reactor.Receive(DataChannel, peer, bz)
require.Equal(t, 1, ps.BlockPartsSent(), "number of block parts sent should stay the same")
// 3) block part from earlier height
msg.Height = 1
msg.Round = 0
bz, err = cdc.MarshalBinaryBare(msg)
require.NoError(t, err)
reactor.Receive(DataChannel, peer, bz)
require.Equal(t, 1, ps.BlockPartsSent(), "number of block parts sent should stay the same")
}
// Test we record votes from other peers
func TestReactorRecordsVotes(t *testing.T) {
// create dummy peer
peer := p2pdummy.NewPeer()
ps := NewPeerState(peer).SetLogger(log.TestingLogger())
peer.Set(types.PeerStateKey, ps)
// create reactor
css := randConsensusNet(1, "consensus_reactor_records_votes_test", newMockTickerFunc(true), newPersistentKVStore)
reactor := NewConsensusReactor(css[0], false) // so we dont start the consensus states
reactor.SetEventBus(css[0].eventBus)
reactor.SetLogger(log.TestingLogger())
sw := p2p.MakeSwitch(cfg.DefaultP2PConfig(), 1, "testing", "123.123.123", func(i int, sw *p2p.Switch) *p2p.Switch { return sw })
reactor.SetSwitch(sw)
err := reactor.Start()
require.NoError(t, err)
defer reactor.Stop()
_, val := css[0].state.Validators.GetByIndex(0)
// 1) new vote
vote := &types.Vote{
ValidatorIndex: 0,
ValidatorAddress: val.Address,
Height: 2,
Round: 0,
Timestamp: time.Now().UTC(),
Type: types.VoteTypePrevote,
BlockID: types.BlockID{},
}
bz, err := cdc.MarshalBinaryBare(&VoteMessage{vote})
require.NoError(t, err)
reactor.Receive(VoteChannel, peer, bz)
assert.Equal(t, 1, ps.VotesSent(), "number of votes sent should have increased by 1")
// 2) vote with the same height, but different round
vote.Round = 1
bz, err = cdc.MarshalBinaryBare(&VoteMessage{vote})
require.NoError(t, err)
reactor.Receive(VoteChannel, peer, bz)
assert.Equal(t, 1, ps.VotesSent(), "number of votes sent should stay the same")
// 3) vote from earlier height
vote.Height = 1
vote.Round = 0
bz, err = cdc.MarshalBinaryBare(&VoteMessage{vote})
require.NoError(t, err)
reactor.Receive(VoteChannel, peer, bz)
assert.Equal(t, 1, ps.VotesSent(), "number of votes sent should stay the same")
assert.Equal(t, true, ps.VotesSent() > 0, "number of votes sent should have increased")
assert.Equal(t, true, ps.BlockPartsSent() > 0, "number of block parts sent should have increased")
}
//-------------------------------------------------------------
@@ -465,7 +456,7 @@ func waitForAndValidateBlock(t *testing.T, n int, activeVals map[string]struct{}
err := validateBlock(newBlock, activeVals)
assert.Nil(t, err)
for _, tx := range txs {
css[j].mempool.CheckTx(tx, nil)
err := css[j].mempool.CheckTx(tx, nil)
assert.Nil(t, err)
}
}, css)

View File

@@ -11,6 +11,7 @@ import (
"time"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/version"
//auto "github.com/tendermint/tendermint/libs/autofile"
cmn "github.com/tendermint/tendermint/libs/common"
dbm "github.com/tendermint/tendermint/libs/db"
@@ -19,7 +20,6 @@ import (
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tendermint/version"
)
var crc32c = crc32.MakeTable(crc32.Castagnoli)
@@ -73,7 +73,7 @@ func (cs *ConsensusState) readReplayMessage(msg *TimedWALMessage, newStepCh chan
case *ProposalMessage:
p := msg.Proposal
cs.Logger.Info("Replay: Proposal", "height", p.Height, "round", p.Round, "header",
p.BlockPartsHeader, "pol", p.POLRound, "peer", peerID)
p.BlockID.PartsHeader, "pol", p.POLRound, "peer", peerID)
case *BlockPartMessage:
cs.Logger.Info("Replay: BlockPart", "height", msg.Height, "round", msg.Round, "peer", peerID)
case *VoteMessage:
@@ -227,7 +227,7 @@ func (h *Handshaker) NBlocks() int {
func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error {
// Handshake is done via ABCI Info on the query conn.
res, err := proxyApp.Query().InfoSync(abci.RequestInfo{version.Version})
res, err := proxyApp.Query().InfoSync(proxy.RequestInfo)
if err != nil {
return fmt.Errorf("Error calling Info: %v", err)
}
@@ -238,9 +238,15 @@ func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error {
}
appHash := res.LastBlockAppHash
h.logger.Info("ABCI Handshake", "appHeight", blockHeight, "appHash", fmt.Sprintf("%X", appHash))
h.logger.Info("ABCI Handshake App Info",
"height", blockHeight,
"hash", fmt.Sprintf("%X", appHash),
"software-version", res.Version,
"protocol-version", res.AppVersion,
)
// TODO: check app version.
// Set AppVersion on the state.
h.initialState.Version.Consensus.App = version.Protocol(res.AppVersion)
// Replay blocks up to the latest in the blockstore.
_, err = h.ReplayBlocks(h.initialState, appHash, blockHeight, proxyApp)
@@ -258,21 +264,25 @@ func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error {
// Replay all blocks since appBlockHeight and ensure the result matches the current state.
// Returns the final AppHash or an error.
func (h *Handshaker) ReplayBlocks(state sm.State, appHash []byte, appBlockHeight int64, proxyApp proxy.AppConns) ([]byte, error) {
func (h *Handshaker) ReplayBlocks(
state sm.State,
appHash []byte,
appBlockHeight int64,
proxyApp proxy.AppConns,
) ([]byte, error) {
storeBlockHeight := h.store.Height()
stateBlockHeight := state.LastBlockHeight
h.logger.Info("ABCI Replay Blocks", "appHeight", appBlockHeight, "storeHeight", storeBlockHeight, "stateHeight", stateBlockHeight)
// If appBlockHeight == 0 it means that we are at genesis and hence should send InitChain
// If appBlockHeight == 0 it means that we are at genesis and hence should send InitChain.
if appBlockHeight == 0 {
validators := types.TM2PB.Validators(state.Validators)
nextVals := types.TM2PB.ValidatorUpdates(state.NextValidators) // state.Validators would work too.
csParams := types.TM2PB.ConsensusParams(h.genDoc.ConsensusParams)
req := abci.RequestInitChain{
Time: h.genDoc.GenesisTime.Unix(), // TODO
Time: h.genDoc.GenesisTime,
ChainId: h.genDoc.ChainID,
ConsensusParams: csParams,
Validators: validators,
Validators: nextVals,
AppStateBytes: h.genDoc.AppState,
}
res, err := proxyApp.Consensus().InitChainSync(req)
@@ -280,15 +290,14 @@ func (h *Handshaker) ReplayBlocks(state sm.State, appHash []byte, appBlockHeight
return nil, err
}
// if the app returned validators
// or consensus params, update the state
// with the them
// If the app returned validators or consensus params, update the state.
if len(res.Validators) > 0 {
vals, err := types.PB2TM.Validators(res.Validators)
vals, err := types.PB2TM.ValidatorUpdates(res.Validators)
if err != nil {
return nil, err
}
state.Validators = types.NewValidatorSet(vals)
state.NextValidators = types.NewValidatorSet(vals)
}
if res.ConsensusParams != nil {
state.ConsensusParams = types.PB2TM.ConsensusParams(res.ConsensusParams)
@@ -296,7 +305,7 @@ func (h *Handshaker) ReplayBlocks(state sm.State, appHash []byte, appBlockHeight
sm.SaveState(h.stateDB, state)
}
// First handle edge cases and constraints on the storeBlockHeight
// First handle edge cases and constraints on the storeBlockHeight.
if storeBlockHeight == 0 {
return appHash, checkAppHash(state, appHash)
@@ -306,11 +315,11 @@ func (h *Handshaker) ReplayBlocks(state sm.State, appHash []byte, appBlockHeight
} else if storeBlockHeight < stateBlockHeight {
// the state should never be ahead of the store (this is under tendermint's control)
cmn.PanicSanity(cmn.Fmt("StateBlockHeight (%d) > StoreBlockHeight (%d)", stateBlockHeight, storeBlockHeight))
cmn.PanicSanity(fmt.Sprintf("StateBlockHeight (%d) > StoreBlockHeight (%d)", stateBlockHeight, storeBlockHeight))
} else if storeBlockHeight > stateBlockHeight+1 {
// store should be at most one ahead of the state (this is under tendermint's control)
cmn.PanicSanity(cmn.Fmt("StoreBlockHeight (%d) > StateBlockHeight + 1 (%d)", storeBlockHeight, stateBlockHeight+1))
cmn.PanicSanity(fmt.Sprintf("StoreBlockHeight (%d) > StateBlockHeight + 1 (%d)", storeBlockHeight, stateBlockHeight+1))
}
var err error

View File
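
This file also shows the new startup ordering used throughout the diff: the handshaker is no longer passed into proxy.NewAppConns; instead the app connections are created and started first, and Handshake is called on them explicitly. Below is a stripped-down sketch of that ordering with hypothetical stand-in types; the real Handshake additionally queries ABCI Info, logs the app and protocol versions, sets the app version on the state, and replays blocks.

```go
package main

import "fmt"

// appConns and handshaker are simplified stand-ins for proxy.AppConns and
// the consensus Handshaker; only the call ordering from the diff is modelled.
type appConns struct{ started bool }

func (a *appConns) Start() error { a.started = true; return nil }

type handshaker struct{}

// Handshake assumes the app connections are already running.
func (h *handshaker) Handshake(a *appConns) error {
	if !a.started {
		return fmt.Errorf("app conns not started")
	}
	return nil
}

func main() {
	// Create and start the app connections first, then run the handshake
	// explicitly instead of wiring the handshaker into NewAppConns.
	proxyApp := &appConns{}
	if err := proxyApp.Start(); err != nil {
		panic(err)
	}
	h := &handshaker{}
	if err := h.Handshake(proxyApp); err != nil {
		panic(err)
	}
	fmt.Println("handshake complete")
}
```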

@@ -13,12 +13,12 @@ import (
bc "github.com/tendermint/tendermint/blockchain"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
cmn "github.com/tendermint/tendermint/libs/common"
dbm "github.com/tendermint/tendermint/libs/db"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
)
const (
@@ -34,7 +34,7 @@ func RunReplayFile(config cfg.BaseConfig, csConfig *cfg.ConsensusConfig, console
consensusState := newConsensusStateForReplay(config, csConfig)
if err := consensusState.ReplayFile(csConfig.WalFile(), console); err != nil {
cmn.Exit(cmn.Fmt("Error during consensus replay: %v", err))
cmn.Exit(fmt.Sprintf("Error during consensus replay: %v", err))
}
}
@@ -58,7 +58,18 @@ func (cs *ConsensusState) ReplayFile(file string, console bool) error {
if err != nil {
return errors.Errorf("failed to subscribe %s to %v", subscriber, types.EventQueryNewRoundStep)
}
defer cs.eventBus.Unsubscribe(ctx, subscriber, types.EventQueryNewRoundStep)
defer func() {
// drain newStepCh to make sure we don't block
LOOP:
for {
select {
case <-newStepCh:
default:
break LOOP
}
}
cs.eventBus.Unsubscribe(ctx, subscriber, types.EventQueryNewRoundStep)
}()
// just open the file for reading, no need to use wal
fp, err := os.OpenFile(file, os.O_RDONLY, 0600)
@@ -221,7 +232,18 @@ func (pb *playback) replayConsoleLoop() int {
if err != nil {
cmn.Exit(fmt.Sprintf("failed to subscribe %s to %v", subscriber, types.EventQueryNewRoundStep))
}
defer pb.cs.eventBus.Unsubscribe(ctx, subscriber, types.EventQueryNewRoundStep)
defer func() {
// drain newStepCh to make sure we don't block
LOOP:
for {
select {
case <-newStepCh:
default:
break LOOP
}
}
pb.cs.eventBus.Unsubscribe(ctx, subscriber, types.EventQueryNewRoundStep)
}()
if len(tokens) == 1 {
if err := pb.replayReset(1, newStepCh); err != nil {
@@ -298,16 +320,21 @@ func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusCo
// Create proxyAppConn connection (consensus, mempool, query)
clientCreator := proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir())
proxyApp := proxy.NewAppConns(clientCreator,
NewHandshaker(stateDB, state, blockStore, gdoc))
proxyApp := proxy.NewAppConns(clientCreator)
err = proxyApp.Start()
if err != nil {
cmn.Exit(cmn.Fmt("Error starting proxy app conns: %v", err))
cmn.Exit(fmt.Sprintf("Error starting proxy app conns: %v", err))
}
handshaker := NewHandshaker(stateDB, state, blockStore, gdoc)
err = handshaker.Handshake(proxyApp)
if err != nil {
cmn.Exit(fmt.Sprintf("Error on handshake: %v", err))
}
eventBus := types.NewEventBus()
if err := eventBus.Start(); err != nil {
cmn.Exit(cmn.Fmt("Failed to start event bus: %v", err))
cmn.Exit(fmt.Sprintf("Failed to start event bus: %v", err))
}
mempool, evpool := sm.MockMempool{}, sm.MockEvidencePool{}

View File
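
Both defers added in this file drain newStepCh with a non-blocking select before calling Unsubscribe, per the in-code comment "drain newStepCh to make sure we don't block". A generic sketch of that drain idiom, using a plain int channel rather than the real event type:

```go
package main

import "fmt"

// drain empties a buffered channel without ever blocking, the same idiom
// the replay code applies to newStepCh before unsubscribing.
func drain(ch <-chan int) int {
	n := 0
LOOP:
	for {
		select {
		case <-ch:
			n++
		default:
			break LOOP
		}
	}
	return n
}

func main() {
	ch := make(chan int, 4)
	ch <- 1
	ch <- 2
	fmt.Println(drain(ch)) // 2: the channel is now empty, safe to unsubscribe
}
```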

@@ -3,7 +3,6 @@ package consensus
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"io/ioutil"
@@ -20,15 +19,15 @@ import (
abci "github.com/tendermint/tendermint/abci/types"
crypto "github.com/tendermint/tendermint/crypto"
auto "github.com/tendermint/tendermint/libs/autofile"
cmn "github.com/tendermint/tendermint/libs/common"
dbm "github.com/tendermint/tendermint/libs/db"
"github.com/tendermint/tendermint/version"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/privval"
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tendermint/libs/log"
)
var consensusReplayConfig *cfg.Config
@@ -104,14 +103,6 @@ func TestWALCrash(t *testing.T) {
{"empty block",
func(stateDB dbm.DB, cs *ConsensusState, ctx context.Context) {},
1},
{"block with a smaller part size",
func(stateDB dbm.DB, cs *ConsensusState, ctx context.Context) {
// XXX: is there a better way to change BlockPartSizeBytes?
cs.state.ConsensusParams.BlockPartSizeBytes = 512
sm.SaveState(stateDB, cs.state)
go sendTxs(cs, ctx)
},
1},
{"many non-empty blocks",
func(stateDB dbm.DB, cs *ConsensusState, ctx context.Context) {
go sendTxs(cs, ctx)
@@ -347,7 +338,7 @@ func testHandshakeReplay(t *testing.T, nBlocks int, mode uint) {
t.Fatalf(err.Error())
}
stateDB, state, store := stateAndStore(config, privVal.GetPubKey())
stateDB, state, store := stateAndStore(config, privVal.GetPubKey(), kvstore.ProtocolVersion)
store.chain = chain
store.commits = commits
@@ -361,22 +352,25 @@ func testHandshakeReplay(t *testing.T, nBlocks int, mode uint) {
if nBlocks > 0 {
// run nBlocks against a new client to build up the app state.
// use a throwaway tendermint state
proxyApp := proxy.NewAppConns(clientCreator2, nil)
stateDB, state, _ := stateAndStore(config, privVal.GetPubKey())
proxyApp := proxy.NewAppConns(clientCreator2)
stateDB, state, _ := stateAndStore(config, privVal.GetPubKey(), kvstore.ProtocolVersion)
buildAppStateFromChain(proxyApp, stateDB, state, chain, nBlocks, mode)
}
// now start the app using the handshake - it should sync
genDoc, _ := sm.MakeGenesisDocFromFile(config.GenesisFile())
handshaker := NewHandshaker(stateDB, state, store, genDoc)
proxyApp := proxy.NewAppConns(clientCreator2, handshaker)
proxyApp := proxy.NewAppConns(clientCreator2)
if err := proxyApp.Start(); err != nil {
t.Fatalf("Error starting proxy app connections: %v", err)
}
defer proxyApp.Stop()
if err := handshaker.Handshake(proxyApp); err != nil {
t.Fatalf("Error on abci handshake: %v", err)
}
// get the latest app hash from the app
res, err := proxyApp.Query().InfoSync(abci.RequestInfo{""})
res, err := proxyApp.Query().InfoSync(abci.RequestInfo{Version: ""})
if err != nil {
t.Fatal(err)
}
@@ -399,7 +393,7 @@ func testHandshakeReplay(t *testing.T, nBlocks int, mode uint) {
}
func applyBlock(stateDB dbm.DB, st sm.State, blk *types.Block, proxyApp proxy.AppConns) sm.State {
testPartSize := st.ConsensusParams.BlockPartSizeBytes
testPartSize := types.BlockPartSizeBytes
blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool)
blkID := types.BlockID{blk.Hash(), blk.MakePartSet(testPartSize).Header()}
@@ -418,7 +412,7 @@ func buildAppStateFromChain(proxyApp proxy.AppConns, stateDB dbm.DB,
}
defer proxyApp.Stop()
validators := types.TM2PB.Validators(state.Validators)
validators := types.TM2PB.ValidatorUpdates(state.Validators)
if _, err := proxyApp.Consensus().InitChainSync(abci.RequestInitChain{
Validators: validators,
}); err != nil {
@@ -449,13 +443,13 @@ func buildAppStateFromChain(proxyApp proxy.AppConns, stateDB dbm.DB,
func buildTMStateFromChain(config *cfg.Config, stateDB dbm.DB, state sm.State, chain []*types.Block, mode uint) sm.State {
// run the whole chain against this client to build up the tendermint state
clientCreator := proxy.NewLocalClientCreator(kvstore.NewPersistentKVStoreApplication(path.Join(config.DBDir(), "1")))
proxyApp := proxy.NewAppConns(clientCreator, nil) // sm.NewHandshaker(config, state, store, ReplayLastBlock))
proxyApp := proxy.NewAppConns(clientCreator)
if err := proxyApp.Start(); err != nil {
panic(err)
}
defer proxyApp.Stop()
validators := types.TM2PB.Validators(state.Validators)
validators := types.TM2PB.ValidatorUpdates(state.Validators)
if _, err := proxyApp.Consensus().InitChainSync(abci.RequestInitChain{
Validators: validators,
}); err != nil {
@@ -494,7 +488,7 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) {
return nil, nil, err
}
if !found {
return nil, nil, errors.New(cmn.Fmt("WAL does not contain height %d.", 1))
return nil, nil, fmt.Errorf("WAL does not contain height %d.", 1)
}
defer gr.Close() // nolint: errcheck
@@ -526,16 +520,16 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) {
// if it's not the first one, we have a full block
if thisBlockParts != nil {
var block = new(types.Block)
_, err = cdc.UnmarshalBinaryReader(thisBlockParts.GetReader(), block, 0)
_, err = cdc.UnmarshalBinaryLengthPrefixedReader(thisBlockParts.GetReader(), block, 0)
if err != nil {
panic(err)
}
if block.Height != height+1 {
panic(cmn.Fmt("read bad block from wal. got height %d, expected %d", block.Height, height+1))
panic(fmt.Sprintf("read bad block from wal. got height %d, expected %d", block.Height, height+1))
}
commitHeight := thisBlockCommit.Precommits[0].Height
if commitHeight != height+1 {
panic(cmn.Fmt("commit doesnt match. got height %d, expected %d", commitHeight, height+1))
panic(fmt.Sprintf("commit doesnt match. got height %d, expected %d", commitHeight, height+1))
}
blocks = append(blocks, block)
commits = append(commits, thisBlockCommit)
@@ -549,7 +543,7 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) {
return nil, nil, err
}
case *types.Vote:
if p.Type == types.VoteTypePrecommit {
if p.Type == types.PrecommitType {
thisBlockCommit = &types.Commit{
BlockID: p.BlockID,
Precommits: []*types.Vote{p},
@@ -559,16 +553,16 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) {
}
// grab the last block too
var block = new(types.Block)
_, err = cdc.UnmarshalBinaryReader(thisBlockParts.GetReader(), block, 0)
_, err = cdc.UnmarshalBinaryLengthPrefixedReader(thisBlockParts.GetReader(), block, 0)
if err != nil {
panic(err)
}
if block.Height != height+1 {
panic(cmn.Fmt("read bad block from wal. got height %d, expected %d", block.Height, height+1))
panic(fmt.Sprintf("read bad block from wal. got height %d, expected %d", block.Height, height+1))
}
commitHeight := thisBlockCommit.Precommits[0].Height
if commitHeight != height+1 {
panic(cmn.Fmt("commit doesnt match. got height %d, expected %d", commitHeight, height+1))
panic(fmt.Sprintf("commit doesnt match. got height %d, expected %d", commitHeight, height+1))
}
blocks = append(blocks, block)
commits = append(commits, thisBlockCommit)
@@ -581,7 +575,7 @@ func readPieceFromWAL(msg *TimedWALMessage) interface{} {
case msgInfo:
switch msg := m.Msg.(type) {
case *ProposalMessage:
return &msg.Proposal.BlockPartsHeader
return &msg.Proposal.BlockID.PartsHeader
case *BlockPartMessage:
return msg.Part
case *VoteMessage:
@@ -595,9 +589,10 @@ func readPieceFromWAL(msg *TimedWALMessage) interface{} {
}
// fresh state and mock store
func stateAndStore(config *cfg.Config, pubKey crypto.PubKey) (dbm.DB, sm.State, *mockBlockStore) {
func stateAndStore(config *cfg.Config, pubKey crypto.PubKey, appVersion version.Protocol) (dbm.DB, sm.State, *mockBlockStore) {
stateDB := dbm.NewMemDB()
state, _ := sm.MakeGenesisStateFromFile(config.GenesisFile())
state.Version.Consensus.App = appVersion
store := NewMockBlockStore(config, state.ConsensusParams)
return stateDB, state, store
}
@@ -622,7 +617,7 @@ func (bs *mockBlockStore) LoadBlock(height int64) *types.Block { return bs.chain
func (bs *mockBlockStore) LoadBlockMeta(height int64) *types.BlockMeta {
block := bs.chain[height-1]
return &types.BlockMeta{
BlockID: types.BlockID{block.Hash(), block.MakePartSet(bs.params.BlockPartSizeBytes).Header()},
BlockID: types.BlockID{block.Hash(), block.MakePartSet(types.BlockPartSizeBytes).Header()},
Header: block.Header,
}
}
@@ -641,23 +636,26 @@ func (bs *mockBlockStore) LoadSeenCommit(height int64) *types.Commit {
func TestInitChainUpdateValidators(t *testing.T) {
val, _ := types.RandValidator(true, 10)
vals := types.NewValidatorSet([]*types.Validator{val})
app := &initChainApp{vals: types.TM2PB.Validators(vals)}
app := &initChainApp{vals: types.TM2PB.ValidatorUpdates(vals)}
clientCreator := proxy.NewLocalClientCreator(app)
config := ResetConfig("proxy_test_")
privVal := privval.LoadFilePV(config.PrivValidatorFile())
stateDB, state, store := stateAndStore(config, privVal.GetPubKey())
stateDB, state, store := stateAndStore(config, privVal.GetPubKey(), 0x0)
oldValAddr := state.Validators.Validators[0].Address
// now start the app using the handshake - it should sync
genDoc, _ := sm.MakeGenesisDocFromFile(config.GenesisFile())
handshaker := NewHandshaker(stateDB, state, store, genDoc)
proxyApp := proxy.NewAppConns(clientCreator, handshaker)
proxyApp := proxy.NewAppConns(clientCreator)
if err := proxyApp.Start(); err != nil {
t.Fatalf("Error starting proxy app connections: %v", err)
}
defer proxyApp.Stop()
if err := handshaker.Handshake(proxyApp); err != nil {
t.Fatalf("Error on abci handshake: %v", err)
}
// reload the state, check the validator set was updated
state = sm.LoadState(stateDB)
@@ -668,7 +666,7 @@ func TestInitChainUpdateValidators(t *testing.T) {
assert.Equal(t, newValAddr, expectValAddr)
}
func newInitChainApp(vals []abci.Validator) *initChainApp {
func newInitChainApp(vals []abci.ValidatorUpdate) *initChainApp {
return &initChainApp{
vals: vals,
}
@@ -677,7 +675,7 @@ func newInitChainApp(vals []abci.Validator) *initChainApp {
// returns the vals on InitChain
type initChainApp struct {
abci.BaseApplication
vals []abci.Validator
vals []abci.ValidatorUpdate
}
func (ica *initChainApp) InitChain(req abci.RequestInitChain) abci.ResponseInitChain {

View File

@@ -9,9 +9,10 @@ import (
"sync"
"time"
fail "github.com/ebuchman/fail-test"
cmn "github.com/tendermint/tendermint/libs/common"
"github.com/tendermint/tendermint/libs/fail"
"github.com/tendermint/tendermint/libs/log"
tmtime "github.com/tendermint/tendermint/types/time"
cfg "github.com/tendermint/tendermint/config"
cstypes "github.com/tendermint/tendermint/consensus/types"
@@ -74,7 +75,6 @@ type ConsensusState struct {
privValidator types.PrivValidator // for signing votes
// services for creating and executing blocks
// TODO: encapsulate all of this in one "BlockManager"
blockExec *sm.BlockExecutor
blockStore sm.BlockStore
mempool sm.Mempool
@@ -83,7 +83,8 @@ type ConsensusState struct {
// internal state
mtx sync.RWMutex
cstypes.RoundState
state sm.State // State until height-1.
triggeredTimeoutPrecommit bool
state sm.State // State until height-1.
// state changes may be triggered by: msgs from peers,
// msgs from ourself, or by timeouts
@@ -91,6 +92,10 @@ type ConsensusState struct {
internalMsgQueue chan msgInfo
timeoutTicker TimeoutTicker
// information about added votes and block parts is written on this channel
// so statistics can be computed by the reactor (a consumer sketch follows this struct)
statsMsgQueue chan msgInfo
// we use eventBus to trigger msg broadcasts in the reactor,
// and to notify external subscribers, eg. through a websocket
eventBus *types.EventBus
@@ -120,8 +125,8 @@ type ConsensusState struct {
metrics *Metrics
}
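Here is a minimal sketch of how such a stats queue can be consumed: the consensus side pushes a record only for messages that were actually added (the `if added` branches later in this file), and a reactor-side goroutine turns the queue into per-peer counters. Types and field names are hypothetical; only the buffered-channel producer/consumer shape is taken from the diff.

```go
package main

import (
	"fmt"
	"sync"
)

// statsMsg is a stand-in for the msgInfo values pushed onto the stats
// queue after a vote or block part has actually been added.
type statsMsg struct {
	peerID string
	kind   string // "vote" or "block_part"
}

func main() {
	statsQueue := make(chan statsMsg, 8)
	counts := map[string]int{}
	var wg sync.WaitGroup

	// Reactor-side consumer: turn queued messages into per-peer counters.
	wg.Add(1)
	go func() {
		defer wg.Done()
		for m := range statsQueue {
			counts[m.peerID+"/"+m.kind]++
		}
	}()

	// Consensus-side producers: only report messages that were added.
	statsQueue <- statsMsg{peerID: "peer1", kind: "vote"}
	statsQueue <- statsMsg{peerID: "peer1", kind: "block_part"}
	close(statsQueue)
	wg.Wait()

	fmt.Println(counts) // map[peer1/block_part:1 peer1/vote:1]
}
```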
// CSOption sets an optional parameter on the ConsensusState.
type CSOption func(*ConsensusState)
// StateOption sets an optional parameter on the ConsensusState.
type StateOption func(*ConsensusState)
// NewConsensusState returns a new ConsensusState.
func NewConsensusState(
@@ -131,7 +136,7 @@ func NewConsensusState(
blockStore sm.BlockStore,
mempool sm.Mempool,
evpool sm.EvidencePool,
options ...CSOption,
options ...StateOption,
) *ConsensusState {
cs := &ConsensusState{
config: config,
@@ -141,6 +146,7 @@ func NewConsensusState(
peerMsgQueue: make(chan msgInfo, msgQueueSize),
internalMsgQueue: make(chan msgInfo, msgQueueSize),
timeoutTicker: NewTimeoutTicker(),
statsMsgQueue: make(chan msgInfo, msgQueueSize),
done: make(chan struct{}),
doWALCatchup: true,
wal: nilWAL{},
@@ -154,6 +160,7 @@ func NewConsensusState(
cs.setProposal = cs.defaultSetProposal
cs.updateToState(state)
// Don't call scheduleRound0 yet.
// We do that upon Start().
cs.reconstructLastCommit(state)
@@ -179,15 +186,15 @@ func (cs *ConsensusState) SetEventBus(b *types.EventBus) {
cs.blockExec.SetEventBus(b)
}
// WithMetrics sets the metrics.
func WithMetrics(metrics *Metrics) CSOption {
// StateMetrics sets the metrics.
func StateMetrics(metrics *Metrics) StateOption {
return func(cs *ConsensusState) { cs.metrics = metrics }
}
// String returns a string.
func (cs *ConsensusState) String() string {
// better not to access shared variables
return cmn.Fmt("ConsensusState") //(H:%v R:%v S:%v", cs.Height, cs.Round, cs.Step)
return fmt.Sprintf("ConsensusState") //(H:%v R:%v S:%v", cs.Height, cs.Round, cs.Step)
}
// GetState returns a copy of the chain state.
@@ -197,12 +204,19 @@ func (cs *ConsensusState) GetState() sm.State {
return cs.state.Copy()
}
// GetLastHeight returns the last height committed.
// If there were no blocks, returns 0.
func (cs *ConsensusState) GetLastHeight() int64 {
cs.mtx.RLock()
defer cs.mtx.RUnlock()
return cs.RoundState.Height - 1
}
// GetRoundState returns a shallow copy of the internal consensus state.
func (cs *ConsensusState) GetRoundState() *cstypes.RoundState {
cs.mtx.RLock()
defer cs.mtx.RUnlock()
rs := cs.RoundState // copy
cs.mtx.RUnlock()
return &rs
}
@@ -210,7 +224,6 @@ func (cs *ConsensusState) GetRoundState() *cstypes.RoundState {
func (cs *ConsensusState) GetRoundStateJSON() ([]byte, error) {
cs.mtx.RLock()
defer cs.mtx.RUnlock()
return cdc.MarshalJSON(cs.RoundState)
}
@@ -218,7 +231,6 @@ func (cs *ConsensusState) GetRoundStateJSON() ([]byte, error) {
func (cs *ConsensusState) GetRoundStateSimpleJSON() ([]byte, error) {
cs.mtx.RLock()
defer cs.mtx.RUnlock()
return cdc.MarshalJSON(cs.RoundState.RoundStateSimple())
}
@@ -232,15 +244,15 @@ func (cs *ConsensusState) GetValidators() (int64, []*types.Validator) {
// SetPrivValidator sets the private validator account for signing votes.
func (cs *ConsensusState) SetPrivValidator(priv types.PrivValidator) {
cs.mtx.Lock()
defer cs.mtx.Unlock()
cs.privValidator = priv
cs.mtx.Unlock()
}
// SetTimeoutTicker sets the local timer. It may be useful to overwrite for testing.
func (cs *ConsensusState) SetTimeoutTicker(timeoutTicker TimeoutTicker) {
cs.mtx.Lock()
defer cs.mtx.Unlock()
cs.timeoutTicker = timeoutTicker
cs.mtx.Unlock()
}
// LoadCommit loads the commit for a given height.
@@ -413,8 +425,8 @@ func (cs *ConsensusState) updateRoundStep(round int, step cstypes.RoundStepType)
// enterNewRound(height, 0) at cs.StartTime.
func (cs *ConsensusState) scheduleRound0(rs *cstypes.RoundState) {
//cs.Logger.Info("scheduleRound0", "now", time.Now(), "startTime", cs.StartTime)
sleepDuration := rs.StartTime.Sub(time.Now()) // nolint: gotype, gosimple
//cs.Logger.Info("scheduleRound0", "now", tmtime.Now(), "startTime", cs.StartTime)
sleepDuration := rs.StartTime.Sub(tmtime.Now()) // nolint: gotype, gosimple
cs.scheduleTimeout(sleepDuration, rs.Height, 0, cstypes.RoundStepNewHeight)
}
@@ -444,14 +456,14 @@ func (cs *ConsensusState) reconstructLastCommit(state sm.State) {
return
}
seenCommit := cs.blockStore.LoadSeenCommit(state.LastBlockHeight)
lastPrecommits := types.NewVoteSet(state.ChainID, state.LastBlockHeight, seenCommit.Round(), types.VoteTypePrecommit, state.LastValidators)
lastPrecommits := types.NewVoteSet(state.ChainID, state.LastBlockHeight, seenCommit.Round(), types.PrecommitType, state.LastValidators)
for _, precommit := range seenCommit.Precommits {
if precommit == nil {
continue
}
added, err := lastPrecommits.AddVote(precommit)
if !added || err != nil {
cmn.PanicCrisis(cmn.Fmt("Failed to reconstruct LastCommit: %v", err))
cmn.PanicCrisis(fmt.Sprintf("Failed to reconstruct LastCommit: %v", err))
}
}
if !lastPrecommits.HasTwoThirdsMajority() {
@@ -464,13 +476,13 @@ func (cs *ConsensusState) reconstructLastCommit(state sm.State) {
// The round becomes 0 and cs.Step becomes cstypes.RoundStepNewHeight.
func (cs *ConsensusState) updateToState(state sm.State) {
if cs.CommitRound > -1 && 0 < cs.Height && cs.Height != state.LastBlockHeight {
cmn.PanicSanity(cmn.Fmt("updateToState() expected state height of %v but found %v",
cmn.PanicSanity(fmt.Sprintf("updateToState() expected state height of %v but found %v",
cs.Height, state.LastBlockHeight))
}
if !cs.state.IsEmpty() && cs.state.LastBlockHeight+1 != cs.Height {
// This might happen when someone else is mutating cs.state.
// Someone forgot to pass in state.Copy() somewhere?!
cmn.PanicSanity(cmn.Fmt("Inconsistent cs.state.LastBlockHeight+1 %v vs cs.Height %v",
cmn.PanicSanity(fmt.Sprintf("Inconsistent cs.state.LastBlockHeight+1 %v vs cs.Height %v",
cs.state.LastBlockHeight+1, cs.Height))
}
@@ -507,7 +519,7 @@ func (cs *ConsensusState) updateToState(state sm.State) {
// to be gathered for the first block.
// An alternative solution that relies on clocks:
// cs.StartTime = state.LastBlockTime.Add(timeoutCommit)
cs.StartTime = cs.config.Commit(time.Now())
cs.StartTime = cs.config.Commit(tmtime.Now())
} else {
cs.StartTime = cs.config.Commit(cs.CommitTime)
}
@@ -516,10 +528,10 @@ func (cs *ConsensusState) updateToState(state sm.State) {
cs.Proposal = nil
cs.ProposalBlock = nil
cs.ProposalBlockParts = nil
cs.LockedRound = 0
cs.LockedRound = -1
cs.LockedBlock = nil
cs.LockedBlockParts = nil
cs.ValidRound = 0
cs.ValidRound = -1
cs.ValidBlock = nil
cs.ValidBlockParts = nil
cs.Votes = cstypes.NewHeightVoteSet(state.ChainID, height, validators)
@@ -553,9 +565,30 @@ func (cs *ConsensusState) newStep() {
// Updates (state transitions) happen on timeouts, complete proposals, and 2/3 majorities.
// ConsensusState must be locked before any internal state is updated.
func (cs *ConsensusState) receiveRoutine(maxSteps int) {
onExit := func(cs *ConsensusState) {
// NOTE: the internalMsgQueue may have signed messages from our
// priv_val that haven't hit the WAL, but it's ok because
// priv_val tracks LastSig
// close wal now that we're done writing to it
cs.wal.Stop()
cs.wal.Wait()
close(cs.done)
}
defer func() {
if r := recover(); r != nil {
cs.Logger.Error("CONSENSUS FAILURE!!!", "err", r, "stack", string(debug.Stack()))
// stop gracefully
//
// NOTE: We most probably shouldn't be running any further when there is
// some unexpected panic. Some unknown error happened, and so we don't
// know if that will result in the validator signing an invalid thing. It
// might be worthwhile to explore a mechanism for manual resuming via
// some console or secure RPC system, but for now, halting the chain upon
// unexpected consensus bugs sounds like the better option.
onExit(cs)
}
}()
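The shutdown logic of receiveRoutine is now shared between the panic-recovery defer above and the normal Quit path in the next hunk through a single onExit closure, so the WAL is stopped and drained even when consensus hits an unexpected bug. A generic sketch of that recover-then-cleanup shape, with print statements standing in for the real WAL and done-channel handling:

```go
package main

import (
	"fmt"
	"runtime/debug"
)

// run shows the shape of the change: one onExit closure used both by the
// normal shutdown path and by the recover branch, so cleanup is never
// skipped on a panic.
func run(quit <-chan struct{}, boom bool) {
	onExit := func() {
		fmt.Println("cleanup: stop WAL, close done channel")
	}
	defer func() {
		if r := recover(); r != nil {
			fmt.Println("CONSENSUS FAILURE!!!", r)
			_ = debug.Stack() // logged in the real code
			onExit()          // stop gracefully rather than leak state
		}
	}()

	if boom {
		panic("unexpected consensus bug")
	}
	<-quit
	onExit()
}

func main() {
	quit := make(chan struct{})
	close(quit)
	run(quit, false) // normal shutdown
	run(quit, true)  // panic path still runs cleanup
}
```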
@@ -588,16 +621,7 @@ func (cs *ConsensusState) receiveRoutine(maxSteps int) {
// go to the next step
cs.handleTimeout(ti, rs)
case <-cs.Quit():
// NOTE: the internalMsgQueue may have signed messages from our
// priv_val that haven't hit the WAL, but its ok because
// priv_val tracks LastSig
// close wal now that we're done writing to it
cs.wal.Stop()
cs.wal.Wait()
close(cs.done)
onExit(cs)
return
}
}
@@ -617,7 +641,11 @@ func (cs *ConsensusState) handleMsg(mi msgInfo) {
err = cs.setProposal(msg.Proposal)
case *BlockPartMessage:
// if the proposal is complete, we'll enterPrevote or tryFinalizeCommit
_, err = cs.addProposalBlockPart(msg, peerID)
added, err := cs.addProposalBlockPart(msg, peerID)
if added {
cs.statsMsgQueue <- mi
}
if err != nil && msg.Round != cs.Round {
cs.Logger.Debug("Received block part from wrong round", "height", cs.Height, "csRound", cs.Round, "blockRound", msg.Round)
err = nil
@@ -625,7 +653,11 @@ func (cs *ConsensusState) handleMsg(mi msgInfo) {
case *VoteMessage:
// attempt to add the vote and dupeout the validator if it's a duplicate signature
// if the vote gives us a 2/3-any or 2/3-one, we transition
err := cs.tryAddVote(msg.Vote, peerID)
added, err := cs.tryAddVote(msg.Vote, peerID)
if added {
cs.statsMsgQueue <- mi
}
if err == ErrAddingVote {
// TODO: punish peer
// We probably don't want to stop the peer here. The vote does not
@@ -676,9 +708,10 @@ func (cs *ConsensusState) handleTimeout(ti timeoutInfo, rs cstypes.RoundState) {
cs.enterPrecommit(ti.Height, ti.Round)
case cstypes.RoundStepPrecommitWait:
cs.eventBus.PublishEventTimeoutWait(cs.RoundStateEvent())
cs.enterPrecommit(ti.Height, ti.Round)
cs.enterNewRound(ti.Height, ti.Round+1)
default:
panic(cmn.Fmt("Invalid timeout step: %v", ti.Step))
panic(fmt.Sprintf("Invalid timeout step: %v", ti.Step))
}
}
@@ -704,15 +737,15 @@ func (cs *ConsensusState) enterNewRound(height int64, round int) {
logger := cs.Logger.With("height", height, "round", round)
if cs.Height != height || round < cs.Round || (cs.Round == round && cs.Step != cstypes.RoundStepNewHeight) {
logger.Debug(cmn.Fmt("enterNewRound(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
logger.Debug(fmt.Sprintf("enterNewRound(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
return
}
if now := time.Now(); cs.StartTime.After(now) {
if now := tmtime.Now(); cs.StartTime.After(now) {
logger.Info("Need to set a buffer and log message here for sanity.", "startTime", cs.StartTime, "now", now)
}
logger.Info(cmn.Fmt("enterNewRound(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
logger.Info(fmt.Sprintf("enterNewRound(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
// Increment validators if necessary
validators := cs.Validators
@@ -737,6 +770,7 @@ func (cs *ConsensusState) enterNewRound(height int64, round int) {
cs.ProposalBlockParts = nil
}
cs.Votes.SetRound(round + 1) // also track next round (round+1) to allow round-skipping
cs.triggeredTimeoutPrecommit = false
cs.eventBus.PublishEventNewRound(cs.RoundStateEvent())
cs.metrics.Rounds.Set(float64(round))
@@ -747,7 +781,8 @@ func (cs *ConsensusState) enterNewRound(height int64, round int) {
waitForTxs := cs.config.WaitForTxs() && round == 0 && !cs.needProofBlock(height)
if waitForTxs {
if cs.config.CreateEmptyBlocksInterval > 0 {
cs.scheduleTimeout(cs.config.EmptyBlocksInterval(), height, round, cstypes.RoundStepNewRound)
cs.scheduleTimeout(cs.config.CreateEmptyBlocksInterval, height, round,
cstypes.RoundStepNewRound)
}
go cs.proposalHeartbeat(height, round)
} else {
@@ -799,10 +834,10 @@ func (cs *ConsensusState) enterPropose(height int64, round int) {
logger := cs.Logger.With("height", height, "round", round)
if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPropose <= cs.Step) {
logger.Debug(cmn.Fmt("enterPropose(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
logger.Debug(fmt.Sprintf("enterPropose(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
return
}
logger.Info(cmn.Fmt("enterPropose(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
logger.Info(fmt.Sprintf("enterPropose(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
defer func() {
// Done enterPropose:
@@ -850,10 +885,7 @@ func (cs *ConsensusState) defaultDecideProposal(height int64, round int) {
var blockParts *types.PartSet
// Decide on block
if cs.LockedBlock != nil {
// If we're locked onto a block, just choose that.
block, blockParts = cs.LockedBlock, cs.LockedBlockParts
} else if cs.ValidBlock != nil {
if cs.ValidBlock != nil {
// If there is valid block, choose that.
block, blockParts = cs.ValidBlock, cs.ValidBlockParts
} else {
@@ -865,15 +897,9 @@ func (cs *ConsensusState) defaultDecideProposal(height int64, round int) {
}
// Make proposal
polRound, polBlockID := cs.Votes.POLInfo()
proposal := types.NewProposal(height, round, blockParts.Header(), polRound, polBlockID)
propBlockId := types.BlockID{block.Hash(), blockParts.Header()}
proposal := types.NewProposal(height, round, cs.ValidRound, propBlockId)
if err := cs.privValidator.SignProposal(cs.state.ChainID, proposal); err == nil {
// Set fields
/* fields set by setProposal and addBlockPart
cs.Proposal = proposal
cs.ProposalBlock = block
cs.ProposalBlockParts = blockParts
*/
// send proposal and block parts on internal msg queue
cs.sendInternalMessage(msgInfo{&ProposalMessage{proposal}, ""})
@@ -882,7 +908,7 @@ func (cs *ConsensusState) defaultDecideProposal(height int64, round int) {
cs.sendInternalMessage(msgInfo{&BlockPartMessage{cs.Height, cs.Round, part}, ""})
}
cs.Logger.Info("Signed proposal", "height", height, "round", round, "proposal", proposal)
cs.Logger.Debug(cmn.Fmt("Signed proposal block: %v", block))
cs.Logger.Debug(fmt.Sprintf("Signed proposal block: %v", block))
} else {
if !cs.replayMode {
cs.Logger.Error("enterPropose: Error signing proposal", "height", height, "round", round, "err", err)
@@ -926,21 +952,29 @@ func (cs *ConsensusState) createProposalBlock() (block *types.Block, blockParts
return
}
maxBytes := cs.state.ConsensusParams.BlockSize.MaxBytes
maxGas := cs.state.ConsensusParams.BlockSize.MaxGas
// bound evidence to 1/10th of the block
evidence := cs.evpool.PendingEvidence(types.MaxEvidenceBytesPerBlock(maxBytes))
// Mempool validated transactions
txs := cs.mempool.Reap(cs.state.ConsensusParams.BlockSize.MaxTxs)
evidence := cs.evpool.PendingEvidence()
block, parts := cs.state.MakeBlock(cs.Height, txs, commit, evidence)
txs := cs.mempool.ReapMaxBytesMaxGas(types.MaxDataBytes(
maxBytes,
cs.state.Validators.Size(),
len(evidence),
), maxGas)
proposerAddr := cs.privValidator.GetAddress()
block, parts := cs.state.MakeBlock(cs.Height, txs, commit, evidence, proposerAddr)
return block, parts
}
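createProposalBlock now works from an explicit byte budget: evidence is bounded to one tenth of the block, and transactions are reaped up to whatever MaxDataBytes leaves after accounting for the header, the commit votes and the evidence, plus a gas cap. The sketch below illustrates only that budgeting arithmetic; the overhead constants are made-up placeholders, not the real values behind types.MaxDataBytes.

```go
package main

import "fmt"

// Hypothetical per-item overheads, used only to illustrate the budgeting.
const (
	headerAndCommitOverhead = 1024 // rough fixed cost per block in this sketch
	perVoteOverhead         = 256  // per validator precommit
	perEvidenceOverhead     = 512  // per evidence item
)

// evidenceBudget mirrors "bound evidence to 1/10th of the block".
func evidenceBudget(maxBlockBytes int64) int64 {
	return maxBlockBytes / 10
}

// txBudget mirrors the idea behind types.MaxDataBytes: whatever remains
// after the header, the commit votes and the evidence may go to txs.
func txBudget(maxBlockBytes int64, numValidators, numEvidence int) int64 {
	used := int64(headerAndCommitOverhead) +
		int64(numValidators)*perVoteOverhead +
		int64(numEvidence)*perEvidenceOverhead
	if used >= maxBlockBytes {
		return 0
	}
	return maxBlockBytes - used
}

func main() {
	maxBytes := int64(22020096) // e.g. a 21 MiB block size limit
	fmt.Println("evidence budget:", evidenceBudget(maxBytes))
	fmt.Println("tx budget:", txBudget(maxBytes, 100, 3))
}
```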
// Enter: `timeoutPropose` after entering Propose.
// Enter: proposal block and POL is ready.
// Enter: any +2/3 prevotes for future round.
// Prevote for LockedBlock if we're locked, or ProposalBlock if valid.
// Otherwise vote nil.
func (cs *ConsensusState) enterPrevote(height int64, round int) {
if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrevote <= cs.Step) {
cs.Logger.Debug(cmn.Fmt("enterPrevote(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
cs.Logger.Debug(fmt.Sprintf("enterPrevote(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
return
}
@@ -950,15 +984,7 @@ func (cs *ConsensusState) enterPrevote(height int64, round int) {
cs.newStep()
}()
// fire event for how we got here
if cs.isProposalComplete() {
cs.eventBus.PublishEventCompleteProposal(cs.RoundStateEvent())
} else {
// we received +2/3 prevotes for a future round
// TODO: catchup event?
}
cs.Logger.Info(cmn.Fmt("enterPrevote(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
cs.Logger.Info(fmt.Sprintf("enterPrevote(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
// Sign and broadcast vote as necessary
cs.doPrevote(height, round)
@@ -969,17 +995,18 @@ func (cs *ConsensusState) enterPrevote(height int64, round int) {
func (cs *ConsensusState) defaultDoPrevote(height int64, round int) {
logger := cs.Logger.With("height", height, "round", round)
// If a block is locked, prevote that.
if cs.LockedBlock != nil {
logger.Info("enterPrevote: Block was locked")
cs.signAddVote(types.VoteTypePrevote, cs.LockedBlock.Hash(), cs.LockedBlockParts.Header())
cs.signAddVote(types.PrevoteType, cs.LockedBlock.Hash(), cs.LockedBlockParts.Header())
return
}
// If ProposalBlock is nil, prevote nil.
if cs.ProposalBlock == nil {
logger.Info("enterPrevote: ProposalBlock is nil")
cs.signAddVote(types.VoteTypePrevote, nil, types.PartSetHeader{})
cs.signAddVote(types.PrevoteType, nil, types.PartSetHeader{})
return
}
@@ -988,7 +1015,7 @@ func (cs *ConsensusState) defaultDoPrevote(height int64, round int) {
if err != nil {
// ProposalBlock is invalid, prevote nil.
logger.Error("enterPrevote: ProposalBlock is invalid", "err", err)
cs.signAddVote(types.VoteTypePrevote, nil, types.PartSetHeader{})
cs.signAddVote(types.PrevoteType, nil, types.PartSetHeader{})
return
}
@@ -996,7 +1023,7 @@ func (cs *ConsensusState) defaultDoPrevote(height int64, round int) {
// NOTE: the proposal signature is validated when it is received,
// and the proposal block parts are validated as they are received (against the merkle hash in the proposal)
logger.Info("enterPrevote: ProposalBlock is valid")
cs.signAddVote(types.VoteTypePrevote, cs.ProposalBlock.Hash(), cs.ProposalBlockParts.Header())
cs.signAddVote(types.PrevoteType, cs.ProposalBlock.Hash(), cs.ProposalBlockParts.Header())
}
// Enter: any +2/3 prevotes at next round.
@@ -1004,13 +1031,13 @@ func (cs *ConsensusState) enterPrevoteWait(height int64, round int) {
logger := cs.Logger.With("height", height, "round", round)
if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrevoteWait <= cs.Step) {
logger.Debug(cmn.Fmt("enterPrevoteWait(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
logger.Debug(fmt.Sprintf("enterPrevoteWait(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
return
}
if !cs.Votes.Prevotes(round).HasTwoThirdsAny() {
cmn.PanicSanity(cmn.Fmt("enterPrevoteWait(%v/%v), but Prevotes does not have any +2/3 votes", height, round))
cmn.PanicSanity(fmt.Sprintf("enterPrevoteWait(%v/%v), but Prevotes does not have any +2/3 votes", height, round))
}
logger.Info(cmn.Fmt("enterPrevoteWait(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
logger.Info(fmt.Sprintf("enterPrevoteWait(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
defer func() {
// Done enterPrevoteWait:
@@ -1023,8 +1050,8 @@ func (cs *ConsensusState) enterPrevoteWait(height int64, round int) {
}
// Enter: `timeoutPrevote` after any +2/3 prevotes.
// Enter: `timeoutPrecommit` after any +2/3 precommits.
// Enter: +2/3 precommits for block or nil.
// Enter: any +2/3 precommits for next round.
// Lock & precommit the ProposalBlock if we have enough prevotes for it (a POL in this round)
// else, unlock an existing lock and precommit nil if +2/3 of prevotes were nil,
// else, precommit nil otherwise.
@@ -1032,11 +1059,11 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) {
logger := cs.Logger.With("height", height, "round", round)
if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrecommit <= cs.Step) {
logger.Debug(cmn.Fmt("enterPrecommit(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
logger.Debug(fmt.Sprintf("enterPrecommit(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
return
}
logger.Info(cmn.Fmt("enterPrecommit(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
logger.Info(fmt.Sprintf("enterPrecommit(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
defer func() {
// Done enterPrecommit:
@@ -1054,7 +1081,7 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) {
} else {
logger.Info("enterPrecommit: No +2/3 prevotes during enterPrecommit. Precommitting nil.")
}
cs.signAddVote(types.VoteTypePrecommit, nil, types.PartSetHeader{})
cs.signAddVote(types.PrecommitType, nil, types.PartSetHeader{})
return
}
@@ -1064,7 +1091,7 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) {
// the latest POLRound should be this round.
polRound, _ := cs.Votes.POLInfo()
if polRound < round {
cmn.PanicSanity(cmn.Fmt("This POLRound should be %v but got %", round, polRound))
cmn.PanicSanity(fmt.Sprintf("This POLRound should be %v but got %v", round, polRound))
}
// +2/3 prevoted nil. Unlock and precommit nil.
@@ -1073,12 +1100,12 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) {
logger.Info("enterPrecommit: +2/3 prevoted for nil.")
} else {
logger.Info("enterPrecommit: +2/3 prevoted for nil. Unlocking")
cs.LockedRound = 0
cs.LockedRound = -1
cs.LockedBlock = nil
cs.LockedBlockParts = nil
cs.eventBus.PublishEventUnlock(cs.RoundStateEvent())
}
cs.signAddVote(types.VoteTypePrecommit, nil, types.PartSetHeader{})
cs.signAddVote(types.PrecommitType, nil, types.PartSetHeader{})
return
}
@@ -1089,7 +1116,7 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) {
logger.Info("enterPrecommit: +2/3 prevoted locked block. Relocking")
cs.LockedRound = round
cs.eventBus.PublishEventRelock(cs.RoundStateEvent())
cs.signAddVote(types.VoteTypePrecommit, blockID.Hash, blockID.PartsHeader)
cs.signAddVote(types.PrecommitType, blockID.Hash, blockID.PartsHeader)
return
}
@@ -1098,13 +1125,13 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) {
logger.Info("enterPrecommit: +2/3 prevoted proposal block. Locking", "hash", blockID.Hash)
// Validate the block.
if err := cs.blockExec.ValidateBlock(cs.state, cs.ProposalBlock); err != nil {
cmn.PanicConsensus(cmn.Fmt("enterPrecommit: +2/3 prevoted for an invalid block: %v", err))
cmn.PanicConsensus(fmt.Sprintf("enterPrecommit: +2/3 prevoted for an invalid block: %v", err))
}
cs.LockedRound = round
cs.LockedBlock = cs.ProposalBlock
cs.LockedBlockParts = cs.ProposalBlockParts
cs.eventBus.PublishEventLock(cs.RoundStateEvent())
cs.signAddVote(types.VoteTypePrecommit, blockID.Hash, blockID.PartsHeader)
cs.signAddVote(types.PrecommitType, blockID.Hash, blockID.PartsHeader)
return
}
@@ -1112,7 +1139,7 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) {
// Fetch that block, unlock, and precommit nil.
// The +2/3 prevotes for this round is the POL for our unlock.
// TODO: In the future save the POL prevotes for justification.
cs.LockedRound = 0
cs.LockedRound = -1
cs.LockedBlock = nil
cs.LockedBlockParts = nil
if !cs.ProposalBlockParts.HasHeader(blockID.PartsHeader) {
@@ -1120,25 +1147,29 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) {
cs.ProposalBlockParts = types.NewPartSetFromHeader(blockID.PartsHeader)
}
cs.eventBus.PublishEventUnlock(cs.RoundStateEvent())
cs.signAddVote(types.VoteTypePrecommit, nil, types.PartSetHeader{})
cs.signAddVote(types.PrecommitType, nil, types.PartSetHeader{})
}
// Enter: any +2/3 precommits for next round.
func (cs *ConsensusState) enterPrecommitWait(height int64, round int) {
logger := cs.Logger.With("height", height, "round", round)
if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrecommitWait <= cs.Step) {
logger.Debug(cmn.Fmt("enterPrecommitWait(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
if cs.Height != height || round < cs.Round || (cs.Round == round && cs.triggeredTimeoutPrecommit) {
logger.Debug(
fmt.Sprintf(
"enterPrecommitWait(%v/%v): Invalid args. "+
"Current state is Height/Round: %v/%v/, triggeredTimeoutPrecommit:%v",
height, round, cs.Height, cs.Round, cs.triggeredTimeoutPrecommit))
return
}
if !cs.Votes.Precommits(round).HasTwoThirdsAny() {
cmn.PanicSanity(cmn.Fmt("enterPrecommitWait(%v/%v), but Precommits does not have any +2/3 votes", height, round))
cmn.PanicSanity(fmt.Sprintf("enterPrecommitWait(%v/%v), but Precommits does not have any +2/3 votes", height, round))
}
logger.Info(cmn.Fmt("enterPrecommitWait(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
logger.Info(fmt.Sprintf("enterPrecommitWait(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
defer func() {
// Done enterPrecommitWait:
cs.updateRoundStep(round, cstypes.RoundStepPrecommitWait)
cs.triggeredTimeoutPrecommit = true
cs.newStep()
}()
@@ -1152,17 +1183,17 @@ func (cs *ConsensusState) enterCommit(height int64, commitRound int) {
logger := cs.Logger.With("height", height, "commitRound", commitRound)
if cs.Height != height || cstypes.RoundStepCommit <= cs.Step {
logger.Debug(cmn.Fmt("enterCommit(%v/%v): Invalid args. Current step: %v/%v/%v", height, commitRound, cs.Height, cs.Round, cs.Step))
logger.Debug(fmt.Sprintf("enterCommit(%v/%v): Invalid args. Current step: %v/%v/%v", height, commitRound, cs.Height, cs.Round, cs.Step))
return
}
logger.Info(cmn.Fmt("enterCommit(%v/%v). Current: %v/%v/%v", height, commitRound, cs.Height, cs.Round, cs.Step))
logger.Info(fmt.Sprintf("enterCommit(%v/%v). Current: %v/%v/%v", height, commitRound, cs.Height, cs.Round, cs.Step))
defer func() {
// Done enterCommit:
// keep cs.Round the same, commitRound points to the right Precommits set.
cs.updateRoundStep(cs.Round, cstypes.RoundStepCommit)
cs.CommitRound = commitRound
cs.CommitTime = time.Now()
cs.CommitTime = tmtime.Now()
cs.newStep()
// Maybe finalize immediately.
@@ -1191,6 +1222,8 @@ func (cs *ConsensusState) enterCommit(height int64, commitRound int) {
// Set up ProposalBlockParts and keep waiting.
cs.ProposalBlock = nil
cs.ProposalBlockParts = types.NewPartSetFromHeader(blockID.PartsHeader)
cs.eventBus.PublishEventValidBlock(cs.RoundStateEvent())
cs.evsw.FireEvent(types.EventValidBlock, &cs.RoundState)
} else {
// We just need to keep waiting.
}
@@ -1202,7 +1235,7 @@ func (cs *ConsensusState) tryFinalizeCommit(height int64) {
logger := cs.Logger.With("height", height)
if cs.Height != height {
cmn.PanicSanity(cmn.Fmt("tryFinalizeCommit() cs.Height: %v vs height: %v", cs.Height, height))
cmn.PanicSanity(fmt.Sprintf("tryFinalizeCommit() cs.Height: %v vs height: %v", cs.Height, height))
}
blockID, ok := cs.Votes.Precommits(cs.CommitRound).TwoThirdsMajority()
@@ -1224,7 +1257,7 @@ func (cs *ConsensusState) tryFinalizeCommit(height int64) {
// Increment height and goto cstypes.RoundStepNewHeight
func (cs *ConsensusState) finalizeCommit(height int64) {
if cs.Height != height || cs.Step != cstypes.RoundStepCommit {
cs.Logger.Debug(cmn.Fmt("finalizeCommit(%v): Invalid args. Current step: %v/%v/%v", height, cs.Height, cs.Round, cs.Step))
cs.Logger.Debug(fmt.Sprintf("finalizeCommit(%v): Invalid args. Current step: %v/%v/%v", height, cs.Height, cs.Round, cs.Step))
return
}
@@ -1232,21 +1265,21 @@ func (cs *ConsensusState) finalizeCommit(height int64) {
block, blockParts := cs.ProposalBlock, cs.ProposalBlockParts
if !ok {
cmn.PanicSanity(cmn.Fmt("Cannot finalizeCommit, commit does not have two thirds majority"))
cmn.PanicSanity(fmt.Sprintf("Cannot finalizeCommit, commit does not have two thirds majority"))
}
if !blockParts.HasHeader(blockID.PartsHeader) {
cmn.PanicSanity(cmn.Fmt("Expected ProposalBlockParts header to be commit header"))
cmn.PanicSanity(fmt.Sprintf("Expected ProposalBlockParts header to be commit header"))
}
if !block.HashesTo(blockID.Hash) {
cmn.PanicSanity(cmn.Fmt("Cannot finalizeCommit, ProposalBlock does not hash to commit hash"))
cmn.PanicSanity(fmt.Sprintf("Cannot finalizeCommit, ProposalBlock does not hash to commit hash"))
}
if err := cs.blockExec.ValidateBlock(cs.state, block); err != nil {
cmn.PanicConsensus(cmn.Fmt("+2/3 committed an invalid block: %v", err))
cmn.PanicConsensus(fmt.Sprintf("+2/3 committed an invalid block: %v", err))
}
cs.Logger.Info(cmn.Fmt("Finalizing commit of block with %d txs", block.NumTxs),
cs.Logger.Info(fmt.Sprintf("Finalizing commit of block with %d txs", block.NumTxs),
"height", block.Height, "hash", block.Hash(), "root", block.AppHash)
cs.Logger.Info(cmn.Fmt("%v", block))
cs.Logger.Info(fmt.Sprintf("%v", block))
fail.Fail() // XXX
@@ -1345,7 +1378,7 @@ func (cs *ConsensusState) recordMetrics(height int64, block *types.Block) {
if height > 1 {
lastBlockMeta := cs.blockStore.LoadBlockMeta(height - 1)
cs.metrics.BlockIntervalSeconds.Observe(
cs.metrics.BlockIntervalSeconds.Set(
block.Time.Sub(lastBlockMeta.Header.Time).Seconds(),
)
}
@@ -1353,6 +1386,8 @@ func (cs *ConsensusState) recordMetrics(height int64, block *types.Block) {
cs.metrics.NumTxs.Set(float64(block.NumTxs))
cs.metrics.BlockSizeBytes.Set(float64(block.Size()))
cs.metrics.TotalTxs.Set(float64(block.TotalTxs))
cs.metrics.CommittedHeight.Set(float64(block.Height))
}
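The new CommittedHeight metric above is a plain gauge updated once per committed block. Purely as an illustrative sketch (not code from this changeset), a Prometheus gauge with made-up names could back such a metric like this:

```
// Illustrative sketch only: a hypothetical Prometheus gauge in the spirit of
// the CommittedHeight metric set above. Namespace and name are invented.
package metricsketch

import "github.com/prometheus/client_golang/prometheus"

var committedHeight = prometheus.NewGauge(prometheus.GaugeOpts{
	Namespace: "consensus",
	Name:      "committed_height",
	Help:      "Height of the last committed block.",
})

func init() {
	prometheus.MustRegister(committedHeight)
}

// observeCommit mirrors what recordMetrics does for the real metric:
// it is called once per committed block with the block height.
func observeCommit(height int64) {
	committedHeight.Set(float64(height))
}
```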
//-----------------------------------------------------------------------------
@@ -1369,14 +1404,9 @@ func (cs *ConsensusState) defaultSetProposal(proposal *types.Proposal) error {
return nil
}
// We don't care about the proposal if we're already in cstypes.RoundStepCommit.
if cstypes.RoundStepCommit <= cs.Step {
return nil
}
// Verify POLRound, which must be -1 or between 0 and proposal.Round exclusive.
if proposal.POLRound != -1 &&
(proposal.POLRound < 0 || proposal.Round <= proposal.POLRound) {
// Verify POLRound, which must be -1 or in range [0, proposal.Round).
if proposal.POLRound < -1 ||
(proposal.POLRound >= 0 && proposal.POLRound >= proposal.Round) {
return ErrInvalidProposalPOLRound
}
@@ -1386,7 +1416,12 @@ func (cs *ConsensusState) defaultSetProposal(proposal *types.Proposal) error {
}
cs.Proposal = proposal
cs.ProposalBlockParts = types.NewPartSetFromHeader(proposal.BlockPartsHeader)
// We don't update cs.ProposalBlockParts if it is already set.
// This happens if we're already in cstypes.RoundStepCommit or if there is a valid block in the current round.
// TODO: We can check if Proposal is for a different block as this is a sign of misbehavior!
if cs.ProposalBlockParts == nil {
cs.ProposalBlockParts = types.NewPartSetFromHeader(proposal.BlockID.PartsHeader)
}
cs.Logger.Info("Received proposal", "proposal", proposal)
return nil
}
@@ -1417,12 +1452,17 @@ func (cs *ConsensusState) addProposalBlockPart(msg *BlockPartMessage, peerID p2p
}
if added && cs.ProposalBlockParts.IsComplete() {
// Added and completed!
_, err = cdc.UnmarshalBinaryReader(cs.ProposalBlockParts.GetReader(), &cs.ProposalBlock, int64(cs.state.ConsensusParams.BlockSize.MaxBytes))
_, err = cdc.UnmarshalBinaryLengthPrefixedReader(
cs.ProposalBlockParts.GetReader(),
&cs.ProposalBlock,
int64(cs.state.ConsensusParams.BlockSize.MaxBytes),
)
if err != nil {
return true, err
return added, err
}
// NOTE: it's possible to receive complete proposal blocks for future rounds without having the proposal
cs.Logger.Info("Received complete proposal block", "height", cs.ProposalBlock.Height, "hash", cs.ProposalBlock.Hash())
cs.eventBus.PublishEventCompleteProposal(cs.RoundStateEvent())
// Update Valid* if we can.
prevotes := cs.Votes.Prevotes(cs.Round)
@@ -1445,39 +1485,42 @@ func (cs *ConsensusState) addProposalBlockPart(msg *BlockPartMessage, peerID p2p
if cs.Step <= cstypes.RoundStepPropose && cs.isProposalComplete() {
// Move onto the next step
cs.enterPrevote(height, cs.Round)
if hasTwoThirds { // this is an optimisation, as the same transition will be triggered when the prevote is added
cs.enterPrecommit(height, cs.Round)
}
} else if cs.Step == cstypes.RoundStepCommit {
// If we're waiting on the proposal block...
cs.tryFinalizeCommit(height)
}
return true, nil
return added, nil
}
return added, nil
}
// Attempt to add the vote. if its a duplicate signature, dupeout the validator
func (cs *ConsensusState) tryAddVote(vote *types.Vote, peerID p2p.ID) error {
_, err := cs.addVote(vote, peerID)
func (cs *ConsensusState) tryAddVote(vote *types.Vote, peerID p2p.ID) (bool, error) {
added, err := cs.addVote(vote, peerID)
if err != nil {
// If the vote height is off, we'll just ignore it,
// But if it's a conflicting sig, add it to the cs.evpool.
// If it's otherwise invalid, punish peer.
if err == ErrVoteHeightMismatch {
return err
return added, err
} else if voteErr, ok := err.(*types.ErrVoteConflictingVotes); ok {
if bytes.Equal(vote.ValidatorAddress, cs.privValidator.GetAddress()) {
cs.Logger.Error("Found conflicting vote from ourselves. Did you unsafe_reset a validator?", "height", vote.Height, "round", vote.Round, "type", vote.Type)
return err
return added, err
}
cs.evpool.AddEvidence(voteErr.DuplicateVoteEvidence)
return err
return added, err
} else {
// Probably an invalid signature / Bad peer.
// Seems this can also err sometimes with "Unexpected step" - perhaps not from a bad peer ?
cs.Logger.Error("Error attempting to add vote", "err", err)
return ErrAddingVote
return added, ErrAddingVote
}
}
return nil
return added, nil
}
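tryAddVote now reports whether the vote was actually added alongside any error. As a hypothetical caller sketch only (the wrapper and markPeerHasVote below are invented, not part of this diff), the extra boolean lets calling code react solely to genuinely new votes:

```
// Hypothetical sketch, not part of this changeset: react only to votes that
// were genuinely added. markPeerHasVote is an invented bookkeeping helper.
func (cs *ConsensusState) tryAddVoteAndTrack(vote *types.Vote, peerID p2p.ID) {
	added, err := cs.tryAddVote(vote, peerID)
	if err != nil {
		cs.Logger.Error("error adding vote", "err", err)
	}
	if added {
		markPeerHasVote(peerID, vote) // e.g. update gossip bookkeeping
	}
}
```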
//-----------------------------------------------------------------------------
@@ -1488,7 +1531,7 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool,
// A precommit for the previous height?
// These come in while we wait timeoutCommit
if vote.Height+1 == cs.Height {
if !(cs.Step == cstypes.RoundStepNewHeight && vote.Type == types.VoteTypePrecommit) {
if !(cs.Step == cstypes.RoundStepNewHeight && vote.Type == types.PrecommitType) {
// TODO: give the reason ..
// fmt.Errorf("tryAddVote: Wrong height, not a LastCommit straggler commit.")
return added, ErrVoteHeightMismatch
@@ -1498,7 +1541,7 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool,
return added, err
}
cs.Logger.Info(cmn.Fmt("Added to lastPrecommits: %v", cs.LastCommit.StringShort()))
cs.Logger.Info(fmt.Sprintf("Added to lastPrecommits: %v", cs.LastCommit.StringShort()))
cs.eventBus.PublishEventVote(types.EventDataVote{vote})
cs.evsw.FireEvent(types.EventVote, vote)
@@ -1531,7 +1574,7 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool,
cs.evsw.FireEvent(types.EventVote, vote)
switch vote.Type {
case types.VoteTypePrevote:
case types.PrevoteType:
prevotes := cs.Votes.Prevotes(vote.Round)
cs.Logger.Info("Added to prevote", "vote", vote, "prevotes", prevotes.StringShort())
@@ -1550,7 +1593,7 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool,
!cs.LockedBlock.HashesTo(blockID.Hash) {
cs.Logger.Info("Unlocking because of POL.", "lockedRound", cs.LockedRound, "POLRound", vote.Round)
cs.LockedRound = 0
cs.LockedRound = -1
cs.LockedBlock = nil
cs.LockedBlockParts = nil
cs.eventBus.PublishEventUnlock(cs.RoundStateEvent())
@@ -1558,27 +1601,38 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool,
// Update Valid* if we can.
// NOTE: our proposal block may be nil, or may not be the block that received a polka.
// TODO: we may want to still update the ValidBlock and obtain it via gossipping
if !blockID.IsZero() &&
(cs.ValidRound < vote.Round) &&
(vote.Round <= cs.Round) &&
cs.ProposalBlock.HashesTo(blockID.Hash) {
if len(blockID.Hash) != 0 && (cs.ValidRound < vote.Round) && (vote.Round == cs.Round) {
cs.Logger.Info("Updating ValidBlock because of POL.", "validRound", cs.ValidRound, "POLRound", vote.Round)
cs.ValidRound = vote.Round
cs.ValidBlock = cs.ProposalBlock
cs.ValidBlockParts = cs.ProposalBlockParts
if cs.ProposalBlock.HashesTo(blockID.Hash) {
cs.Logger.Info(
"Updating ValidBlock because of POL.", "validRound", cs.ValidRound, "POLRound", vote.Round)
cs.ValidRound = vote.Round
cs.ValidBlock = cs.ProposalBlock
cs.ValidBlockParts = cs.ProposalBlockParts
} else {
cs.Logger.Info(
"Valid block we don't know about. Set ProposalBlock=nil",
"proposal", cs.ProposalBlock.Hash(), "blockId", blockID.Hash)
// We're getting the wrong block.
cs.ProposalBlock = nil
}
if !cs.ProposalBlockParts.HasHeader(blockID.PartsHeader) {
cs.ProposalBlockParts = types.NewPartSetFromHeader(blockID.PartsHeader)
}
cs.evsw.FireEvent(types.EventValidBlock, &cs.RoundState)
cs.eventBus.PublishEventValidBlock(cs.RoundStateEvent())
}
}
// If +2/3 prevotes for *anything* for this or future round:
if cs.Round <= vote.Round && prevotes.HasTwoThirdsAny() {
// Round-skip over to PrevoteWait or goto Precommit.
cs.enterNewRound(height, vote.Round) // if the vote is ahead of us
if prevotes.HasTwoThirdsMajority() {
// If +2/3 prevotes for *anything* for future round:
if cs.Round < vote.Round && prevotes.HasTwoThirdsAny() {
// Round-skip if there are +2/3 votes for any round ahead of us
cs.enterNewRound(height, vote.Round)
} else if cs.Round == vote.Round && cstypes.RoundStepPrevote <= cs.Step { // current round
blockID, ok := prevotes.TwoThirdsMajority()
if ok && (cs.isProposalComplete() || len(blockID.Hash) == 0) {
cs.enterPrecommit(height, vote.Round)
} else {
cs.enterPrevote(height, vote.Round) // if the vote is ahead of us
} else if prevotes.HasTwoThirdsAny() {
cs.enterPrevoteWait(height, vote.Round)
}
} else if cs.Proposal != nil && 0 <= cs.Proposal.POLRound && cs.Proposal.POLRound == vote.Round {
@@ -1588,47 +1642,45 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool,
}
}
case types.VoteTypePrecommit:
case types.PrecommitType:
precommits := cs.Votes.Precommits(vote.Round)
cs.Logger.Info("Added to precommit", "vote", vote, "precommits", precommits.StringShort())
blockID, ok := precommits.TwoThirdsMajority()
if ok {
if len(blockID.Hash) == 0 {
cs.enterNewRound(height, vote.Round+1)
} else {
cs.enterNewRound(height, vote.Round)
cs.enterPrecommit(height, vote.Round)
// Executed as TwoThirdsMajority could be from a higher round
cs.enterNewRound(height, vote.Round)
cs.enterPrecommit(height, vote.Round)
if len(blockID.Hash) != 0 {
cs.enterCommit(height, vote.Round)
if cs.config.SkipTimeoutCommit && precommits.HasAll() {
// if we have all the votes now,
// go straight to new round (skip timeout commit)
// cs.scheduleTimeout(time.Duration(0), cs.Height, 0, cstypes.RoundStepNewHeight)
cs.enterNewRound(cs.Height, 0)
}
} else {
cs.enterPrecommitWait(height, vote.Round)
}
} else if cs.Round <= vote.Round && precommits.HasTwoThirdsAny() {
cs.enterNewRound(height, vote.Round)
cs.enterPrecommit(height, vote.Round)
cs.enterPrecommitWait(height, vote.Round)
}
default:
panic(cmn.Fmt("Unexpected vote type %X", vote.Type)) // go-wire should prevent this.
panic(fmt.Sprintf("Unexpected vote type %X", vote.Type)) // go-wire should prevent this.
}
return
}
func (cs *ConsensusState) signVote(type_ byte, hash []byte, header types.PartSetHeader) (*types.Vote, error) {
func (cs *ConsensusState) signVote(type_ types.SignedMsgType, hash []byte, header types.PartSetHeader) (*types.Vote, error) {
addr := cs.privValidator.GetAddress()
valIndex, _ := cs.Validators.GetByAddress(addr)
vote := &types.Vote{
ValidatorAddress: addr,
ValidatorIndex: valIndex,
Height: cs.Height,
Round: cs.Round,
Timestamp: time.Now().UTC(),
Timestamp: cs.voteTime(),
Type: type_,
BlockID: types.BlockID{hash, header},
}
@@ -1636,8 +1688,25 @@ func (cs *ConsensusState) signVote(type_ byte, hash []byte, header types.PartSet
return vote, err
}
func (cs *ConsensusState) voteTime() time.Time {
now := tmtime.Now()
minVoteTime := now
// TODO: Remove the next branch once we stop voting for v when cs.ProposalBlock == nil,
// even if cs.LockedBlock != nil. See https://github.com/tendermint/spec.
if cs.LockedBlock != nil {
minVoteTime = cs.config.MinValidVoteTime(cs.LockedBlock.Time)
} else if cs.ProposalBlock != nil {
minVoteTime = cs.config.MinValidVoteTime(cs.ProposalBlock.Time)
}
if now.After(minVoteTime) {
return now
}
return minVoteTime
}
// sign the vote and publish on internalMsgQueue
func (cs *ConsensusState) signAddVote(type_ byte, hash []byte, header types.PartSetHeader) *types.Vote {
func (cs *ConsensusState) signAddVote(type_ types.SignedMsgType, hash []byte, header types.PartSetHeader) *types.Vote {
// if we don't have a key or we're not in the validator set, do nothing
if cs.privValidator == nil || !cs.Validators.HasAddress(cs.privValidator.GetAddress()) {
return nil

File diff suppressed because it is too large.

@@ -6,9 +6,9 @@ import (
"strings"
"sync"
cmn "github.com/tendermint/tendermint/libs/common"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/types"
cmn "github.com/tendermint/tendermint/libs/common"
)
type RoundVoteSet struct {
@@ -99,8 +99,8 @@ func (hvs *HeightVoteSet) addRound(round int) {
cmn.PanicSanity("addRound() for an existing round")
}
// log.Debug("addRound(round)", "round", round)
prevotes := types.NewVoteSet(hvs.chainID, hvs.height, round, types.VoteTypePrevote, hvs.valSet)
precommits := types.NewVoteSet(hvs.chainID, hvs.height, round, types.VoteTypePrecommit, hvs.valSet)
prevotes := types.NewVoteSet(hvs.chainID, hvs.height, round, types.PrevoteType, hvs.valSet)
precommits := types.NewVoteSet(hvs.chainID, hvs.height, round, types.PrecommitType, hvs.valSet)
hvs.roundVoteSets[round] = RoundVoteSet{
Prevotes: prevotes,
Precommits: precommits,
@@ -134,13 +134,13 @@ func (hvs *HeightVoteSet) AddVote(vote *types.Vote, peerID p2p.ID) (added bool,
func (hvs *HeightVoteSet) Prevotes(round int) *types.VoteSet {
hvs.mtx.Lock()
defer hvs.mtx.Unlock()
return hvs.getVoteSet(round, types.VoteTypePrevote)
return hvs.getVoteSet(round, types.PrevoteType)
}
func (hvs *HeightVoteSet) Precommits(round int) *types.VoteSet {
hvs.mtx.Lock()
defer hvs.mtx.Unlock()
return hvs.getVoteSet(round, types.VoteTypePrecommit)
return hvs.getVoteSet(round, types.PrecommitType)
}
// Last round and blockID that has +2/3 prevotes for a particular block or nil.
@@ -149,7 +149,7 @@ func (hvs *HeightVoteSet) POLInfo() (polRound int, polBlockID types.BlockID) {
hvs.mtx.Lock()
defer hvs.mtx.Unlock()
for r := hvs.round; r >= 0; r-- {
rvs := hvs.getVoteSet(r, types.VoteTypePrevote)
rvs := hvs.getVoteSet(r, types.PrevoteType)
polBlockID, ok := rvs.TwoThirdsMajority()
if ok {
return r, polBlockID
@@ -158,18 +158,18 @@ func (hvs *HeightVoteSet) POLInfo() (polRound int, polBlockID types.BlockID) {
return -1, types.BlockID{}
}
func (hvs *HeightVoteSet) getVoteSet(round int, type_ byte) *types.VoteSet {
func (hvs *HeightVoteSet) getVoteSet(round int, type_ types.SignedMsgType) *types.VoteSet {
rvs, ok := hvs.roundVoteSets[round]
if !ok {
return nil
}
switch type_ {
case types.VoteTypePrevote:
case types.PrevoteType:
return rvs.Prevotes
case types.VoteTypePrecommit:
case types.PrecommitType:
return rvs.Precommits
default:
cmn.PanicSanity(cmn.Fmt("Unexpected vote type %X", type_))
cmn.PanicSanity(fmt.Sprintf("Unexpected vote type %X", type_))
return nil
}
}
@@ -178,7 +178,7 @@ func (hvs *HeightVoteSet) getVoteSet(round int, type_ byte) *types.VoteSet {
// NOTE: if there are too many peers, or too much peer churn,
// this can cause memory issues.
// TODO: implement ability to remove peers too
func (hvs *HeightVoteSet) SetPeerMaj23(round int, type_ byte, peerID p2p.ID, blockID types.BlockID) error {
func (hvs *HeightVoteSet) SetPeerMaj23(round int, type_ types.SignedMsgType, peerID p2p.ID, blockID types.BlockID) error {
hvs.mtx.Lock()
defer hvs.mtx.Unlock()
if !types.IsVoteTypeValid(type_) {
@@ -219,7 +219,7 @@ func (hvs *HeightVoteSet) StringIndented(indent string) string {
voteSetString = roundVoteSet.Precommits.StringShort()
vsStrings = append(vsStrings, voteSetString)
}
return cmn.Fmt(`HeightVoteSet{H:%v R:0~%v
return fmt.Sprintf(`HeightVoteSet{H:%v R:0~%v
%s %v
%s}`,
hvs.height, hvs.round,


@@ -1,12 +1,12 @@
package types
import (
"fmt"
"testing"
"time"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/types"
cmn "github.com/tendermint/tendermint/libs/common"
tmtime "github.com/tendermint/tendermint/types/time"
)
var config *cfg.Config // NOTE: must be reset for each _test.go file
@@ -55,14 +55,14 @@ func makeVoteHR(t *testing.T, height int64, round int, privVals []types.PrivVali
ValidatorIndex: valIndex,
Height: height,
Round: round,
Timestamp: time.Now().UTC(),
Type: types.VoteTypePrecommit,
Timestamp: tmtime.Now(),
Type: types.PrecommitType,
BlockID: types.BlockID{[]byte("fakehash"), types.PartSetHeader{}},
}
chainID := config.ChainID()
err := privVal.SignVote(chainID, vote)
if err != nil {
panic(cmn.Fmt("Error signing vote: %v", err))
panic(fmt.Sprintf("Error signing vote: %v", err))
return nil
}
return vote


@@ -4,8 +4,8 @@ import (
"fmt"
"time"
"github.com/tendermint/tendermint/types"
cmn "github.com/tendermint/tendermint/libs/common"
"github.com/tendermint/tendermint/types"
)
//-----------------------------------------------------------------------------


@@ -5,8 +5,8 @@ import (
"fmt"
"time"
"github.com/tendermint/tendermint/types"
cmn "github.com/tendermint/tendermint/libs/common"
"github.com/tendermint/tendermint/types"
)
//-----------------------------------------------------------------------------
@@ -26,8 +26,15 @@ const (
RoundStepPrecommitWait = RoundStepType(0x07) // Did receive any +2/3 precommits, start timeout
RoundStepCommit = RoundStepType(0x08) // Entered commit state machine
// NOTE: RoundStepNewHeight acts as RoundStepCommitWait.
// NOTE: Update IsValid method if you change this!
)
// IsValid returns true if the step is valid, false if unknown/undefined.
func (rs RoundStepType) IsValid() bool {
return uint8(rs) >= 0x01 && uint8(rs) <= 0x08
}
// String returns a string
func (rs RoundStepType) String() string {
switch rs {
@@ -107,8 +114,8 @@ func (rs *RoundState) RoundStateSimple() RoundStateSimple {
// RoundStateEvent returns the H/R/S of the RoundState as an event.
func (rs *RoundState) RoundStateEvent() types.EventDataRoundState {
// XXX: copy the RoundState
// if we want to avoid this, we may need synchronous events after all
// copy the RoundState.
// TODO: if we want to avoid this, we may need synchronous events after all
rsCopy := *rs
edrs := types.EventDataRoundState{
Height: rs.Height,


@@ -2,12 +2,12 @@ package types
import (
"testing"
"time"
amino "github.com/tendermint/go-amino"
"github.com/tendermint/go-amino"
"github.com/tendermint/tendermint/crypto/ed25519"
cmn "github.com/tendermint/tendermint/libs/common"
"github.com/tendermint/tendermint/types"
tmtime "github.com/tendermint/tendermint/types/time"
)
func BenchmarkRoundStateDeepCopy(b *testing.B) {
@@ -23,11 +23,11 @@ func BenchmarkRoundStateDeepCopy(b *testing.B) {
Hash: cmn.RandBytes(20),
},
}
sig := ed25519.SignatureEd25519{}
sig := make([]byte, ed25519.SignatureSize)
for i := 0; i < nval; i++ {
precommits[i] = &types.Vote{
ValidatorAddress: types.Address(cmn.RandBytes(20)),
Timestamp: time.Now(),
Timestamp: tmtime.Now(),
BlockID: blockID,
Signature: sig,
}
@@ -40,7 +40,7 @@ func BenchmarkRoundStateDeepCopy(b *testing.B) {
block := &types.Block{
Header: types.Header{
ChainID: cmn.RandStr(12),
Time: time.Now(),
Time: tmtime.Now(),
LastBlockID: blockID,
LastCommitHash: cmn.RandBytes(20),
DataHash: cmn.RandBytes(20),
@@ -62,19 +62,16 @@ func BenchmarkRoundStateDeepCopy(b *testing.B) {
parts := block.MakePartSet(4096)
// Random Proposal
proposal := &types.Proposal{
Timestamp: time.Now(),
BlockPartsHeader: types.PartSetHeader{
Hash: cmn.RandBytes(20),
},
POLBlockID: blockID,
Signature: sig,
Timestamp: tmtime.Now(),
BlockID: blockID,
Signature: sig,
}
// Random HeightVoteSet
// TODO: hvs :=
rs := &RoundState{
StartTime: time.Now(),
CommitTime: time.Now(),
StartTime: tmtime.Now(),
CommitTime: tmtime.Now(),
Validators: vset,
Proposal: proposal,
ProposalBlock: block,


@@ -1,14 +1,12 @@
package types
import (
"github.com/tendermint/go-amino"
cryptoAmino "github.com/tendermint/tendermint/crypto/encoding/amino"
amino "github.com/tendermint/go-amino"
"github.com/tendermint/tendermint/types"
)
var cdc = amino.NewCodec()
func init() {
cryptoAmino.RegisterAmino(cdc)
types.RegisterEvidences(cdc)
types.RegisterBlockAmino(cdc)
}


@@ -1,13 +0,0 @@
package consensus
import (
cmn "github.com/tendermint/tendermint/libs/common"
)
// kind of arbitrary
var Spec = "1" // async
var Major = "0" //
var Minor = "2" // replay refactor
var Revision = "2" // validation -> commit
var Version = cmn.Fmt("v%s/%s.%s.%s", Spec, Major, Minor, Revision)


@@ -11,13 +11,15 @@ import (
"github.com/pkg/errors"
amino "github.com/tendermint/go-amino"
"github.com/tendermint/tendermint/types"
auto "github.com/tendermint/tendermint/libs/autofile"
cmn "github.com/tendermint/tendermint/libs/common"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/types"
tmtime "github.com/tendermint/tendermint/types/time"
)
const (
// must be greater than params.BlockGossip.BlockPartSizeBytes + a few bytes
// must be greater than types.BlockPartSizeBytes + a few bytes
maxMsgSizeBytes = 1024 * 1024 // 1MB
)
@@ -72,13 +74,13 @@ type baseWAL struct {
enc *WALEncoder
}
func NewWAL(walFile string) (*baseWAL, error) {
func NewWAL(walFile string, groupOptions ...func(*auto.Group)) (*baseWAL, error) {
err := cmn.EnsureDir(filepath.Dir(walFile), 0700)
if err != nil {
return nil, errors.Wrap(err, "failed to ensure WAL directory is in place")
}
group, err := auto.OpenGroup(walFile)
group, err := auto.OpenGroup(walFile, groupOptions...)
if err != nil {
return nil, err
}
@@ -94,6 +96,11 @@ func (wal *baseWAL) Group() *auto.Group {
return wal.group
}
func (wal *baseWAL) SetLogger(l log.Logger) {
wal.BaseService.Logger = l
wal.group.SetLogger(l)
}
func (wal *baseWAL) OnStart() error {
size, err := wal.group.Head.Size()
if err != nil {
@@ -119,8 +126,8 @@ func (wal *baseWAL) Write(msg WALMessage) {
}
// Write the wal message
if err := wal.enc.Encode(&TimedWALMessage{time.Now(), msg}); err != nil {
panic(cmn.Fmt("Error writing msg to consensus wal: %v \n\nMessage: %v", err, msg))
if err := wal.enc.Encode(&TimedWALMessage{tmtime.Now(), msg}); err != nil {
panic(fmt.Sprintf("Error writing msg to consensus wal: %v \n\nMessage: %v", err, msg))
}
}
@@ -134,7 +141,7 @@ func (wal *baseWAL) WriteSync(msg WALMessage) {
wal.Write(msg)
if err := wal.group.Flush(); err != nil {
panic(cmn.Fmt("Error flushing consensus wal buf to file. Error: %v \n", err))
panic(fmt.Sprintf("Error flushing consensus wal buf to file. Error: %v \n", err))
}
}
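NewWAL now accepts variadic `func(*auto.Group)` options that are passed straight through to `auto.OpenGroup`. A minimal usage sketch, assuming only the option constructors that the truncation test further below already uses (`GroupHeadSizeLimit`, `GroupCheckDuration`); `dir` and `logger` are assumed to exist in the caller:

```
// Minimal sketch of the options-based constructor; the values are arbitrary.
wal, err := NewWAL(filepath.Join(dir, "wal"),
	autofile.GroupHeadSizeLimit(4096),             // rotate head file after ~4 KB
	autofile.GroupCheckDuration(time.Millisecond), // check for rotation frequently
)
if err != nil {
	return err
}
wal.SetLogger(logger)
if err := wal.Start(); err != nil {
	return err
}
defer wal.Stop()
wal.Write(EndHeightMessage{1}) // any WALMessage; flushed on the group's schedule
```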


@@ -4,6 +4,7 @@ import (
"bufio"
"bytes"
"fmt"
"io"
"os"
"path/filepath"
"strings"
@@ -13,22 +14,21 @@ import (
"github.com/tendermint/tendermint/abci/example/kvstore"
bc "github.com/tendermint/tendermint/blockchain"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/privval"
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
auto "github.com/tendermint/tendermint/libs/autofile"
cmn "github.com/tendermint/tendermint/libs/common"
"github.com/tendermint/tendermint/libs/db"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/privval"
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
)
// WALWithNBlocks generates a consensus WAL. It does this by spinning up a
// WALGenerateNBlocks generates a consensus WAL. It does this by spinning up a
// stripped down version of node (proxy app, event bus, consensus state) with a
// persistent kvstore application and special consensus wal instance
// (byteBufferWAL) and waits until numBlocks are created. Then it returns a WAL
// content. If the node fails to produce given numBlocks, it returns an error.
func WALWithNBlocks(numBlocks int) (data []byte, err error) {
// (byteBufferWAL) and waits until numBlocks are created. If the node fails to produce given numBlocks, it returns an error.
func WALGenerateNBlocks(wr io.Writer, numBlocks int) (err error) {
config := getConfig()
app := kvstore.NewPersistentKVStoreApplication(filepath.Join(config.DBDir(), "wal_generator"))
@@ -38,31 +38,33 @@ func WALWithNBlocks(numBlocks int) (data []byte, err error) {
/////////////////////////////////////////////////////////////////////////////
// COPY PASTE FROM node.go WITH A FEW MODIFICATIONS
// NOTE: we can't import node package because of circular dependency
// NOTE: we can't import node package because of circular dependency.
// NOTE: we don't do handshake so need to set state.Version.Consensus.App directly.
privValidatorFile := config.PrivValidatorFile()
privValidator := privval.LoadOrGenFilePV(privValidatorFile)
genDoc, err := types.GenesisDocFromFile(config.GenesisFile())
if err != nil {
return nil, errors.Wrap(err, "failed to read genesis file")
return errors.Wrap(err, "failed to read genesis file")
}
stateDB := db.NewMemDB()
blockStoreDB := db.NewMemDB()
state, err := sm.MakeGenesisState(genDoc)
if err != nil {
return nil, errors.Wrap(err, "failed to make genesis state")
return errors.Wrap(err, "failed to make genesis state")
}
state.Version.Consensus.App = kvstore.ProtocolVersion
blockStore := bc.NewBlockStore(blockStoreDB)
handshaker := NewHandshaker(stateDB, state, blockStore, genDoc)
proxyApp := proxy.NewAppConns(proxy.NewLocalClientCreator(app), handshaker)
proxyApp := proxy.NewAppConns(proxy.NewLocalClientCreator(app))
proxyApp.SetLogger(logger.With("module", "proxy"))
if err := proxyApp.Start(); err != nil {
return nil, errors.Wrap(err, "failed to start proxy app connections")
return errors.Wrap(err, "failed to start proxy app connections")
}
defer proxyApp.Stop()
eventBus := types.NewEventBus()
eventBus.SetLogger(logger.With("module", "events"))
if err := eventBus.Start(); err != nil {
return nil, errors.Wrap(err, "failed to start event bus")
return errors.Wrap(err, "failed to start event bus")
}
defer eventBus.Stop()
mempool := sm.MockMempool{}
@@ -78,8 +80,6 @@ func WALWithNBlocks(numBlocks int) (data []byte, err error) {
/////////////////////////////////////////////////////////////////////////////
// set consensus wal to buffered WAL, which will write all incoming msgs to buffer
var b bytes.Buffer
wr := bufio.NewWriter(&b)
numBlocksWritten := make(chan struct{})
wal := newByteBufferWAL(logger, NewWALEncoder(wr), int64(numBlocks), numBlocksWritten)
// see wal.go#103
@@ -87,20 +87,32 @@ func WALWithNBlocks(numBlocks int) (data []byte, err error) {
consensusState.wal = wal
if err := consensusState.Start(); err != nil {
return nil, errors.Wrap(err, "failed to start consensus state")
return errors.Wrap(err, "failed to start consensus state")
}
select {
case <-numBlocksWritten:
consensusState.Stop()
wr.Flush()
return b.Bytes(), nil
return nil
case <-time.After(1 * time.Minute):
consensusState.Stop()
return []byte{}, fmt.Errorf("waited too long for tendermint to produce %d blocks (grep logs for `wal_generator`)", numBlocks)
return fmt.Errorf("waited too long for tendermint to produce %d blocks (grep logs for `wal_generator`)", numBlocks)
}
}
// WALWithNBlocks returns the WAL contents for numBlocks blocks.
func WALWithNBlocks(numBlocks int) (data []byte, err error) {
var b bytes.Buffer
wr := bufio.NewWriter(&b)
if err := WALGenerateNBlocks(wr, numBlocks); err != nil {
return []byte{}, err
}
wr.Flush()
return b.Bytes(), nil
}
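Because WALGenerateNBlocks now takes an io.Writer, callers are no longer forced through the in-memory buffer that WALWithNBlocks keeps for convenience. A hypothetical sketch (the file path and error handling are invented) that streams the generated WAL straight to disk:

```
// Hypothetical: write a 10-block WAL directly to a file instead of a buffer.
f, err := os.Create(filepath.Join(os.TempDir(), "example.wal"))
if err != nil {
	return err
}
defer f.Close()
if err := WALGenerateNBlocks(f, 10); err != nil {
	return err
}
```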
// f**ing long, but unique for each test
func makePathname() string {
// get path


@@ -3,20 +3,71 @@ package consensus
import (
"bytes"
"crypto/rand"
"fmt"
"io/ioutil"
"os"
"path/filepath"
// "sync"
"testing"
"time"
"github.com/tendermint/tendermint/consensus/types"
"github.com/tendermint/tendermint/libs/autofile"
tmtypes "github.com/tendermint/tendermint/types"
cmn "github.com/tendermint/tendermint/libs/common"
tmtime "github.com/tendermint/tendermint/types/time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestWALTruncate(t *testing.T) {
walDir, err := ioutil.TempDir("", "wal")
if err != nil {
panic(fmt.Errorf("failed to create temp WAL file: %v", err))
}
defer os.RemoveAll(walDir)
walFile := filepath.Join(walDir, "wal")
// the magic number 4K makes RotateFile actually truncate content; the default headSizeLimit (10M) is hard to reach in a test.
// the magic number 1 * time.Millisecond makes RotateFile check frequently; the default groupCheckDuration (5s) is hard to simulate.
wal, err := NewWAL(walFile, autofile.GroupHeadSizeLimit(4096), autofile.GroupCheckDuration(1*time.Millisecond))
if err != nil {
t.Fatal(err)
}
wal.Start()
defer wal.Stop()
// 60 blocks take roughly 70K, more than the group's headBuf size (4096 * 10); once headBuf is full, the truncated content is flushed to the file.
// by that point RotateFile has been called, so truncated content exists in each file.
err = WALGenerateNBlocks(wal.Group(), 60)
if err != nil {
t.Fatal(err)
}
time.Sleep(1 * time.Millisecond) // wait groupCheckDuration to make sure RotateFile has run
wal.Group().Flush()
h := int64(50)
gr, found, err := wal.SearchForEndHeight(h, &WALSearchOptions{})
assert.NoError(t, err, fmt.Sprintf("expected not to err on height %d", h))
assert.True(t, found, fmt.Sprintf("expected to find end height for %d", h))
assert.NotNil(t, gr, "expected group not to be nil")
defer gr.Close()
dec := NewWALDecoder(gr)
msg, err := dec.Decode()
assert.NoError(t, err, "expected to decode a message")
rs, ok := msg.Msg.(tmtypes.EventDataRoundState)
assert.True(t, ok, "expected message of type EventDataRoundState")
assert.Equal(t, rs.Height, h+1, fmt.Sprintf("wrong height"))
}
func TestWALEncoderDecoder(t *testing.T) {
now := time.Now()
now := tmtime.Now()
msgs := []TimedWALMessage{
TimedWALMessage{Time: now, Msg: EndHeightMessage{0}},
TimedWALMessage{Time: now, Msg: timeoutInfo{Duration: time.Second, Height: 1, Round: 1, Step: types.RoundStepPropose}},
@@ -54,8 +105,8 @@ func TestWALSearchForEndHeight(t *testing.T) {
h := int64(3)
gr, found, err := wal.SearchForEndHeight(h, &WALSearchOptions{})
assert.NoError(t, err, cmn.Fmt("expected not to err on height %d", h))
assert.True(t, found, cmn.Fmt("expected to find end height for %d", h))
assert.NoError(t, err, fmt.Sprintf("expected not to err on height %d", h))
assert.True(t, found, fmt.Sprintf("expected to find end height for %d", h))
assert.NotNil(t, gr, "expected group not to be nil")
defer gr.Close()
@@ -64,7 +115,7 @@ func TestWALSearchForEndHeight(t *testing.T) {
assert.NoError(t, err, "expected to decode a message")
rs, ok := msg.Msg.(tmtypes.EventDataRoundState)
assert.True(t, ok, "expected message of type EventDataRoundState")
assert.Equal(t, rs.Height, h+1, cmn.Fmt("wrong height"))
assert.Equal(t, rs.Height, h+1, fmt.Sprintf("wrong height"))
}
/*
@@ -93,7 +144,7 @@ func benchmarkWalDecode(b *testing.B, n int) {
enc := NewWALEncoder(buf)
data := nBytes(n)
enc.Encode(&TimedWALMessage{Msg: data, Time: time.Now().Round(time.Second)})
enc.Encode(&TimedWALMessage{Msg: data, Time: time.Now().Round(time.Second).UTC()})
encoded := buf.Bytes()


@@ -2,7 +2,7 @@ package consensus
import (
"github.com/tendermint/go-amino"
cryptoAmino "github.com/tendermint/tendermint/crypto/encoding/amino"
"github.com/tendermint/tendermint/types"
)
var cdc = amino.NewCodec()
@@ -10,5 +10,5 @@ var cdc = amino.NewCodec()
func init() {
RegisterConsensusMessages(cdc)
RegisterWALMessages(cdc)
cryptoAmino.RegisterAmino(cdc)
types.RegisterBlockAmino(cdc)
}


@@ -24,9 +24,7 @@ crypto `.Bytes()` uses Amino:binary encoding, but Amino:JSON is also supported.
Example Amino:JSON encodings:
ed25519.PrivKeyEd25519 - {"type":"954568A3288910","value":"EVkqJO/jIXp3rkASXfh9YnyToYXRXhBr6g9cQVxPFnQBP/5povV4HTjvsy530kybxKHwEi85iU8YL0qQhSYVoQ=="}
crypto.SignatureEd25519 - {"type":"6BF5903DA1DB28","value":"77sQNZOrf7ltExpf7AV1WaYPCHbyRLgjBsoWVzcduuLk+jIGmYk+s5R6Emm29p12HeiNAuhUJgdFGmwkpeGJCA=="}
ed25519.PubKeyEd25519 - {"type":"AC26791624DE60","value":"AT/+aaL1eB0477Mud9JMm8Sh8BIvOYlPGC9KkIUmFaE="}
crypto.PrivKeySecp256k1 - {"type":"019E82E1B0F798","value":"zx4Pnh67N+g2V+5vZbQzEyRerX9c4ccNZOVzM9RvJ0Y="}
crypto.SignatureSecp256k1 - {"type":"6D1EA416E1FEE8","value":"MEUCIQCIg5TqS1l7I+MKTrSPIuUN2+4m5tA29dcauqn3NhEJ2wIgICaZ+lgRc5aOTVahU/XoLopXKn8BZcl0bnuYWLvohR8="}
crypto.PubKeySecp256k1 - {"type":"F8CCEAEB5AE980","value":"A8lPKJXcNl5VHt1FK8a244K9EJuS4WX1hFBnwisi0IJx"}
```


@@ -5,7 +5,7 @@ import (
"fmt"
"io/ioutil"
"golang.org/x/crypto/openpgp/armor"
"golang.org/x/crypto/openpgp/armor" // forked to github.com/tendermint/crypto
)
func EncodeArmor(blockType string, headers map[string]string, data []byte) string {


@@ -1,32 +1,36 @@
package crypto
import (
"github.com/tendermint/tendermint/crypto/tmhash"
cmn "github.com/tendermint/tendermint/libs/common"
)
type PrivKey interface {
Bytes() []byte
Sign(msg []byte) (Signature, error)
PubKey() PubKey
Equals(PrivKey) bool
}
const (
// AddressSize is the size of a pubkey address.
AddressSize = tmhash.TruncatedSize
)
// An address is a []byte, but hex-encoded even in JSON.
// []byte leaves us the option to change the address length.
// Use an alias so Unmarshal methods (with ptr receivers) are available too.
type Address = cmn.HexBytes
func AddressHash(bz []byte) Address {
return Address(tmhash.SumTruncated(bz))
}
type PubKey interface {
Address() Address
Bytes() []byte
VerifyBytes(msg []byte, sig Signature) bool
VerifyBytes(msg []byte, sig []byte) bool
Equals(PubKey) bool
}
type Signature interface {
type PrivKey interface {
Bytes() []byte
IsZero() bool
Equals(Signature) bool
Sign(msg []byte) ([]byte, error)
PubKey() PubKey
Equals(PrivKey) bool
}
type Symmetric interface {


@@ -0,0 +1,26 @@
package ed25519
import (
"io"
"testing"
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/crypto/internal/benchmarking"
)
func BenchmarkKeyGeneration(b *testing.B) {
benchmarkKeygenWrapper := func(reader io.Reader) crypto.PrivKey {
return genPrivKey(reader)
}
benchmarking.BenchmarkKeyGeneration(b, benchmarkKeygenWrapper)
}
func BenchmarkSigning(b *testing.B) {
priv := GenPrivKey()
benchmarking.BenchmarkSigning(b, priv)
}
func BenchmarkVerification(b *testing.B) {
priv := GenPrivKey()
benchmarking.BenchmarkVerification(b, priv)
}


@@ -4,13 +4,13 @@ import (
"bytes"
"crypto/subtle"
"fmt"
"io"
"github.com/tendermint/ed25519"
"github.com/tendermint/ed25519/extra25519"
amino "github.com/tendermint/go-amino"
"golang.org/x/crypto/ed25519" // forked to github.com/tendermint/crypto
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/crypto/tmhash"
cmn "github.com/tendermint/tendermint/libs/common"
)
//-------------------------------------
@@ -18,9 +18,11 @@ import (
var _ crypto.PrivKey = PrivKeyEd25519{}
const (
Ed25519PrivKeyAminoRoute = "tendermint/PrivKeyEd25519"
Ed25519PubKeyAminoRoute = "tendermint/PubKeyEd25519"
Ed25519SignatureAminoRoute = "tendermint/SignatureEd25519"
PrivKeyAminoRoute = "tendermint/PrivKeyEd25519"
PubKeyAminoRoute = "tendermint/PubKeyEd25519"
// Size of an Edwards25519 signature. Namely the size of a compressed
// Edwards25519 point, and a field element. Both of which are 32 bytes.
SignatureSize = 64
)
var cdc = amino.NewCodec()
@@ -28,15 +30,11 @@ var cdc = amino.NewCodec()
func init() {
cdc.RegisterInterface((*crypto.PubKey)(nil), nil)
cdc.RegisterConcrete(PubKeyEd25519{},
Ed25519PubKeyAminoRoute, nil)
PubKeyAminoRoute, nil)
cdc.RegisterInterface((*crypto.PrivKey)(nil), nil)
cdc.RegisterConcrete(PrivKeyEd25519{},
Ed25519PrivKeyAminoRoute, nil)
cdc.RegisterInterface((*crypto.Signature)(nil), nil)
cdc.RegisterConcrete(SignatureEd25519{},
Ed25519SignatureAminoRoute, nil)
PrivKeyAminoRoute, nil)
}
// PrivKeyEd25519 implements crypto.PrivKey.
@@ -48,10 +46,15 @@ func (privKey PrivKeyEd25519) Bytes() []byte {
}
// Sign produces a signature on the provided message.
func (privKey PrivKeyEd25519) Sign(msg []byte) (crypto.Signature, error) {
privKeyBytes := [64]byte(privKey)
signatureBytes := ed25519.Sign(&privKeyBytes, msg)
return SignatureEd25519(*signatureBytes), nil
// This assumes the privkey is wellformed in the golang format.
// The first 32 bytes should be random,
// corresponding to the normal ed25519 private key.
// The latter 32 bytes should be the compressed public key.
// If these conditions aren't met, Sign will panic or produce an
// incorrect signature.
func (privKey PrivKeyEd25519) Sign(msg []byte) ([]byte, error) {
signatureBytes := ed25519.Sign(privKey[:], msg)
return signatureBytes[:], nil
}
// PubKey gets the corresponding public key from the private key.
@@ -67,14 +70,14 @@ func (privKey PrivKeyEd25519) PubKey() crypto.PubKey {
break
}
}
if initialized {
var pubkeyBytes [PubKeyEd25519Size]byte
copy(pubkeyBytes[:], privKeyBytes[32:])
return PubKeyEd25519(pubkeyBytes)
if !initialized {
panic("Expected PrivKeyEd25519 to include concatenated pubkey bytes")
}
pubBytes := *ed25519.MakePublicKey(&privKeyBytes)
return PubKeyEd25519(pubBytes)
var pubkeyBytes [PubKeyEd25519Size]byte
copy(pubkeyBytes[:], privKeyBytes[32:])
return PubKeyEd25519(pubkeyBytes)
}
// Equals - you probably don't need to use this.
@@ -87,28 +90,25 @@ func (privKey PrivKeyEd25519) Equals(other crypto.PrivKey) bool {
}
}
// ToCurve25519 takes a private key and returns its representation on
// Curve25519. Curve25519 is birationally equivalent to Edwards25519,
// which Ed25519 uses internally. This method is intended for use in
// an X25519 Diffie Hellman key exchange.
func (privKey PrivKeyEd25519) ToCurve25519() *[PubKeyEd25519Size]byte {
keyCurve25519 := new([32]byte)
privKeyBytes := [64]byte(privKey)
extra25519.PrivateKeyToCurve25519(keyCurve25519, &privKeyBytes)
return keyCurve25519
}
// GenPrivKey generates a new ed25519 private key.
// It uses OS randomness in conjunction with the current global random seed
// in tendermint/libs/common to generate the private key.
func GenPrivKey() PrivKeyEd25519 {
privKey := new([64]byte)
copy(privKey[:32], crypto.CRandBytes(32))
// ed25519.MakePublicKey(privKey) alters the last 32 bytes of privKey.
// It places the pubkey in the last 32 bytes of privKey, and returns the
// public key.
ed25519.MakePublicKey(privKey)
return PrivKeyEd25519(*privKey)
return genPrivKey(crypto.CReader())
}
// genPrivKey generates a new ed25519 private key using the provided reader.
func genPrivKey(rand io.Reader) PrivKeyEd25519 {
seed := make([]byte, 32)
_, err := io.ReadFull(rand, seed[:])
if err != nil {
panic(err)
}
privKey := ed25519.NewKeyFromSeed(seed)
var privKeyEd PrivKeyEd25519
copy(privKeyEd[:], privKey)
return privKeyEd
}
// GenPrivKeyFromSecret hashes the secret with SHA2, and uses
@@ -116,14 +116,12 @@ func GenPrivKey() PrivKeyEd25519 {
// NOTE: secret should be the output of a KDF like bcrypt,
// if it's derived from user input.
func GenPrivKeyFromSecret(secret []byte) PrivKeyEd25519 {
privKey32 := crypto.Sha256(secret) // Not Ripemd160 because we want 32 bytes.
privKey := new([64]byte)
copy(privKey[:32], privKey32)
// ed25519.MakePublicKey(privKey) alters the last 32 bytes of privKey.
// It places the pubkey in the last 32 bytes of privKey, and returns the
// public key.
ed25519.MakePublicKey(privKey)
return PrivKeyEd25519(*privKey)
seed := crypto.Sha256(secret) // Not Ripemd160 because we want 32 bytes.
privKey := ed25519.NewKeyFromSeed(seed)
var privKeyEd PrivKeyEd25519
copy(privKeyEd[:], privKey)
return privKeyEd
}
//-------------------------------------
@@ -138,7 +136,7 @@ type PubKeyEd25519 [PubKeyEd25519Size]byte
// Address is the SHA256-20 of the raw pubkey bytes.
func (pubKey PubKeyEd25519) Address() crypto.Address {
return crypto.Address(tmhash.Sum(pubKey[:]))
return crypto.Address(tmhash.SumTruncated(pubKey[:]))
}
// Bytes marshals the PubKey using amino encoding.
@@ -150,30 +148,12 @@ func (pubKey PubKeyEd25519) Bytes() []byte {
return bz
}
func (pubKey PubKeyEd25519) VerifyBytes(msg []byte, sig_ crypto.Signature) bool {
func (pubKey PubKeyEd25519) VerifyBytes(msg []byte, sig []byte) bool {
// make sure we use the same algorithm to sign
sig, ok := sig_.(SignatureEd25519)
if !ok {
if len(sig) != SignatureSize {
return false
}
pubKeyBytes := [PubKeyEd25519Size]byte(pubKey)
sigBytes := [SignatureEd25519Size]byte(sig)
return ed25519.Verify(&pubKeyBytes, msg, &sigBytes)
}
// ToCurve25519 takes a public key and returns its representation on
// Curve25519. Curve25519 is birationally equivalent to Edwards25519,
// which Ed25519 uses internally. This method is intended for use in
// an X25519 Diffie Hellman key exchange.
//
// If there is an error, then this function returns nil.
func (pubKey PubKeyEd25519) ToCurve25519() *[PubKeyEd25519Size]byte {
keyCurve25519, pubKeyBytes := new([PubKeyEd25519Size]byte), [PubKeyEd25519Size]byte(pubKey)
ok := extra25519.PublicKeyToCurve25519(keyCurve25519, &pubKeyBytes)
if !ok {
return nil
}
return keyCurve25519
return ed25519.Verify(pubKey[:], msg, sig)
}
func (pubKey PubKeyEd25519) String() string {
@@ -188,40 +168,3 @@ func (pubKey PubKeyEd25519) Equals(other crypto.PubKey) bool {
return false
}
}
//-------------------------------------
var _ crypto.Signature = SignatureEd25519{}
// Size of an Edwards25519 signature. Namely the size of a compressed
// Edwards25519 point, and a field element. Both of which are 32 bytes.
const SignatureEd25519Size = 64
// SignatureEd25519 implements crypto.Signature
type SignatureEd25519 [SignatureEd25519Size]byte
func (sig SignatureEd25519) Bytes() []byte {
bz, err := cdc.MarshalBinaryBare(sig)
if err != nil {
panic(err)
}
return bz
}
func (sig SignatureEd25519) IsZero() bool { return len(sig) == 0 }
func (sig SignatureEd25519) String() string { return fmt.Sprintf("/%X.../", cmn.Fingerprint(sig[:])) }
func (sig SignatureEd25519) Equals(other crypto.Signature) bool {
if otherEd, ok := other.(SignatureEd25519); ok {
return subtle.ConstantTimeCompare(sig[:], otherEd[:]) == 1
} else {
return false
}
}
func SignatureEd25519FromBytes(data []byte) crypto.Signature {
var sig SignatureEd25519
copy(sig[:], data)
return sig
}
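The switch from github.com/tendermint/ed25519 to the forked golang.org/x/crypto/ed25519 means keys are now derived from a 32-byte seed and signatures are plain byte slices. A standalone sketch of that flow, assuming only the standard x/crypto/ed25519 API (nothing below is code from this repository):

```
// Standalone sketch of the seed-based API used by genPrivKey above.
package main

import (
	"crypto/rand"
	"fmt"
	"io"

	"golang.org/x/crypto/ed25519" // forked to github.com/tendermint/crypto in this changeset
)

func main() {
	seed := make([]byte, 32) // same size genPrivKey reads from its rand source
	if _, err := io.ReadFull(rand.Reader, seed); err != nil {
		panic(err)
	}
	priv := ed25519.NewKeyFromSeed(seed)       // 64 bytes: seed || public key
	pub := priv.Public().(ed25519.PublicKey)   // the trailing 32 bytes
	sig := ed25519.Sign(priv, []byte("hello")) // signatures are plain []byte now
	fmt.Println(len(sig), ed25519.Verify(pub, []byte("hello"), sig)) // 64 true
}
```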


@@ -23,9 +23,7 @@ func TestSignAndValidateEd25519(t *testing.T) {
// Mutate the signature, just one bit.
// TODO: Replace this with a much better fuzzer, tendermint/ed25519/issues/10
sigEd := sig.(ed25519.SignatureEd25519)
sigEd[7] ^= byte(0x01)
sig = sigEd
sig[7] ^= byte(0x01)
assert.False(t, pubKey.VerifyBytes(msg, sig))
}


@@ -1,14 +1,23 @@
package cryptoAmino
import (
"reflect"
amino "github.com/tendermint/go-amino"
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/crypto/ed25519"
"github.com/tendermint/tendermint/crypto/multisig"
"github.com/tendermint/tendermint/crypto/secp256k1"
)
var cdc = amino.NewCodec()
// routeTable is used to map public key concrete types back
// to their amino routes. This should eventually be handled
// by amino. Example usage:
// routeTable[reflect.TypeOf(ed25519.PubKeyEd25519{})] = ed25519.PubKeyAminoRoute
var routeTable = make(map[reflect.Type]string, 3)
func init() {
// NOTE: It's important that there be no conflicts here,
// as that would change the canonical representations,
@@ -17,6 +26,20 @@ func init() {
// https://github.com/tendermint/go-amino/issues/9
// is resolved
RegisterAmino(cdc)
// TODO: Have amino provide a way to go from concrete struct to route directly.
// Its currently a private API
routeTable[reflect.TypeOf(ed25519.PubKeyEd25519{})] = ed25519.PubKeyAminoRoute
routeTable[reflect.TypeOf(secp256k1.PubKeySecp256k1{})] = secp256k1.PubKeyAminoRoute
routeTable[reflect.TypeOf(&multisig.PubKeyMultisigThreshold{})] = multisig.PubKeyMultisigThresholdAminoRoute
}
// PubkeyAminoRoute returns the amino route of a pubkey
// cdc is currently passed in, as eventually this will not be using
// a package level codec.
func PubkeyAminoRoute(cdc *amino.Codec, key crypto.PubKey) (string, bool) {
route, found := routeTable[reflect.TypeOf(key)]
return route, found
}
// RegisterAmino registers all crypto related types in the given (amino) codec.
@@ -24,21 +47,17 @@ func RegisterAmino(cdc *amino.Codec) {
// These are all written here instead of
cdc.RegisterInterface((*crypto.PubKey)(nil), nil)
cdc.RegisterConcrete(ed25519.PubKeyEd25519{},
"tendermint/PubKeyEd25519", nil)
ed25519.PubKeyAminoRoute, nil)
cdc.RegisterConcrete(secp256k1.PubKeySecp256k1{},
"tendermint/PubKeySecp256k1", nil)
secp256k1.PubKeyAminoRoute, nil)
cdc.RegisterConcrete(multisig.PubKeyMultisigThreshold{},
multisig.PubKeyMultisigThresholdAminoRoute, nil)
cdc.RegisterInterface((*crypto.PrivKey)(nil), nil)
cdc.RegisterConcrete(ed25519.PrivKeyEd25519{},
"tendermint/PrivKeyEd25519", nil)
ed25519.PrivKeyAminoRoute, nil)
cdc.RegisterConcrete(secp256k1.PrivKeySecp256k1{},
"tendermint/PrivKeySecp256k1", nil)
cdc.RegisterInterface((*crypto.Signature)(nil), nil)
cdc.RegisterConcrete(ed25519.SignatureEd25519{},
"tendermint/SignatureEd25519", nil)
cdc.RegisterConcrete(secp256k1.SignatureSecp256k1{},
"tendermint/SignatureSecp256k1", nil)
secp256k1.PrivKeyAminoRoute, nil)
}
func PrivKeyFromBytes(privKeyBytes []byte) (privKey crypto.PrivKey, err error) {
@@ -50,8 +69,3 @@ func PubKeyFromBytes(pubKeyBytes []byte) (pubKey crypto.PubKey, err error) {
err = cdc.UnmarshalBinaryBare(pubKeyBytes, &pubKey)
return
}
func SignatureFromBytes(pubKeyBytes []byte) (pubKey crypto.Signature, err error) {
err = cdc.UnmarshalBinaryBare(pubKeyBytes, &pubKey)
return
}
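PubkeyAminoRoute exposes the route table populated in init above. A hypothetical lookup, written as if inside the cryptoAmino package (the pubkey value is arbitrary, and the package-level codec is passed only because the signature asks for it; as the implementation shows, it is not consulted yet):

```
// Hypothetical lookup using the new helper; the result follows the route
// constants registered above.
pub := ed25519.GenPrivKey().PubKey()
if route, found := PubkeyAminoRoute(cdc, pub); found {
	fmt.Println(route) // "tendermint/PubKeyEd25519"
}
```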


@@ -8,6 +8,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/crypto/ed25519"
"github.com/tendermint/tendermint/crypto/multisig"
"github.com/tendermint/tendermint/crypto/secp256k1"
)
@@ -15,12 +16,14 @@ type byter interface {
Bytes() []byte
}
func checkAminoBinary(t *testing.T, src byter, dst interface{}, size int) {
func checkAminoBinary(t *testing.T, src, dst interface{}, size int) {
// Marshal to binary bytes.
bz, err := cdc.MarshalBinaryBare(src)
require.Nil(t, err, "%+v", err)
// Make sure this is compatible with current (Bytes()) encoding.
assert.Equal(t, src.Bytes(), bz, "Amino binary vs Bytes() mismatch")
if byterSrc, ok := src.(byter); ok {
// Make sure this is compatible with current (Bytes()) encoding.
assert.Equal(t, byterSrc.Bytes(), bz, "Amino binary vs Bytes() mismatch")
}
// Make sure we have the expected length.
if size != -1 {
assert.Equal(t, size, len(bz), "Amino binary size mismatch")
@@ -51,26 +54,27 @@ func ExamplePrintRegisteredTypes() {
//| ---- | ---- | ------ | ----- | ------ |
//| PubKeyEd25519 | tendermint/PubKeyEd25519 | 0x1624DE64 | 0x20 | |
//| PubKeySecp256k1 | tendermint/PubKeySecp256k1 | 0xEB5AE987 | 0x21 | |
//| PubKeyMultisigThreshold | tendermint/PubKeyMultisigThreshold | 0x22C1F7E2 | variable | |
//| PrivKeyEd25519 | tendermint/PrivKeyEd25519 | 0xA3288910 | 0x40 | |
//| PrivKeySecp256k1 | tendermint/PrivKeySecp256k1 | 0xE1B0F79B | 0x20 | |
//| SignatureEd25519 | tendermint/SignatureEd25519 | 0x2031EA53 | 0x40 | |
//| SignatureSecp256k1 | tendermint/SignatureSecp256k1 | 0x7FC4A495 | variable | |
}
func TestKeyEncodings(t *testing.T) {
cases := []struct {
privKey crypto.PrivKey
privSize, pubSize int // binary sizes
privKey crypto.PrivKey
privSize, pubSize, sigSize int // binary sizes
}{
{
privKey: ed25519.GenPrivKey(),
privSize: 69,
pubSize: 37,
sigSize: 65,
},
{
privKey: secp256k1.GenPrivKey(),
privSize: 37,
pubSize: 38,
sigSize: 65,
},
}
@@ -84,13 +88,11 @@ func TestKeyEncodings(t *testing.T) {
assert.EqualValues(t, tc.privKey, priv3, "tc #%d", tcIndex)
// Check (de/en)codings of Signatures.
var sig1, sig2, sig3 crypto.Signature
var sig1, sig2 []byte
sig1, err := tc.privKey.Sign([]byte("something"))
assert.NoError(t, err, "tc #%d", tcIndex)
checkAminoBinary(t, sig1, &sig2, -1) // Signature size changes for Secp anyways.
checkAminoBinary(t, sig1, &sig2, tc.sigSize)
assert.EqualValues(t, sig1, sig2, "tc #%d", tcIndex)
checkAminoJSON(t, sig1, &sig3, false) // TODO also check Prefix bytes.
assert.EqualValues(t, sig1, sig3, "tc #%d", tcIndex)
// Check (de/en)codings of PubKeys.
pubKey := tc.privKey.PubKey()
@@ -105,7 +107,7 @@ func TestKeyEncodings(t *testing.T) {
func TestNilEncodings(t *testing.T) {
// Check nil Signature.
var a, b crypto.Signature
var a, b []byte
checkAminoJSON(t, &a, &b, true)
assert.EqualValues(t, a, b)
@@ -118,11 +120,29 @@ func TestNilEncodings(t *testing.T) {
var e, f crypto.PrivKey
checkAminoJSON(t, &e, &f, true)
assert.EqualValues(t, e, f)
}
func TestPubKeyInvalidDataProperReturnsEmpty(t *testing.T) {
pk, err := PubKeyFromBytes([]byte("foo"))
require.NotNil(t, err, "expecting a non-nil error")
require.Nil(t, pk, "expecting an empty public key on error")
require.NotNil(t, err)
require.Nil(t, pk)
}
func TestPubkeyAminoRoute(t *testing.T) {
tests := []struct {
key crypto.PubKey
want string
found bool
}{
{ed25519.PubKeyEd25519{}, ed25519.PubKeyAminoRoute, true},
{secp256k1.PubKeySecp256k1{}, secp256k1.PubKeyAminoRoute, true},
{&multisig.PubKeyMultisigThreshold{}, multisig.PubKeyMultisigThresholdAminoRoute, true},
}
for i, tc := range tests {
got, found := PubkeyAminoRoute(cdc, tc.key)
require.Equal(t, tc.found, found, "not equal on tc %d", i)
if tc.found {
require.Equal(t, tc.want, got, "not equal on tc %d", i)
}
}
}


@@ -3,7 +3,7 @@ package crypto
import (
"crypto/sha256"
"golang.org/x/crypto/ripemd160"
"golang.org/x/crypto/ripemd160" // forked to github.com/tendermint/crypto
)
func Sha256(bytes []byte) []byte {


@@ -1,106 +0,0 @@
// Package hkdfchacha20poly1305 creates an AEAD using hkdf, chacha20, and poly1305
// When sealing and opening, the hkdf is used to obtain the nonce and subkey for
// chacha20. Other than the change for the how the subkey and nonce for chacha
// are obtained, this is the same as chacha20poly1305
package hkdfchacha20poly1305
import (
"crypto/cipher"
"crypto/sha256"
"errors"
"io"
"golang.org/x/crypto/chacha20poly1305"
"golang.org/x/crypto/hkdf"
)
// Implements crypto.AEAD
type hkdfchacha20poly1305 struct {
key [KeySize]byte
}
const (
// KeySize is the size of the key used by this AEAD, in bytes.
KeySize = 32
// NonceSize is the size of the nonce used with this AEAD, in bytes.
NonceSize = 24
// TagSize is the size added from poly1305
TagSize = 16
// MaxPlaintextSize is the max size that can be passed into a single call of Seal
MaxPlaintextSize = (1 << 38) - 64
// MaxCiphertextSize is the max size that can be passed into a single call of Open,
// this differs from plaintext size due to the tag
MaxCiphertextSize = (1 << 38) - 48
// HkdfInfo is the parameter used internally for Hkdf's info parameter.
HkdfInfo = "TENDERMINT_SECRET_CONNECTION_FRAME_KEY_DERIVE"
)
//New xChaChapoly1305 AEAD with 24 byte nonces
func New(key []byte) (cipher.AEAD, error) {
if len(key) != KeySize {
return nil, errors.New("chacha20poly1305: bad key length")
}
ret := new(hkdfchacha20poly1305)
copy(ret.key[:], key)
return ret, nil
}
func (c *hkdfchacha20poly1305) NonceSize() int {
return NonceSize
}
func (c *hkdfchacha20poly1305) Overhead() int {
return TagSize
}
func (c *hkdfchacha20poly1305) Seal(dst, nonce, plaintext, additionalData []byte) []byte {
if len(nonce) != NonceSize {
panic("hkdfchacha20poly1305: bad nonce length passed to Seal")
}
if uint64(len(plaintext)) > MaxPlaintextSize {
panic("hkdfchacha20poly1305: plaintext too large")
}
subKey, chachaNonce := getSubkeyAndChachaNonceFromHkdf(&c.key, &nonce)
aead, err := chacha20poly1305.New(subKey[:])
if err != nil {
panic("hkdfchacha20poly1305: failed to initialize chacha20poly1305")
}
return aead.Seal(dst, chachaNonce[:], plaintext, additionalData)
}
func (c *hkdfchacha20poly1305) Open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) {
if len(nonce) != NonceSize {
return nil, errors.New("hkdfchacha20poly1305: bad nonce length passed to Open")
}
if uint64(len(ciphertext)) > MaxCiphertextSize {
return nil, errors.New("hkdfchacha20poly1305: ciphertext too large")
}
subKey, chachaNonce := getSubkeyAndChachaNonceFromHkdf(&c.key, &nonce)
aead, err := chacha20poly1305.New(subKey[:])
if err != nil {
panic("hkdfchacha20poly1305: failed to initialize chacha20poly1305")
}
return aead.Open(dst, chachaNonce[:], ciphertext, additionalData)
}
func getSubkeyAndChachaNonceFromHkdf(cKey *[32]byte, nonce *[]byte) (
subKey [KeySize]byte, chachaNonce [chacha20poly1305.NonceSize]byte) {
hash := sha256.New
hkdf := hkdf.New(hash, (*cKey)[:], *nonce, []byte(HkdfInfo))
_, err := io.ReadFull(hkdf, subKey[:])
if err != nil {
panic("hkdfchacha20poly1305: failed to read subkey from hkdf")
}
_, err = io.ReadFull(hkdf, chachaNonce[:])
if err != nil {
panic("hkdfchacha20poly1305: failed to read chachaNonce from hkdf")
}
return
}


@@ -0,0 +1,88 @@
package benchmarking
import (
"io"
"testing"
"github.com/tendermint/tendermint/crypto"
)
// The code in this file is adapted from agl/ed25519.
// As such it is under the following license.
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found at the bottom of this file.
type zeroReader struct{}
func (zeroReader) Read(buf []byte) (int, error) {
for i := range buf {
buf[i] = 0
}
return len(buf), nil
}
// BenchmarkKeyGeneration benchmarks the given key generation algorithm using
// a dummy reader.
func BenchmarkKeyGeneration(b *testing.B, GenerateKey func(reader io.Reader) crypto.PrivKey) {
var zero zeroReader
for i := 0; i < b.N; i++ {
GenerateKey(zero)
}
}
// BenchmarkSigning benchmarks the given signing algorithm using
// the provided privkey.
func BenchmarkSigning(b *testing.B, priv crypto.PrivKey) {
message := []byte("Hello, world!")
b.ResetTimer()
for i := 0; i < b.N; i++ {
priv.Sign(message)
}
}
// BenchmarkVerification benchmarks the given verification algorithm using
// the provided privkey on a constant message.
func BenchmarkVerification(b *testing.B, priv crypto.PrivKey) {
pub := priv.PubKey()
// use a short message, so this time doesn't get dominated by hashing.
message := []byte("Hello, world!")
signature, err := priv.Sign(message)
if err != nil {
b.Fatal(err)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
pub.VerifyBytes(message, signature)
}
}
// Below is the aforementioned license.
// Copyright (c) 2012 The Go Authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
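A concrete key package would drive these helpers from its own benchmark file. The sketch below wires them to ed25519 purely for illustration: the benchmarking import path and the test-file placement are assumptions, and the key-generation wrapper simply ignores the reader argument instead of threading it through.

package ed25519_test // hypothetical placement, for illustration only

import (
	"io"
	"testing"

	"github.com/tendermint/tendermint/crypto"
	"github.com/tendermint/tendermint/crypto/ed25519"
	// assumed import path for the benchmarking package above
	"github.com/tendermint/tendermint/crypto/internal/benchmarking"
)

func BenchmarkEd25519KeyGeneration(b *testing.B) {
	benchmarking.BenchmarkKeyGeneration(b, func(_ io.Reader) crypto.PrivKey {
		return ed25519.GenPrivKey() // sketch: the supplied reader is not used
	})
}

func BenchmarkEd25519Signing(b *testing.B) {
	benchmarking.BenchmarkSigning(b, ed25519.GenPrivKey())
}

func BenchmarkEd25519Verification(b *testing.B) {
	benchmarking.BenchmarkVerification(b, ed25519.GenPrivKey())
}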

6
crypto/merkle/compile.sh Normal file

@@ -0,0 +1,6 @@
#! /bin/bash
protoc --gogo_out=. -I $GOPATH/src/ -I . -I $GOPATH/src/github.com/gogo/protobuf/protobuf merkle.proto
echo "--> adding nolint declarations to protobuf generated files"
awk '/package merkle/ { print "//nolint: gas"; print; next }1' merkle.pb.go > merkle.pb.go.new
mv merkle.pb.go.new merkle.pb.go

792
crypto/merkle/merkle.pb.go Normal file

@@ -0,0 +1,792 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: crypto/merkle/merkle.proto
package merkle
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "github.com/gogo/protobuf/gogoproto"
import bytes "bytes"
import io "io"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
// ProofOp defines an operation used for calculating the Merkle root.
// The data could be in an arbitrary format, providing the necessary data,
// for example a neighbouring node hash.
type ProofOp struct {
Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
Data []byte `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ProofOp) Reset() { *m = ProofOp{} }
func (m *ProofOp) String() string { return proto.CompactTextString(m) }
func (*ProofOp) ProtoMessage() {}
func (*ProofOp) Descriptor() ([]byte, []int) {
return fileDescriptor_merkle_5d3f6051907285da, []int{0}
}
func (m *ProofOp) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ProofOp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ProofOp.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *ProofOp) XXX_Merge(src proto.Message) {
xxx_messageInfo_ProofOp.Merge(dst, src)
}
func (m *ProofOp) XXX_Size() int {
return m.Size()
}
func (m *ProofOp) XXX_DiscardUnknown() {
xxx_messageInfo_ProofOp.DiscardUnknown(m)
}
var xxx_messageInfo_ProofOp proto.InternalMessageInfo
func (m *ProofOp) GetType() string {
if m != nil {
return m.Type
}
return ""
}
func (m *ProofOp) GetKey() []byte {
if m != nil {
return m.Key
}
return nil
}
func (m *ProofOp) GetData() []byte {
if m != nil {
return m.Data
}
return nil
}
// Proof is a Merkle proof defined by a list of ProofOps
type Proof struct {
Ops []ProofOp `protobuf:"bytes,1,rep,name=ops" json:"ops"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Proof) Reset() { *m = Proof{} }
func (m *Proof) String() string { return proto.CompactTextString(m) }
func (*Proof) ProtoMessage() {}
func (*Proof) Descriptor() ([]byte, []int) {
return fileDescriptor_merkle_5d3f6051907285da, []int{1}
}
func (m *Proof) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Proof) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Proof.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *Proof) XXX_Merge(src proto.Message) {
xxx_messageInfo_Proof.Merge(dst, src)
}
func (m *Proof) XXX_Size() int {
return m.Size()
}
func (m *Proof) XXX_DiscardUnknown() {
xxx_messageInfo_Proof.DiscardUnknown(m)
}
var xxx_messageInfo_Proof proto.InternalMessageInfo
func (m *Proof) GetOps() []ProofOp {
if m != nil {
return m.Ops
}
return nil
}
func init() {
proto.RegisterType((*ProofOp)(nil), "merkle.ProofOp")
proto.RegisterType((*Proof)(nil), "merkle.Proof")
}
func (this *ProofOp) Equal(that interface{}) bool {
if that == nil {
return this == nil
}
that1, ok := that.(*ProofOp)
if !ok {
that2, ok := that.(ProofOp)
if ok {
that1 = &that2
} else {
return false
}
}
if that1 == nil {
return this == nil
} else if this == nil {
return false
}
if this.Type != that1.Type {
return false
}
if !bytes.Equal(this.Key, that1.Key) {
return false
}
if !bytes.Equal(this.Data, that1.Data) {
return false
}
if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) {
return false
}
return true
}
func (this *Proof) Equal(that interface{}) bool {
if that == nil {
return this == nil
}
that1, ok := that.(*Proof)
if !ok {
that2, ok := that.(Proof)
if ok {
that1 = &that2
} else {
return false
}
}
if that1 == nil {
return this == nil
} else if this == nil {
return false
}
if len(this.Ops) != len(that1.Ops) {
return false
}
for i := range this.Ops {
if !this.Ops[i].Equal(&that1.Ops[i]) {
return false
}
}
if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) {
return false
}
return true
}
func (m *ProofOp) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ProofOp) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Type) > 0 {
dAtA[i] = 0xa
i++
i = encodeVarintMerkle(dAtA, i, uint64(len(m.Type)))
i += copy(dAtA[i:], m.Type)
}
if len(m.Key) > 0 {
dAtA[i] = 0x12
i++
i = encodeVarintMerkle(dAtA, i, uint64(len(m.Key)))
i += copy(dAtA[i:], m.Key)
}
if len(m.Data) > 0 {
dAtA[i] = 0x1a
i++
i = encodeVarintMerkle(dAtA, i, uint64(len(m.Data)))
i += copy(dAtA[i:], m.Data)
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *Proof) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Proof) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Ops) > 0 {
for _, msg := range m.Ops {
dAtA[i] = 0xa
i++
i = encodeVarintMerkle(dAtA, i, uint64(msg.Size()))
n, err := msg.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n
}
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func encodeVarintMerkle(dAtA []byte, offset int, v uint64) int {
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return offset + 1
}
func NewPopulatedProofOp(r randyMerkle, easy bool) *ProofOp {
this := &ProofOp{}
this.Type = string(randStringMerkle(r))
v1 := r.Intn(100)
this.Key = make([]byte, v1)
for i := 0; i < v1; i++ {
this.Key[i] = byte(r.Intn(256))
}
v2 := r.Intn(100)
this.Data = make([]byte, v2)
for i := 0; i < v2; i++ {
this.Data[i] = byte(r.Intn(256))
}
if !easy && r.Intn(10) != 0 {
this.XXX_unrecognized = randUnrecognizedMerkle(r, 4)
}
return this
}
func NewPopulatedProof(r randyMerkle, easy bool) *Proof {
this := &Proof{}
if r.Intn(10) != 0 {
v3 := r.Intn(5)
this.Ops = make([]ProofOp, v3)
for i := 0; i < v3; i++ {
v4 := NewPopulatedProofOp(r, easy)
this.Ops[i] = *v4
}
}
if !easy && r.Intn(10) != 0 {
this.XXX_unrecognized = randUnrecognizedMerkle(r, 2)
}
return this
}
type randyMerkle interface {
Float32() float32
Float64() float64
Int63() int64
Int31() int32
Uint32() uint32
Intn(n int) int
}
func randUTF8RuneMerkle(r randyMerkle) rune {
ru := r.Intn(62)
if ru < 10 {
return rune(ru + 48)
} else if ru < 36 {
return rune(ru + 55)
}
return rune(ru + 61)
}
func randStringMerkle(r randyMerkle) string {
v5 := r.Intn(100)
tmps := make([]rune, v5)
for i := 0; i < v5; i++ {
tmps[i] = randUTF8RuneMerkle(r)
}
return string(tmps)
}
func randUnrecognizedMerkle(r randyMerkle, maxFieldNumber int) (dAtA []byte) {
l := r.Intn(5)
for i := 0; i < l; i++ {
wire := r.Intn(4)
if wire == 3 {
wire = 5
}
fieldNumber := maxFieldNumber + r.Intn(100)
dAtA = randFieldMerkle(dAtA, r, fieldNumber, wire)
}
return dAtA
}
func randFieldMerkle(dAtA []byte, r randyMerkle, fieldNumber int, wire int) []byte {
key := uint32(fieldNumber)<<3 | uint32(wire)
switch wire {
case 0:
dAtA = encodeVarintPopulateMerkle(dAtA, uint64(key))
v6 := r.Int63()
if r.Intn(2) == 0 {
v6 *= -1
}
dAtA = encodeVarintPopulateMerkle(dAtA, uint64(v6))
case 1:
dAtA = encodeVarintPopulateMerkle(dAtA, uint64(key))
dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)))
case 2:
dAtA = encodeVarintPopulateMerkle(dAtA, uint64(key))
ll := r.Intn(100)
dAtA = encodeVarintPopulateMerkle(dAtA, uint64(ll))
for j := 0; j < ll; j++ {
dAtA = append(dAtA, byte(r.Intn(256)))
}
default:
dAtA = encodeVarintPopulateMerkle(dAtA, uint64(key))
dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)))
}
return dAtA
}
func encodeVarintPopulateMerkle(dAtA []byte, v uint64) []byte {
for v >= 1<<7 {
dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80))
v >>= 7
}
dAtA = append(dAtA, uint8(v))
return dAtA
}
func (m *ProofOp) Size() (n int) {
var l int
_ = l
l = len(m.Type)
if l > 0 {
n += 1 + l + sovMerkle(uint64(l))
}
l = len(m.Key)
if l > 0 {
n += 1 + l + sovMerkle(uint64(l))
}
l = len(m.Data)
if l > 0 {
n += 1 + l + sovMerkle(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *Proof) Size() (n int) {
var l int
_ = l
if len(m.Ops) > 0 {
for _, e := range m.Ops {
l = e.Size()
n += 1 + l + sovMerkle(uint64(l))
}
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func sovMerkle(x uint64) (n int) {
for {
n++
x >>= 7
if x == 0 {
break
}
}
return n
}
func sozMerkle(x uint64) (n int) {
return sovMerkle(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (m *ProofOp) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMerkle
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ProofOp: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ProofOp: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMerkle
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthMerkle
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Type = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMerkle
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthMerkle
}
postIndex := iNdEx + byteLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
if m.Key == nil {
m.Key = []byte{}
}
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMerkle
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthMerkle
}
postIndex := iNdEx + byteLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...)
if m.Data == nil {
m.Data = []byte{}
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipMerkle(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthMerkle
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *Proof) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMerkle
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Proof: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Proof: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Ops", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMerkle
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthMerkle
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Ops = append(m.Ops, ProofOp{})
if err := m.Ops[len(m.Ops)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipMerkle(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthMerkle
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipMerkle(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowMerkle
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowMerkle
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
return iNdEx, nil
case 1:
iNdEx += 8
return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowMerkle
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
iNdEx += length
if length < 0 {
return 0, ErrInvalidLengthMerkle
}
return iNdEx, nil
case 3:
for {
var innerWire uint64
var start int = iNdEx
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowMerkle
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
innerWire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
innerWireType := int(innerWire & 0x7)
if innerWireType == 4 {
break
}
next, err := skipMerkle(dAtA[start:])
if err != nil {
return 0, err
}
iNdEx = start + next
}
return iNdEx, nil
case 4:
return iNdEx, nil
case 5:
iNdEx += 4
return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
}
panic("unreachable")
}
var (
ErrInvalidLengthMerkle = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowMerkle = fmt.Errorf("proto: integer overflow")
)
func init() { proto.RegisterFile("crypto/merkle/merkle.proto", fileDescriptor_merkle_5d3f6051907285da) }
var fileDescriptor_merkle_5d3f6051907285da = []byte{
// 200 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4a, 0x2e, 0xaa, 0x2c,
0x28, 0xc9, 0xd7, 0xcf, 0x4d, 0x2d, 0xca, 0xce, 0x49, 0x85, 0x52, 0x7a, 0x05, 0x45, 0xf9, 0x25,
0xf9, 0x42, 0x6c, 0x10, 0x9e, 0x94, 0x6e, 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e,
0xae, 0x7e, 0x7a, 0x7e, 0x7a, 0xbe, 0x3e, 0x58, 0x3a, 0xa9, 0x34, 0x0d, 0xcc, 0x03, 0x73, 0xc0,
0x2c, 0x88, 0x36, 0x25, 0x67, 0x2e, 0xf6, 0x80, 0xa2, 0xfc, 0xfc, 0x34, 0xff, 0x02, 0x21, 0x21,
0x2e, 0x96, 0x92, 0xca, 0x82, 0x54, 0x09, 0x46, 0x05, 0x46, 0x0d, 0xce, 0x20, 0x30, 0x5b, 0x48,
0x80, 0x8b, 0x39, 0x3b, 0xb5, 0x52, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc4, 0x04, 0xa9,
0x4a, 0x49, 0x2c, 0x49, 0x94, 0x60, 0x06, 0x0b, 0x81, 0xd9, 0x4a, 0x06, 0x5c, 0xac, 0x60, 0x43,
0x84, 0xd4, 0xb9, 0x98, 0xf3, 0x0b, 0x8a, 0x25, 0x18, 0x15, 0x98, 0x35, 0xb8, 0x8d, 0xf8, 0xf5,
0xa0, 0x0e, 0x84, 0x5a, 0xe0, 0xc4, 0x72, 0xe2, 0x9e, 0x3c, 0x43, 0x10, 0x48, 0x85, 0x93, 0xc8,
0x8f, 0x87, 0x72, 0x8c, 0x2b, 0x1e, 0xc9, 0x31, 0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c,
0xe3, 0x83, 0x47, 0x72, 0x8c, 0x49, 0x6c, 0x60, 0x37, 0x19, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff,
0xb9, 0x2b, 0x0f, 0xd1, 0xe8, 0x00, 0x00, 0x00,
}
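Since the generated Marshal/Unmarshal/Equal methods above are what callers actually use, a short round-trip sketch may help. The field values are made up; the import path follows from the file location crypto/merkle/merkle.pb.go.

package main

import (
	"fmt"

	"github.com/tendermint/tendermint/crypto/merkle"
)

func main() {
	// Build a proof with a single operation and round-trip it through the
	// generated protobuf marshaling.
	p := merkle.Proof{
		Ops: []merkle.ProofOp{{Type: "simple:v", Key: []byte("mykey"), Data: []byte("opaque-op-data")}},
	}

	bz, err := p.Marshal() // generated: sizes the buffer, then calls MarshalTo
	if err != nil {
		panic(err)
	}

	var decoded merkle.Proof
	if err := decoded.Unmarshal(bz); err != nil { // generated varint/field decoder
		panic(err)
	}
	fmt.Println(decoded.Equal(&p)) // true: generated Equal compares the ops field by field
}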

crypto/merkle/merkle.proto Normal file

@@ -0,0 +1,30 @@
syntax = "proto3";
package merkle;
// For more information on gogo.proto, see:
// https://github.com/gogo/protobuf/blob/master/extensions.md
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
option (gogoproto.marshaler_all) = true;
option (gogoproto.unmarshaler_all) = true;
option (gogoproto.sizer_all) = true;
option (gogoproto.populate_all) = true;
option (gogoproto.equal_all) = true;
//----------------------------------------
// Message types
// ProofOp defines an operation used for calculating the Merkle root.
// The data could be in an arbitrary format, providing the necessary data,
// for example a neighbouring node hash.
message ProofOp {
string type = 1;
bytes key = 2;
bytes data = 3;
}
// Proof is a Merkle proof defined by a list of ProofOps
message Proof {
repeated ProofOp ops = 1 [(gogoproto.nullable)=false];
}

138
crypto/merkle/proof.go Normal file

@@ -0,0 +1,138 @@
package merkle
import (
"bytes"
cmn "github.com/tendermint/tendermint/libs/common"
)
//----------------------------------------
// ProofOp gets converted to an instance of ProofOperator:
// ProofOperator is a layer for calculating intermediate Merkle roots
// when a series of Merkle trees are chained together.
// Run() takes leaf values from a tree and returns the Merkle
// root for the corresponding tree. It takes and returns a list of bytes
// to allow multiple leaves to be part of a single proof, for instance in a range proof.
// ProofOp() encodes the ProofOperator in a generic way so it can later be
// decoded with OpDecoder.
type ProofOperator interface {
Run([][]byte) ([][]byte, error)
GetKey() []byte
ProofOp() ProofOp
}
//----------------------------------------
// Operations on a list of ProofOperators
// ProofOperators is a slice of ProofOperator(s).
// Each operator is applied to the input value sequentially,
// and the final Merkle root is verified against already-known (trusted) data.
type ProofOperators []ProofOperator
func (poz ProofOperators) VerifyValue(root []byte, keypath string, value []byte) (err error) {
return poz.Verify(root, keypath, [][]byte{value})
}
func (poz ProofOperators) Verify(root []byte, keypath string, args [][]byte) (err error) {
keys, err := KeyPathToKeys(keypath)
if err != nil {
return
}
for i, op := range poz {
key := op.GetKey()
if len(key) != 0 {
if len(keys) == 0 {
return cmn.NewError("Key path has insufficient # of parts: expected no more keys but got %+v", string(key))
}
lastKey := keys[len(keys)-1]
if !bytes.Equal(lastKey, key) {
return cmn.NewError("Key mismatch on operation #%d: expected %+v but got %+v", i, string(lastKey), string(key))
}
keys = keys[:len(keys)-1]
}
args, err = op.Run(args)
if err != nil {
return
}
}
if !bytes.Equal(root, args[0]) {
return cmn.NewError("Calculated root hash is invalid: expected %+v but got %+v", root, args[0])
}
if len(keys) != 0 {
return cmn.NewError("Keypath not consumed all")
}
return nil
}
//----------------------------------------
// ProofRuntime - main entrypoint
type OpDecoder func(ProofOp) (ProofOperator, error)
type ProofRuntime struct {
decoders map[string]OpDecoder
}
func NewProofRuntime() *ProofRuntime {
return &ProofRuntime{
decoders: make(map[string]OpDecoder),
}
}
func (prt *ProofRuntime) RegisterOpDecoder(typ string, dec OpDecoder) {
_, ok := prt.decoders[typ]
if ok {
panic("already registered for type " + typ)
}
prt.decoders[typ] = dec
}
func (prt *ProofRuntime) Decode(pop ProofOp) (ProofOperator, error) {
decoder := prt.decoders[pop.Type]
if decoder == nil {
return nil, cmn.NewError("unrecognized proof type %v", pop.Type)
}
return decoder(pop)
}
func (prt *ProofRuntime) DecodeProof(proof *Proof) (ProofOperators, error) {
var poz ProofOperators
for _, pop := range proof.Ops {
operator, err := prt.Decode(pop)
if err != nil {
return nil, cmn.ErrorWrap(err, "decoding a proof operator")
}
poz = append(poz, operator)
}
return poz, nil
}
func (prt *ProofRuntime) VerifyValue(proof *Proof, root []byte, keypath string, value []byte) (err error) {
return prt.Verify(proof, root, keypath, [][]byte{value})
}
// TODO In the long run we'll need a method of classification of ops,
// whether existence or absence or perhaps a third?
func (prt *ProofRuntime) VerifyAbsence(proof *Proof, root []byte, keypath string) (err error) {
return prt.Verify(proof, root, keypath, nil)
}
func (prt *ProofRuntime) Verify(proof *Proof, root []byte, keypath string, args [][]byte) (err error) {
poz, err := prt.DecodeProof(proof)
if err != nil {
return cmn.ErrorWrap(err, "decoding proof")
}
return poz.Verify(root, keypath, args)
}
// DefaultProofRuntime only knows about Simple value
// proofs.
// To use e.g. IAVL proofs, register op-decoders as
// defined in the IAVL package.
func DefaultProofRuntime() (prt *ProofRuntime) {
prt = NewProofRuntime()
prt.RegisterOpDecoder(ProofOpSimpleValue, SimpleValueOpDecoder)
return
}
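The comments at the top of proof.go describe ProofOperator and ProofRuntime in the abstract; the sketch below makes them concrete with a toy operator. Everything named Dummy* is invented for illustration, the "/mykey" keypath assumes the KeyPathToKeys encoding defined elsewhere in this package, and a real operator would of course hash its inputs instead of returning a constant.

package main

import (
	"fmt"

	"github.com/tendermint/tendermint/crypto/merkle"
)

// DummyOp is a toy ProofOperator: Run ignores its input and always "hashes"
// to the same fixed root, and GetKey reports the key it covers.
type DummyOp struct{ key []byte }

func (op DummyOp) Run(_ [][]byte) ([][]byte, error) { return [][]byte{[]byte("dummy-root")}, nil }
func (op DummyOp) GetKey() []byte                   { return op.key }
func (op DummyOp) ProofOp() merkle.ProofOp          { return merkle.ProofOp{Type: "dummy", Key: op.key} }

func main() {
	prt := merkle.NewProofRuntime()
	prt.RegisterOpDecoder("dummy", func(pop merkle.ProofOp) (merkle.ProofOperator, error) {
		return DummyOp{key: pop.Key}, nil
	})

	proof := &merkle.Proof{Ops: []merkle.ProofOp{{Type: "dummy", Key: []byte("mykey")}}}

	// The keypath is consumed right-to-left while operators run leaf-to-root;
	// here a single op consumes the single key and its output must equal the root.
	err := prt.VerifyValue(proof, []byte("dummy-root"), "/mykey", []byte("any value"))
	fmt.Println(err) // <nil>: the computed root matches and the keypath was fully consumed
}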

Some files were not shown because too many files have changed in this diff.