Compare commits


332 Commits

Author SHA1 Message Date
Ethan Buchman
d419fffe18 Merge pull request #2350 from tendermint/release/v0.24.0
Release/v0.24.0
2018-09-07 07:46:44 -04:00
Ethan Buchman
c8895dab98 minor note in UPGRADING.md 2018-09-07 07:43:41 -04:00
Ethan Buchman
8b94deca73 Merge pull request #2354 from tendermint/anton/fix-typos-in-spec
Fix typos in spec
2018-09-07 07:28:50 -04:00
Ethan Buchman
4cd2e40fb1 TMBIN -> Amino 2018-09-07 07:28:58 -04:00
Anton Kaliaev
47dc4e6428 fix a few typos in spec 2018-09-07 11:40:16 +04:00
Ethan Buchman
94288006ba minor fixes 2018-09-06 22:47:05 -04:00
Ethan Buchman
22445a5029 Merge pull request #2349 from tendermint/release/v0.24.0
update UPGRADING.md and minor docs fixes
2018-09-06 22:25:20 -04:00
Ethan Buchman
299d46304d update UPGRADING.md and minor docs fixes 2018-09-06 22:35:31 -04:00
Ethan Buchman
5106af484f docs: add abci spec to config.js 2018-09-06 22:18:15 -04:00
Ethan Buchman
114c405120 docs/spec/abci: fixes and more from #2249 2018-09-06 22:17:00 -04:00
Ethan Buchman
246a56283a Merge pull request #2343 from tendermint/release/v0.24.0
Major spec update to prepare v0.24.0 for release
2018-09-06 20:44:54 -04:00
Ethan Buchman
1144e72c61 docs: refactor ABCI docs
* move spec/software/abci.md to spec/abci/apps.md and improve it
* move some of app-dev/app-development.md to spec/abci/client-server.md
2018-09-06 20:51:36 -04:00
Ethan Buchman
3fd54c5df5 docs/spec/abci: update spec
* better overview section
* section on tags
* remove notes about state/concurrency from CheckTx
* incorporate feedback from #2249
* explain how validator set updates affect future blocks
2018-09-06 19:36:30 -04:00
Ethan Buchman
20c55cffc4 docs: move app-dev/abci-spec.md to spec/abci/abci.md 2018-09-06 18:36:11 -04:00
Ethan Buchman
dea34506fb types/time: add note about stripping monotonic part 2018-09-06 17:58:52 -04:00
Ethan Buchman
54fe6ef73c version: add and bump abci version 2018-09-06 17:58:12 -04:00
Ethan Buchman
6fd79d1545 docs/spec/blockchain: remove tags from result for now 2018-09-06 12:47:26 -04:00
Ethan Buchman
8fcabe8b05 docs: fix note about ChainID size 2018-09-06 12:42:23 -04:00
Ethan Buchman
e0fa827a53 docs/spec/blockchain: specify consensus params in state.md 2018-09-06 12:41:57 -04:00
Ethan Buchman
bdf3238710 docs/spec/blockchain: bring blockchain.md up-to-date 2018-09-06 12:41:33 -04:00
Ethan Buchman
ed9e00a8a7 docs/spec/blockchain: fix encoding JSON 2018-09-06 12:41:02 -04:00
Ethan Buchman
604eae86b6 Merge branch 'master' into release/v0.24.0 2018-09-05 19:15:06 -04:00
Ethan Buchman
b616e54c9b changelog and version 2018-09-05 19:26:55 -04:00
Ethan Buchman
fae21c9f52 linkify changelog pending 2018-09-05 19:25:33 -04:00
Ethan Buchman
8c0c4844b6 Merge pull request #2342 from tendermint/dev/changelog2
add script to add links in changelog
2018-09-05 19:10:48 -04:00
Ethan Buchman
bbc30f992e Merge pull request #2340 from tendermint/bucky/changelog
name drop external contribs in changelog
2018-09-05 19:03:03 -04:00
Ethan Buchman
91f8af8fd8 docs/spec/blockchain: update vote, signature, time 2018-09-05 19:14:18 -04:00
ValarDragon
0881068d76 add script to add links in changelog 2018-09-05 15:58:29 -07:00
Ethan Buchman
80c217089a docs/spec/blockchain: remove json tags, dont use HexBytes 2018-09-05 18:57:54 -04:00
Ethan Buchman
61914cf48e docs/tm-core/using-tm: fix indentation for genesis.validators 2018-09-05 18:56:58 -04:00
Ethan Buchman
4416c9e4bc fix links in abci readme. fixes #2335 2018-09-05 18:44:48 -04:00
Ethan Buchman
c9510d0f50 name drop external contribs in changelog 2018-09-05 18:26:12 -04:00
Ethan Buchman
892b170818 Bucky/changelog (#2339)
* update changelog pending

* fixes from review
2018-09-05 18:02:45 -04:00
Zach
7f6bd5c161 docs & spec: deduplicate block-structure.md (#2331) 2018-09-05 12:21:04 -04:00
Anton Kaliaev
eb5cf0f0dd ignore existing peers in DialPeersAsync (#2327)
* ignore existing peers in DialPeersAsync

Fixes #2253

* rename HasPeerWithAddress to IsDialingOrExistingAddress

[breaking] remove Switch#IsDialing

* check if addrBook is nil

to be consistent with other usages of addrBook across Switch

* different log messages for 2 use-cases
2018-09-05 11:52:22 -04:00
Ethan Buchman
2a3520a714 Merge pull request #2134 from tendermint/2027-cs-failure
Add priv_validator_laddr to config.toml
2018-09-05 11:48:34 -04:00
Ethan Buchman
39ab199181 Merge pull request #2333 from tendermint/anton/docs
Update indexing docs & write a test case
2018-09-05 11:41:42 -04:00
Ethan Buchman
4c7591cf6b Merge pull request #2336 from tendermint/zach/hotfix
fix failing website build
2018-09-05 09:14:47 -04:00
zramsay
0149c8ffee fix failing website build 2018-09-05 08:56:48 -04:00
Zach
9db66deaa2 test make localnet in CI (#2281)
* tests: use make localnet

based on @jackzampolin work in:
https://github.com/cosmos/cosmos-sdk/pull/2067

* keep the p2p tests for now

* fixes after my own review

* nohup

* remove nohup
2018-09-05 13:25:15 +04:00
Anton Kaliaev
098681fd91 test searching txs by height
Refs #2051
2018-09-05 12:01:38 +04:00
Anton Kaliaev
cb91cd5965 [docs] one can also index txs by height now 2018-09-05 11:05:06 +04:00
Zach
92185c017c Several minor docs & spec cleanup (#2330)
* addr_book_strict=false on local nets

* link to spec

* spec: remove TODO, see #1749 instead

* spec: make issues from TODOs

* update docs on addr_book_strict option
2018-09-05 10:30:36 +04:00
JamesRay
d0bb1ab2b0 Filter out empty addresses in persistent_peers/seeds lists (#2323)
Fixes #2320
2018-09-05 10:13:25 +04:00
cong
d27cd972d2 Index tx.height (#2324)
Refs #2051
2018-09-05 10:00:10 +04:00
Anton Kaliaev
29d2db352e update outdated abci-cli install instructions (#2325)
https://github.com/tendermint/tendermint/pull/2301
2018-09-04 23:20:45 +04:00
Ethan Buchman
eabb1ece8e tmtime: Canonical, some comments (#2312) 2018-09-04 12:20:58 +04:00
Anton Kaliaev
166fd82b70 max-bytes PR follow-up (#2318)
* ReapMaxTxs: return all txs if max is negative

this mirrors ReapMaxBytes behavior

See https://github.com/tendermint/tendermint/pull/2184#discussion_r214439950

* increase MaxAminoOverheadForBlock

tested with:

```
func TestMaxAminoOverheadForBlock(t *testing.T) {

        maxChainID := ""
        for i := 0; i < MaxChainIDLen; i++ {
                maxChainID += "𠜎"
        }

        h := Header{
                ChainID:            maxChainID,
                Height:             10,
                Time:               time.Now().UTC(),
                NumTxs:             100,
                TotalTxs:           200,
                LastBlockID:        makeBlockID(make([]byte, 20), 300, make([]byte, 20)),
                LastCommitHash:     tmhash.Sum([]byte("last_commit_hash")),
                DataHash:           tmhash.Sum([]byte("data_hash")),
                ValidatorsHash:     tmhash.Sum([]byte("validators_hash")),
                NextValidatorsHash: tmhash.Sum([]byte("next_validators_hash")),
                ConsensusHash:      tmhash.Sum([]byte("consensus_hash")),
                AppHash:            tmhash.Sum([]byte("app_hash")),
                LastResultsHash:    tmhash.Sum([]byte("last_results_hash")),
                EvidenceHash:       tmhash.Sum([]byte("evidence_hash")),
                ProposerAddress:    tmhash.Sum([]byte("proposer_address")),
        }
        b := Block{
                Header:     h,
                Data:       Data{Txs: makeTxs(10000, 100)},
                Evidence:   EvidenceData{},
                LastCommit: &Commit{},
        }

        bz, err := cdc.MarshalBinary(b)
        require.NoError(t, err)

        assert.Equal(t, MaxHeaderBytes+MaxAminoOverheadForBlock-2, len(bz)-1000000-20000-1)
}
```

* fix MaxYYY constants calculation

by using math.MaxInt64

See https://github.com/tendermint/tendermint/pull/2184#discussion_r214444244

* pass mempool filter as an option

See https://github.com/tendermint/tendermint/pull/2184#discussion_r214445869

* fixes after Dev's comments
2018-09-04 11:46:34 +04:00
Ismail Khoffi
1de32fba17 Check for int overflow in clist (#2289)
* explicitly panic if max capacity is reached

* address review comments

* comments and a test
2018-09-02 02:13:09 -04:00
Zarko Milosevic
7b88172f41 Implement BFT time (#2203)
* Implement BFT time

* set LastValidators when creating state in state helper

for heights >= 2
2018-08-31 19:33:51 -04:00
Ethan Buchman
ffe91ae9e3 Merge pull request #2184 from tendermint/2035-max-bytes
MaxBytes
2018-08-31 15:16:37 -04:00
Ethan Buchman
03afad3218 Merge pull request #2307 from tendermint/master
libs/autofile: bring back loops (#2261)
2018-08-31 14:42:48 -04:00
Peng Zhong
5ecdfacb8e fix docs base directory (#2295) 2018-08-31 14:09:43 -04:00
Ethan Buchman
9e940b95ad libs/autofile: bring back loops (#2261)
* libs/autofile: bring back loops

* changelog, version
2018-08-31 14:05:49 -04:00
Anton Kaliaev
4147f856dc update arch doc 2018-08-31 16:10:41 +04:00
Anton Kaliaev
02d1b03abb update comment for MaxBlockSizeBytes 2018-08-31 16:01:22 +04:00
Anton Kaliaev
e873fed815 calculate amino overhead on the fly 2018-08-31 16:01:22 +04:00
Anton Kaliaev
e957f322c7 be more precise in comments 2018-08-31 16:01:21 +04:00
Anton Kaliaev
ad3d42981a update Gopkg.lock 2018-08-31 16:01:21 +04:00
Anton Kaliaev
0f7485690e limit chain ID to 50 symbols max 2018-08-31 16:01:21 +04:00
Anton Kaliaev
d73c5cbdb1 reap max bytes from the mempool & check transaction size
See ADR 020: Limiting txs size inside a block docs/architecture/adr-020-block-size.md

Refs #2035
2018-08-31 16:01:21 +04:00
Anton Kaliaev
7b2f7090fd add missing changelog entry (#2303)
https://github.com/tendermint/tendermint/pull/2264#issuecomment-417378396
2018-08-31 11:59:52 +04:00
Dev Ojha
61ab10d655 config: reduce default mempool size (#2300)
* config: reduce default mempool size

This reduces the mempool size from 100k to 5k. Note that each secp256k1 sig
takes .5ms to compute. Therefore an adversary could previously delay every
node on the network by 50 seconds of computation upon receiving a block.

This now reduces that ability to a delay of only 2.5 seconds per node. This
change should be reverted once ABCI recheck is implemented.

* (squash this) fix test
2018-08-30 17:41:58 -04:00
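The delay figures in the entry above follow directly from the per-signature cost; a minimal back-of-the-envelope sketch (the 0.5 ms figure and the two mempool sizes come from the commit message, everything else is illustrative):

```
package main

import (
	"fmt"
	"time"
)

func main() {
	sigCost := 500 * time.Microsecond // ~0.5 ms per secp256k1 verification, per the commit message

	for _, mempoolSize := range []int{100000, 5000} { // old and new default sizes
		worstCase := time.Duration(mempoolSize) * sigCost
		fmt.Printf("mempool size %6d -> worst-case recheck delay %v\n", mempoolSize, worstCase)
	}
	// Prints roughly 50s for 100k txs and 2.5s for 5k txs.
}
```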
Anton Kaliaev
3a6cc5e6af cache codecov script (#2291) 2018-08-30 10:11:33 +04:00
Alessio Treglia
c43fb700e3 New NewGoLevelDBWithOpts() to pass opts down to goleveldb (#2293)
Closes: #2292
2018-08-29 08:44:55 +04:00
Dev Ojha
bd531401a0 mempool: Store txs by hash inside of cache (#2234)
* mempool: Store txs by hash inside of cache

This allows for large cache sizes, without fear of the memory
consumption growing rapidly.

* (squash this) rename hashedTx -> txHash
2018-08-29 08:10:06 +04:00
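A rough sketch of the idea in the entry above: key the cache map by a fixed-size hash of the transaction rather than by the full tx bytes, so memory use stays bounded even with a large cache. Names and structure here are illustrative, not the actual mempool code.

```
package mempool

import "crypto/sha256"

type txCache struct {
	entries map[[sha256.Size]byte]struct{} // fixed 32-byte keys instead of full tx bytes
}

func newTxCache() *txCache {
	return &txCache{entries: make(map[[sha256.Size]byte]struct{})}
}

// Push records a tx and reports whether it was not already cached.
func (c *txCache) Push(tx []byte) bool {
	key := sha256.Sum256(tx)
	if _, ok := c.entries[key]; ok {
		return false
	}
	c.entries[key] = struct{}{}
	return true
}
```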
Ismail Khoffi
9d06d7e306 update secret connection to use a little endian encoded nonce (#2264)
* update secret connection to use a little endian encoded nonce

* update encoding of chunk length to be little endian, too

* update comment

* Change comment slightly to trigger CircleCI
2018-08-28 09:37:38 +04:00
Dev Ojha
b1bc3e4f89 crypto/secp256k1: Fix signature malleability, adopt more efficient en… (#2239)
* crypto/secp256k1: Fix signature malleability, adopt more efficient encoding

This removes signature malleability per ADR 14, and makes secp match
the encoding in ADR 15.

* (squash this) add lock
2018-08-28 09:32:54 +04:00
Ethan Buchman
38b401657e Cleanup up Multisig naming (#2255)
* crypto/multisig: Pubkey -> PubKey

* crypto/encoding/amino: use pkg vars for routes

* crypto/multisig/bitarray

* crypto/multisig: ThresholdMultiSignaturePubKey -> PubKeyMultisigThreshold

* crypto/encoding/amino: add PubKeyMultisig to table

* remove bA import alias

https://github.com/tendermint/tendermint/pull/2255#discussion_r211900709
2018-08-28 08:41:40 +04:00
Zach
8972b6e293 Update config.js (#2287) 2018-08-28 08:37:35 +04:00
Alessio Treglia
5f255f0f71 Replace db_path with db_dir in default configuration (#2284)
* db_path is not being parsed

Fix default configuration, db_path is now db_dir.

Closes: cosmos/cosmos-sdk#1712

* Update CHANGELOG_PENDING.md
2018-08-27 17:27:18 +04:00
Peng Zhong
20e35654c6 lint markdown docs using a stop-words and write-good linters (#2195)
* lint docs with write-good, stop-words

* remove package-lock.json

* update changelog

* fix wrong paragraph formatting

* fix some docs formatting

* fix docs format

* fix abci spec format
2018-08-27 11:33:46 +04:00
Ahmad M ElShareif
8a84593c02 Reduce code in common/math (#2274) 2018-08-27 10:43:15 +04:00
Zach
aab5947d82 docs: deprecate RTD (#2280) 2018-08-27 10:37:54 +04:00
Zach
2f7fc87230 docs: fix img links, closes #2214 (#2282) 2018-08-27 10:36:22 +04:00
Anton Kaliaev
1cf6712a36 quick fix for CircleCI (#2279)
See https://discuss.circleci.com/t/saving-cache-stopped-working-warning-skipping-this-step-disabled-in-configuration/24423/2
2018-08-27 10:19:27 +04:00
Dev Ojha
43ebc77f9b Fix typo, closes #2269 (#2277) 2018-08-26 11:45:34 +04:00
Ethan Buchman
debe56326f Merge pull request #2159 from tendermint/bucky/abci-validators
Bucky/abci validators
2018-08-17 11:12:27 -04:00
Ethan Buchman
6dde320591 fixes from review 2018-08-17 10:32:10 -04:00
bradyjoestar
62b2093da5 ABCIAppClient conn close (#2236)
Refs https://github.com/grpc/grpc-go/issues/2264
2018-08-17 11:59:46 +04:00
Ethan Buchman
76bb4b15c7 rebase fixes 2018-08-16 13:22:04 -04:00
Ethan Buchman
0cbf9b2a7d update changelog 2018-08-16 13:19:30 -04:00
Ethan Buchman
0701d79046 minor fixes 2018-08-16 13:19:14 -04:00
Ethan Buchman
4f61b97bbe update dep for proto. fix types/proto3/block.proto 2018-08-16 13:19:14 -04:00
Ethan Buchman
1111c1848d update abci spec 2018-08-16 13:19:14 -04:00
Ethan Buchman
c919643c3e abci: move round back from votes to commit 2018-08-16 13:19:14 -04:00
Ethan Buchman
b189ab676f makefile: lint flags 2018-08-16 13:19:14 -04:00
Ethan Buchman
fe6a504374 revert gogo version used to generate files 2018-08-16 13:19:13 -04:00
Ethan Buchman
91376627ea update ADR 2018-08-16 13:19:13 -04:00
Ethan Buchman
e3f54ece2f abci: VoteInfo, ValidatorUpdate. See ADR-018 2018-08-16 13:19:13 -04:00
Ethan Buchman
f26b83f15f abci: add next_validators_hash to header 2018-08-16 13:19:13 -04:00
Ethan Buchman
1f6c7bf22a make: update protoc_abci use of awk 2018-08-16 13:19:13 -04:00
Ethan Buchman
d69cf9dd2f Merge pull request #2231 from tendermint/anton/changelog-2
Add a changelog entry & Upgrading guides
2018-08-15 10:19:06 -04:00
Anton Kaliaev
4e78badac9 docs: note max outbound peers excludes persistent 2018-08-15 12:54:20 +04:00
Anton Kaliaev
684e3cb446 add upgrading guides 2018-08-15 12:53:00 +04:00
Anton Kaliaev
a649deb6ee add a changelog entry 2018-08-15 12:52:43 +04:00
bradyjoestar
5446452b01 pass in NodeKey to NewNode (#2212)
Fixes #1544
2018-08-15 12:29:45 +04:00
bradyjoestar
ad24d66750 [abci-cli] print out all the sub-commands available (#2219) 2018-08-15 12:28:17 +04:00
Anton Kaliaev
eb98f1c3a9 add missing changelog entries (#2224) 2018-08-14 19:16:18 -04:00
Dev Ojha
728d2ed266 crypto: Remove unnecessary prefixes from amino route variable names (#2205)
* crypto: Remove unnecessary ed25519 and secp256k1 prefixes from amino routes.

* (squash this) add changelog

* (squash this) multisig amino fixes

* (squash this) fix build error
2018-08-14 19:13:25 -04:00
Zach
e10666859f Zach/automated docs (#2225)
* add docs/config.js for better developer experience

* update docs_readme :)
2018-08-14 18:40:05 -04:00
Anton Kaliaev
6fad8eaf5a [p2p/pex] connect to more than 10 peers (#2169)
* [p2p/pex] connect to more than 10 peers

also, remove DefaultMinNumOutboundPeers because a) I am not sure it's
needed b) it's super confusing

look closely

```
maxPeers := sw.config.MaxNumPeers - DefaultMinNumOutboundPeers
if maxPeers <= sw.peers.Size() {
  sw.Logger.Info("Ignoring inbound connection: already have enough peers", "address", inConn.RemoteAddr().String(), "numPeers", sw.peers.Size(), "max", maxPeers)
```

we print maxPeers = config.MaxPeers - DefaultMinNumOutboundPeers. So we
may not have enough peers even though we say we have enough.

Refs #2130

* update spec

* replace MaxNumPeers with MaxNumInboundPeers/MaxNumOutboundPeers

Refs #2130

* update changelog

* make max rpc conns formula visible to users

* update spec

* docs: note max outbound peers excludes persistent
2018-08-14 18:25:56 -04:00
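The snippet quoted in the entry above shows the bug: the inbound check subtracted an outbound budget from a single MaxNumPeers cap. A simplified, hypothetical sketch of the check after the split into separate inbound/outbound limits (not the actual switch code):

```
package p2p

// Hypothetical, simplified version of the inbound-connection check after the
// MaxNumPeers split; the real switch tracks inbound and outbound peers separately.
type switchConfig struct {
	MaxNumInboundPeers  int
	MaxNumOutboundPeers int
}

func canAcceptInbound(cfg switchConfig, numInbound int) bool {
	// Inbound connections are only limited by the inbound cap; the outbound
	// dialing budget no longer shrinks the number of peers we accept.
	return numInbound < cfg.MaxNumInboundPeers
}
```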
Ethan Buchman
db53dc5fd4 Merge pull request #2164 from tendermint/dev/multisig
Threshold Multisignature implementation
2018-08-14 16:37:07 -04:00
ValarDragon
2fe34491ba (squash this) Fix build errors 2018-08-14 11:42:40 -07:00
Anton Kaliaev
80e49abada send ValidatorSetUpdates event when validator set changes (#2161)
Refs #1916
2018-08-14 19:16:35 +04:00
b00f
0f931eeb10 types: allow genesis file to have 0 validators (#2148)
* fixing issue 2015

* Remove comments for code review

* Update tests
2018-08-14 19:02:53 +04:00
Dev Ojha
89668c3179 clist: Speedup functions (#2208)
* clist: Speedup detachNext() and detachPrev()

We used unnecessary function calls, defers, and extra mutexes.
These are not our friends for writing fast code in our libs.

* Remove more defers from clist functions

* Add more benchmarks
2018-08-14 19:00:21 +04:00
Dev Ojha
d0dcb1cde1 cmap: Remove defers (#2210)
All functions in cmap have just one code path. Thus there is no reason
to use defer statements.
2018-08-14 18:59:04 +04:00
bradyjoestar
ed08ae7321 [tm-monitor] use pubkey.Equals() func instead of raw == (#2221) 2018-08-14 18:57:48 +04:00
ValarDragon
6beaf6e72d (squash this) address Jae's comments on NumTrueBitsBefore 2018-08-13 18:46:33 -07:00
peerlink
3624a17642 blockchain: fix register concrete name. (#2213) 2018-08-13 17:40:49 +04:00
Dev Ojha
8a1a79257e mempool: Keep cache hashmap and linked list in sync (#2188)
* mempool: Keep cache hashmap and linked list in sync

This removes bugs with the linked list being full, but hashmap empty

* address PR comments

* switch clist back to list
2018-08-13 14:42:33 +04:00
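The bug class described above comes from evicting from the linked list without evicting from the map. A minimal illustrative cache (assumed names, not the real implementation) that removes the oldest entry from both structures in one step:

```
package mempool

import "container/list"

// Illustrative LRU-style cache that evicts from the map and the list
// together, so the two structures can never drift apart.
type lruTxCache struct {
	size int
	m    map[string]*list.Element
	l    *list.List // front = oldest
}

func newLRUTxCache(size int) *lruTxCache {
	return &lruTxCache{size: size, m: make(map[string]*list.Element), l: list.New()}
}

func (c *lruTxCache) Push(key string) {
	if _, ok := c.m[key]; ok {
		return
	}
	if c.l.Len() >= c.size {
		oldest := c.l.Front()
		c.l.Remove(oldest)
		delete(c.m, oldest.Value.(string)) // keep hashmap and linked list in sync
	}
	c.m[key] = c.l.PushBack(key)
}
```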
Ethan Buchman
9c6fdad276 Merge pull request #2200 from tendermint/anton/missing-changelog-entry
Add missing changelog entry
2018-08-10 17:41:48 -04:00
ValarDragon
8d28344e84 (Squash this) switch to bare 2018-08-10 13:59:35 -05:00
ValarDragon
4cf1dbd676 (squash this) fix amino route 2018-08-10 13:20:59 -05:00
ValarDragon
00db469fc0 (squash this) begin addressing PR comments 2018-08-10 13:14:43 -05:00
Anton Kaliaev
93aadf160f add missing changelog entry
Refs #1954
2018-08-10 09:33:02 +04:00
Dev Ojha
2756be5a59 libs: Remove usage of custom Fmt, in favor of fmt.Sprintf (#2199)
* libs: Remove usage of custom Fmt, in favor of fmt.Sprintf

Closes #2193

* Fix bug that was masked by custom Fmt!
2018-08-10 09:25:57 +04:00
Anton Kaliaev
fc7c298cc0 Remove gogoproto from Makefile's TOOLS (#2198)
* remove gogoproto from tools

because it's not a binary

* update protobuf version to 3.6.1 in `make get_protoc`

* update libs/common/types.pb.go and rpc/grpc/types.pb.go

* fix app tests
2018-08-10 09:14:17 +04:00
bradyjoestar
785786bec4 add json2wal & fix wal2json (#2196)
* add json2wal & fix wal2json

* fix bug

* Update main.go

* add wal2JsonTest file

* Delete wal2JsonTest
2018-08-10 09:09:40 +04:00
ValarDragon
aab26c3ff7 Merge remote-tracking branch 'origin/develop' into dev/multisig 2018-08-09 01:05:39 -05:00
Dev Ojha
5a8fe61200 crypto: Add compact bit array for intended usage in the multisig (#2140)
* crypto: Add compact bit array for intended usage in the multisig

This is in a separate PR for ease of review.

* (squash this) add comment
2018-08-09 09:38:23 +04:00
Ian Tan
3c98cec2c2 Add ADR entry for ProposeTx (#1813)
This adds an ADR entry addressing the implementation of a `ProposeTx`
method in the ABCI proposed in #1776. Fundamentally, this proposal gives
some control of block proposals to the application. The initial use case is
to support the Minimal Viable Plasma specification.
2018-08-08 23:12:36 +04:00
Dev Ojha
1fbca09e3c [ADR] Proposal for multisignature encoding (#1960)
* ADR: Proposal for multisignature encoding

This proposal is partially tied to the resolution of #1957.

* Change title to Encoding standard for multisignatures

* ADR: Change multisigs ADR now that amino must be used for pubkeys

* Address PR comments
2018-08-08 23:10:33 +04:00
Jordan Bibla
37ea7040ef remove JB from codeowners file (#2174) 2018-08-08 13:03:00 +04:00
Ethan Buchman
6770992b01 Merge pull request #2181 from tendermint/zach/docs-fixes
docs: fix links & other improvements
2018-08-07 20:35:13 -04:00
Zach Ramsay
b30596b3a1 docs: fix links & other improvements 2018-08-07 19:47:51 -04:00
Ethan Buchman
ef5c27a2d2 Merge pull request #2154 from tendermint/bucky/speed-up-tests
speed up some tests. ref #2038
2018-08-07 15:57:08 -04:00
Jun Kimura
e1b9bf7c81 set capacity of txsmap (#2166) 2018-08-07 20:50:10 +04:00
ValarDragon
4e7bf10b59 (squash this) squashed bug with multiple signatures at same index. 2018-08-07 11:37:42 -05:00
ValarDragon
67b6d51ff4 (squash this) address PR comments + fix bug in equality check 2018-08-07 10:44:38 -05:00
Dev Ojha
6dbbdb9438 Merge branch 'dev/compact_bitmap' into dev/multisig 2018-08-06 21:20:23 -05:00
ValarDragon
e7dd76c28d crypto: Threshold multisig implementation 2018-08-06 21:17:38 -05:00
ValarDragon
21448bcf4f crypto: Add compact bit array for intended usage in the multisig
This is in a separate PR for ease of review.
2018-08-06 15:55:02 -05:00
Ethan Buchman
ec3e34efd8 Merge pull request #2145 from tendermint/bucky/adr-chain-versions
adr: chain-versions
2018-08-05 22:48:06 -04:00
Ethan Buchman
b19e148bc5 Merge pull request #2144 from tendermint/bucky/adr-protocol-versions
adr: protocol versioning
2018-08-05 22:47:12 -04:00
Ethan Buchman
6f8b62d1f3 Merge pull request #2149 from tendermint/bucky/adr-abci-validators
[ADR] ABCI Validators
2018-08-05 22:44:38 -04:00
Ethan Buchman
e0e19a24a4 Merge pull request #2157 from tendermint/master
Merge pull request #2152 from tendermint/release/v0.23.0
2018-08-05 17:12:01 -04:00
Ethan Buchman
013b9cef64 Merge pull request #2152 from tendermint/release/v0.23.0
Release/v0.23.0
2018-08-05 17:11:22 -04:00
Ethan Buchman
087b657008 speed up some tests. ref #2038 2018-08-05 16:59:23 -04:00
Ethan Buchman
fe835cd456 Merge pull request #2116 from tendermint/265-change-abci-header-to-match-tm
change ABCI header to match Tendermint exactly
2018-08-05 16:55:10 -04:00
Anton Kaliaev
d7035abe73 change ABCI header to match Tendermint exactly
Now that Tendermint Amino will be compatible with proto3, the Header in ABCI
should exactly match the Tendermint header - they will then be encoded
identically in ABCI and in Tendermint Core.

Refs #265
2018-08-05 16:57:38 -04:00
Ethan Buchman
f2b629680a Merge pull request #2153 from tendermint/bucky/merge-0.23.0-to-develop
Bucky/merge 0.23.0 to develop
2018-08-05 16:43:00 -04:00
Ethan Buchman
720ce658f1 Merge branch 'release/v0.23.0' into bucky/merge-0.23.0-to-develop 2018-08-05 16:42:04 -04:00
Ethan Buchman
309a6772d7 types: fix formatting when printing signatures
- use cmn.Fingerprint and %X
2018-08-05 16:35:43 -04:00
Ethan Buchman
8bd514d9fb update changelog 2018-08-05 15:55:48 -04:00
ValarDragon
f903947ff3 crypto: Remove interface from crypto.Signature
Signatures are now []byte, which saves on the number of bytes after
amino encoding

(squash this) address Ismail's comment
2018-08-05 15:46:57 -04:00
Ethan Buchman
ea67fb55eb Merge pull request #2106 from tendermint/1134-add-proposer-to-the-block
add proposer address to block's Header
2018-08-05 15:18:42 -04:00
Ethan Buchman
e1062a657f fixes for ProposerAddress
- state.MakeBlock takes a proposerAddr
- validateBlock only checks that the ProposerAddress is in the validator
  set
- fix raceyness from bad proposer test:
  - use privValidator to get the proposer address (instead of racy
    state)
  - note we had to remove the test that checked the correct proposer was
    included for higher rounds because we don't have a good way to test
    this with multiple consensus states and not using the
    privValidator.Address while calling createProposalBlock was a hack!
2018-08-05 15:19:21 -04:00
Ethan Buchman
4d998b7c03 consensus: failing test for ProposerAddress 2018-08-05 15:17:43 -04:00
Anton Kaliaev
bec9d5cba9 add proposer address to block's Header
Refs #1134

Validation:

- ignored in block.ValidateBasic since it's stateful information
- checked in blockExec.ValidateBlock
2018-08-05 15:16:49 -04:00
Ethan Buchman
06a157ad06 Merge pull request #1815 from tendermint/jae/literefactor4
ValidatorSet change delayed by 1 block, and lite refactor (#2)
2018-08-05 14:54:07 -04:00
Dev Ojha
d6a666b445 p2p/pex: Fix mismatch between dialseeds and checkseeds. (#2151) 2018-08-05 14:47:01 -04:00
Ethan Buchman
c5c2b9601f update changelog and version 2018-08-05 14:14:56 -04:00
Ethan Buchman
7538864c15 Merge branch 'develop' into jae/literefactor4 2018-08-05 13:51:41 -04:00
Ethan Buchman
279259ec8e adr-018: abci validators 2018-08-05 12:45:27 -04:00
Ethan Buchman
ca9d07e5e4 update deps for amino v0.12.0-rc0 2018-08-05 12:39:08 -04:00
Dev Ojha
6e3c5e8033 p2p/pex: Allow configured seed nodes to not be resolvable over DNS (#2129)
* p2p/pex: Allow configured seed nodes to be offline

Previously you couldn't startup tendermint if a seed node was offline.
This now allows you to startup tendermint, as long as all seed node addresses
are formatted correctly. In the event that all seed nodes are down,
and the address book is empty, then it crashes with an informative error msg.
(This case doesn't occur if no seeds were specified)

Closes #1716

* (Squash this) Address melekes' comments

* (squash this) fix package imports

* (squash this) fix pex_reactor comment

* (squash this) add a test case
2018-08-04 20:35:08 -04:00
Ethan Buchman
8073e51b04 Merge pull request #2096 from tendermint/dev/adr_symmetric
[ADR] Proposal for encoding symmetric cryptography
2018-08-04 20:27:24 -04:00
Ethan Buchman
3161ebbc2f Merge pull request #2091 from tendermint/dev/adr_secp_signatures
[ADR] Fix malleability problems in Secp256k1 signatures
2018-08-04 20:22:56 -04:00
Ethan Buchman
4cbeb30da2 Merge pull request #2136 from tendermint/1944-update-grpc
update genproto
2018-08-03 23:40:21 -04:00
Ethan Buchman
d5b5e5a2e4 Merge pull request #2135 from tendermint/2072-unresponsive-tm-after-cs-failure
consensus: non-responsive to CTRL-C if consensus state panics
2018-08-03 23:39:25 -04:00
ValarDragon
6691492540 (squash this) indicate what Ethereum does 2018-08-03 17:49:46 -07:00
Ethan Buchman
0f80a7da82 adr: chain-versions 2018-08-03 20:23:37 -04:00
Ethan Buchman
ae2238efe6 adr: protocol versioning 2018-08-03 20:21:40 -04:00
Anton Kaliaev
2878c7523f update github bug report template (#2131) 2018-08-03 11:39:57 +04:00
Anton Kaliaev
b1cff0f9bf [libs/autofile] create a Group ticker on Start
1) no need to stop the ticker in createTestGroup() method
2) now there is a symmetry - we start the ticker in OnStart(), we stop it
in OnStop()

Refs #2072
2018-08-03 11:34:58 +04:00
Anton Kaliaev
d09a3a6d3a stop gracefully instead of trying to resume ops
Refs #2072

We most probably shouldn't be running any further when there is some
unexpected panic. Some unknown error happened, and so we don't know if
that will result in the validator signing an invalid thing. It might be
worthwhile to explore a mechanism for manual resuming via some console
or secure RPC system, but for now, halting the chain upon unexpected
consensus bugs sounds like the better option.
2018-08-03 11:24:55 +04:00
ValarDragon
87f09adeec (Squash this) Be more explicit about the exact encoding of the secp signature 2018-08-02 23:27:16 -07:00
ValarDragon
b3a3c8a192 Merge remote-tracking branch 'origin/develop' into dev/adr_secp_signatures 2018-08-02 23:25:14 -07:00
ValarDragon
96fdec0fca crypto: Add compact bit array for intended usage in the multisig
This is in a separate PR for ease of review.
2018-08-02 23:18:09 -07:00
Ethan Buchman
fe5e7808f2 fix Gopkg.lock 2018-08-02 19:15:32 -04:00
Ethan Buchman
2d1c5a1ce6 Merge remote-tracking branch 'origin/develop' into jae/literefactor4 2018-08-02 19:12:22 -04:00
Ethan Buchman
00ebdcd581 update pending changelog 2018-08-02 19:06:29 -04:00
Ethan Buchman
2487210414 Merge pull request #2097 from tendermint/1772-revert
revert "make `/status` RPC endpoint resistant to consensus halt"
2018-08-02 17:31:07 -04:00
ValarDragon
a040c36dfb (squash this) change adr number, remove redundancy in function names 2018-08-02 10:43:47 -07:00
Anton Kaliaev
d579f4c610 update genproto
Closes #1944
2018-08-02 17:54:55 +04:00
Anton Kaliaev
b82138b002 update changelog 2018-08-02 16:48:12 +04:00
Anton Kaliaev
8ed99c2c13 exit from initSighupWatcher child goroutine
also, remove excessive log message

Refs #2072
2018-08-02 16:42:25 +04:00
Anton Kaliaev
4c5a143a70 respawn receiveRoutine so we can properly exit
Closes #2072
2018-08-02 16:36:28 +04:00
Anton Kaliaev
b33f73eaf1 stop autofile and autogroup properly
NOTE: from the ticker#Stop documentation:

```
Stop does not close the channel, to prevent a read from the channel
succeeding incorrectly.
https://golang.org/src/time/tick.go?s=1318:1341#L35
```

Refs #2072
2018-08-02 16:33:34 +04:00
Jae Kwon
e719a93d1d Addressed review for #1815 except those marked as 'TODO make issue' 2018-08-02 03:10:50 -07:00
Jae Kwon
eb9b37e196 Pull out consensus liveness fix, which went to #1815 2018-08-02 01:59:46 -07:00
Dev Ojha
eaa137512c adr: Encoding for cryptography at launch (#2121) 2018-08-01 18:19:21 -04:00
Dev Ojha
023bb99eb0 p2p: Add test vectors for deriving secrets (#2120)
These test vectors are needed for comparison with the Rust implementation.
To implement this effectively, a "RandBool" method was added to cmn.Rand.
2018-08-01 15:06:29 -04:00
Anton Kaliaev
f2f53442c6 reorder BaseConfig according to generated version
also, add `priv_validator_laddr` to the template
2018-08-01 16:20:59 +04:00
Ismail Khoffi
24ae878b9f update encoding test to how amino skips empty pointers 2018-08-01 13:29:41 +02:00
Jae Kwon
619bb3b2d7 Merge remote-tracking branch 'remotes/origin/jae/literefactor5' into jae/literefactor6 2018-08-01 03:06:00 -07:00
Dev Ojha
dde96b75ce abci: Update readme for building protoc (#2124) 2018-08-01 13:57:31 +04:00
Dev Ojha
6fb2f44cc3 p2p: Connect to peers from a seed node immediately (#2115)
This is to reduce wait times when initially connecting. This still runs checks
such as whether you still want additional peers.

A test case has been created, which fails if this change is not included.
2018-07-31 22:09:01 +02:00
Zarko Milosevic
08ad162daa docs: Modify blockchain spec to reflect validator set changes (#2107) 2018-07-31 20:19:57 +02:00
ValarDragon
a83eed104c libs/cmn: Remove Tempfile, Tempdir, switch to ioutil variants (#2114)
Our Tempfile was just a wrapper on ioutil that panicked instead of returning an error.

Our Tempdir was a less safe variant of ioutil's Tempdir.
2018-07-31 19:43:36 +02:00
ValarDragon
be642754f5 libs/cmn/writefileatomic: Handle file already exists gracefully (#2113)
This uses the stdlib's method of creating a tempfile in our write
file atomic method, with a few modifications. We use a 64 bit number
rather than 32 bit, and therefore a corresponding LCG. This is to
reduce collision probability. (Note: we used 32 bytes previously,
so this was likely a concern.)

We handle reseeding the LCG in such a way that multiple threads are
even less likely to reuse the same seed.
2018-07-31 19:43:36 +02:00
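The stdlib approach the entry above refers to boils down to writing into a uniquely named temp file in the same directory and then renaming it over the target. A minimal sketch of that pattern (names and error handling are illustrative, not the tendermint implementation):

```
package files

import (
	"io/ioutil"
	"os"
	"path/filepath"
)

// writeFileAtomic sketches the write-to-temp-then-rename pattern: the unique
// temp name avoids collisions between concurrent writers, and the final
// rename is atomic on POSIX filesystems.
func writeFileAtomic(filename string, data []byte, perm os.FileMode) error {
	f, err := ioutil.TempFile(filepath.Dir(filename), filepath.Base(filename)+".tmp")
	if err != nil {
		return err
	}
	defer os.Remove(f.Name()) // no-op if the rename below succeeded

	if _, err := f.Write(data); err != nil {
		f.Close()
		return err
	}
	if err := f.Sync(); err != nil {
		f.Close()
		return err
	}
	if err := f.Close(); err != nil {
		return err
	}
	if err := os.Chmod(f.Name(), perm); err != nil {
		return err
	}
	return os.Rename(f.Name(), filename)
}
```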
ValarDragon
2608249e5b libs/common: Refactor tempfile code into its own file 2018-07-31 19:43:36 +02:00
Anton Kaliaev
62b8ee270d [docs] Validator's address can be skipped (#2117)
Refs #1712
2018-07-31 18:28:19 +02:00
Anton Kaliaev
0c7338c5f0 abci: Change validators to last_commit_info in RequestBeginBlock (#2074)
* change validators to last_commit_info in RequestBeginBlock
* do not send pubkeys with RequestBeginBlock

Refs #1856
2018-07-30 17:29:40 +02:00
Dev Ojha
231cdc1320 ci: Reduce log output in test_cover (#2105)
This commit makes it such that circle CI only shows the module whose
tests it is currently running in the log, unless a test fails. For each
failing test, it will display the name of all failing tests, along with
their log output. This is done to make
our log output far more scrollable. We lose no information in debugging.
2018-07-30 01:05:27 +02:00
Zaki Manian
fef4fe1c66 Link to "The latest gossip on BFT consensus" (#2102)
The paper is such a useful resource to anyone building on Tendermint. It should be linked in the ReadMe.
2018-07-29 09:49:42 +04:00
Dev Ojha
f00b52b710 libs/autofile/group_test: Remove unnecessary logging (#2100)
Previously we logged `Testing for i <i>` for all i in [0,100).
This was unnecessary. This changes it to just log the value for i on
error messages, to reduce the unnecessary verbosity in log files.
2018-07-29 09:48:37 +04:00
VenkatDatta
188e459273 Removed unnecessary onStart call (#2098)
* Removed unnecessary onStart & onStop calls in reactor

* Refactor OnStart & OnStop in reactor

* Removed redundant OnStart func in reactor
2018-07-29 09:46:53 +04:00
ValarDragon
3d5d254932 (squash this) Mixed up field element and curve element. Idea still stands. 2018-07-28 20:41:19 -07:00
ValarDragon
ce9ddc7cd7 (squash this) Note not to overwrite aead's. 2018-07-28 06:33:15 -07:00
ValarDragon
c03ad56d55 (squash this) Note that this breaks existing keys. 2018-07-28 04:23:22 -07:00
ValarDragon
caef5dcd69 (Squash this) forgot to say that algo_name should be length prefixed 2018-07-28 04:14:07 -07:00
Anton Kaliaev
7634073718 revert "make /status RPC endpoint resistant to consensus halt"
Refs #1772

Reasons:
- this was a bad patch for something not well understood

Lessons learned:
- nobody should be modifying code without understanding the problem
first. it will only result in more technical debt and code rot.
- we never hide information when we suspect a bug or we're not sure what's
going on.
2018-07-28 09:17:42 +04:00
ValarDragon
af2894c0f8 (squash this) improve grammar. 2018-07-27 19:27:25 -07:00
ValarDragon
a2debe57c7 [ADR] Proposal for encoding symmetric cryptography 2018-07-27 18:10:41 -07:00
ValarDragon
5955eddc7d ADR: Fix malleability problems in Secp256k1 signatures
Previously you could not assume that your transaction hash would
appear on chain.
2018-07-27 13:18:21 -07:00
Ethan Buchman
4bab34ae45 Merge pull request #2088 from tendermint/bucky/fix-evidence-test
consensus: Fix test for blocks with evidence
2018-07-27 14:06:12 -04:00
Ethan Buchman
fe5b0d7074 consensus: fix test for blocks with evidence 2018-07-27 14:00:05 -04:00
Anton Kaliaev
96ae535fb8 proto3 timestamp (#2064)
This PR changes ABCI time format from int64 (Unix seconds) to WKT (WellKnownType) google.protobuf.Timestamp.

Refs #1857

Reasons:

better precision
standard DT for proto

* update Gopkg.lock
* [makefile] remove extra grep
    - go list excludes vendor by default now
* proto3 timestamp
* [docs/abci-spec] note about serialisation format
* make time non-nullable
2018-07-27 04:23:19 +02:00
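Switching ABCI time from int64 Unix seconds to the google.protobuf.Timestamp well-known type means converting at the Go boundary. A small sketch assuming the golang/protobuf ptypes helpers (the helper package is an assumption about the toolchain, not taken from the commit):

```
package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/ptypes"
)

func main() {
	now := time.Now().UTC()

	// time.Time -> google.protobuf.Timestamp (what ABCI messages now carry)
	ts, err := ptypes.TimestampProto(now)
	if err != nil {
		panic(err)
	}

	// and back again on the receiving side
	back, err := ptypes.Timestamp(ts)
	if err != nil {
		panic(err)
	}
	fmt.Println(ts.Seconds, ts.Nanos, back.Equal(now))
}
```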
Alexander Simmerl
4be6395ee0 Merge pull request #2085 from tendermint/master
Merge 0.23.8 back into develop
2018-07-27 04:21:34 +02:00
Dev Ojha
bc526f18a4 libs: Make bitarray functions lock parameters that aren't the caller (#2081)
This now makes bit array functions which take in a second bit array thread
safe. Previously there was a warning on bitarray.Update to lock the
second parameter externally if thread safety was required.
This was not done within the codebase, so it was fine to change here.

Closes #2080
2018-07-27 04:21:08 +02:00
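The change above is about lock discipline: a method that reads a second bit array must take that argument's mutex too, not just the receiver's. A stripped-down illustration (not the real BitArray type):

```
package bits

import "sync"

type BitArray struct {
	mtx   sync.Mutex
	elems []uint64
}

// Update copies o into b. Both mutexes are held so neither side can be
// mutated mid-copy; previously callers were expected to lock o themselves.
// (A real implementation also needs a consistent lock order to avoid deadlock.)
func (b *BitArray) Update(o *BitArray) {
	if b == nil || o == nil {
		return
	}
	b.mtx.Lock()
	o.mtx.Lock()
	defer b.mtx.Unlock()
	defer o.mtx.Unlock()

	b.elems = append(b.elems[:0], o.elems...)
}
```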
Jae Kwon
40d6dc2ee5 Merge pull request #2082 from tendermint/release/0.22.8
HotFix for 0.22.7, bump to 0.22.8
2018-07-26 19:13:27 -07:00
Jae Kwon
d542d2c394 Fix 0.22.7, bump to 0.22.8 2018-07-26 18:08:09 -07:00
Alexander Simmerl
fd29fd6465 adr: PeerTransport (#2069)
* p2p: Propose PeerTransport ADR
* adr: Set status to in review
* adr: Add high-level decision
* adr: Extend on the idea of guards
* adr: Rework guards into transport specific filters
* adr: Rename to nodeAddr
* adr: Incorporate review
2018-07-27 02:27:19 +02:00
Dev Ojha
6241e6b927 libs: update BitArray go docs (#2079) 2018-07-27 02:10:58 +02:00
Ethan Buchman
18acd77e40 Merge pull request #2076 from tendermint/hotfix/0.22.7
Hotfix/0.22.7
2018-07-26 19:00:50 -04:00
Hendrik Hofstadt
49b52ee3c7 Add test for MakePartSet with evidence 2018-07-26 19:00:07 -04:00
Ethan Buchman
e4dfab6349 changelog and version 2018-07-26 18:54:15 -04:00
Ethan Buchman
0e127562bf register evidence interface wherever its used 2018-07-26 18:53:19 -04:00
Dev Ojha
9f19229791 .github: Split the issue template into two separate templates (#2073)
* .github: Split the issue template into two separate templates

Now we have different bug report and feature request templates.

* Forgot to add the name, and about fields
2018-07-26 12:18:55 +04:00
Alexander Simmerl
bdab37a626 Merge pull request #2054 from tendermint/dev/secret_connection
secret connection update
2018-07-26 00:29:36 +02:00
ValarDragon
7bf28af590 p2p/secret_connection: Switch salsa usage to hkdf + chacha
This now uses one hkdf on the X25519 shared secret to create
a key for the sender and receiver.
The hkdf call is now just called upon the computed shared
secret, since the shared secret is a function of the pubkeys.

The nonces now start at 0, as we are using chacha as a stream
cipher, and the sender and receiver now have different keys.
2018-07-26 00:12:32 +02:00
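A compressed sketch of the derivation described above: run HKDF once over the X25519 shared secret, split the output into separate send/receive keys, and use each key with a ChaCha20-Poly1305 AEAD whose nonces start at zero. It uses golang.org/x/crypto; the info string and key layout are illustrative, not the exact wire protocol.

```
package main

import (
	"crypto/sha256"
	"fmt"
	"io"

	"golang.org/x/crypto/chacha20poly1305"
	"golang.org/x/crypto/hkdf"
)

func main() {
	sharedSecret := make([]byte, 32) // stand-in for the X25519 ECDH output

	// One HKDF invocation over the shared secret yields independent keys
	// for each direction of the connection.
	kdf := hkdf.New(sha256.New, sharedSecret, nil, []byte("EXAMPLE_SECRET_CONNECTION_KDF_INFO"))
	keys := make([]byte, 2*chacha20poly1305.KeySize)
	if _, err := io.ReadFull(kdf, keys); err != nil {
		panic(err)
	}
	sendKey, recvKey := keys[:chacha20poly1305.KeySize], keys[chacha20poly1305.KeySize:]
	_ = recvKey // the peer uses the mirror-image assignment

	sendAEAD, err := chacha20poly1305.New(sendKey)
	if err != nil {
		panic(err)
	}

	// Nonces start at zero and are incremented per message, stream-cipher style.
	nonce := make([]byte, chacha20poly1305.NonceSize)
	ct := sendAEAD.Seal(nil, nonce, []byte("hello"), nil)
	fmt.Printf("%x\n", ct)
}
```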
Zaki Manian
1b04e4e5f1 p2p: Remove RipeMd160.
Generate keys with HKDF instead of hash functions, which provides better security properties.

Add xchacha20poly1305 to secret connection. (Due to rebasing, this code has been removed)
2018-07-26 00:09:37 +02:00
Zach
66fe5b7bae rpc: Improve slate for Jenkins (#2070) 2018-07-25 23:37:08 +02:00
Dev Ojha
24b94d7aa4 crypto: Switch hkdfchacha back to xchacha (#2058)
hkdfchachapoly was a construction we came up with. There is no longer any
reason to use it. We should instead just use xchacha for the remaining use
cases we have. (Symmetrically encrypting the keys in the sdk keys package)
2018-07-25 23:12:39 +02:00
Dev Ojha
0bd4fb96f0 crypto: Add benchmarking code for signature schemes (#2061)
* crypto: Add benchmarking code for signature schemes

This does a slight refactor for the key generation code. It now calls a
separate unexported method to allow generation from a reader. I think this
will actually reduce time in generation, due to no longer initializing an
extra slice. This was needed in order to enable benchmarking.

This uses an internal package for the benchmarking code, so that this can
be standardized without being exported in the public API. The benchmarking
code is derived from agl/ed25519's benchmarking code, and has copied the
license over.

Closes #1984
2018-07-25 23:07:47 +02:00
Dev Ojha
9cfc47a93b makefile: Add make check_dep and remove make ensure_deps (#2055)
This adds a new makefile command, which is used in CI linting, `make check_dep`.
This ensures the toml is in sync with the lock, and that we're not pinning to a
branch in any repository.

This also adapts `make get_vendor_deps` to check the lock, in addition to
populating the vendor directory. This removes the need for `make ensure_deps`.

This makes `make get_vendor_deps` consistent between tendermint and the sdk.
2018-07-25 18:09:52 +02:00
Alexander Simmerl
d212292f86 Merge pull request #2066 from tendermint/bucky/merge-master
Bucky/merge master
2018-07-25 17:36:39 +02:00
Ethan Buchman
7ad92c44cb Merge branch 'master' into bucky/merge-master 2018-07-25 11:34:32 -04:00
Ethan Buchman
5fdbcd70df Merge pull request #2056 from tendermint/hotfix/v0.22.6
Hotfix/v0.22.6
2018-07-25 11:18:00 -04:00
Dev Ojha
d149d8f96d github: update PR template to indicate changing pending changelog. (#2059) 2018-07-25 11:34:51 +04:00
Ethan Buchman
74b6cc9057 rpc: log error when we timeout getting validators from consensus (#2045) 2018-07-25 10:56:00 +04:00
Ethan Buchman
6046b99197 consensus: include evidence in proposed block parts. fixes #2050 2018-07-24 21:58:39 -04:00
Ethan Buchman
359898dcac p2p: fix conn leak. part of #2046 2018-07-24 21:53:37 -04:00
Ethan Buchman
5768b67162 changelog and version 2018-07-24 21:26:41 -04:00
Ethan Buchman
082557b7d4 rpc: validate height in abci_query 2018-07-24 21:25:47 -04:00
Ethan Buchman
8dc655dad2 rpc: fix /blockchain OOM #2049 2018-07-24 21:18:20 -04:00
Ethan Buchman
70d3783747 dep: revert updates
- leave protobuf on 1.1.1 for grpc import fixes
- amino v0.11 breaks tendermint - revert to v0.10.1
2018-07-24 21:01:14 -04:00
Anton Kaliaev
60378fd7f9 abci: remove fee (#2043)
Refs #1861

We don't use the fee field and it's likely just confusing.

We can add backwards compatible priority (instead of fee) later.

Note priority is better than fee because it lets the app do the math on how to rank order transactions, rather than forcing that into tendermint (ie. if we return fee, priority would be fee/gas)
2018-07-24 17:28:26 +02:00
Anton Kaliaev
c248ce5ef6 p2p: Reject addrs coming from private peers (#2032)
Refs #1706
2018-07-24 17:10:58 +02:00
Anton Kaliaev
059a03a66a tools: clean up Makefile and remove LICENSE file (#2042)
* clean up Makefile and remove LICENSE file
* remove k8s and build LICENSE files
* remove non-existing target
2018-07-24 16:02:32 +02:00
Alexander Simmerl
abe5b63059 Merge pull request #2040 from tendermint/2027-socket-pv
fix acceptDeadline
2018-07-24 15:52:45 +02:00
Ethan Buchman
a657870b3d update dockerfile 2018-07-24 09:42:08 -04:00
Alexander Simmerl
7e7473ad41 Merge pull request #2044 from tendermint/2019-prometheus
do not overwrite metrics provider in node#NewNode
2018-07-24 15:41:44 +02:00
Anton Kaliaev
75a26ebd6d do not overwrite metrics provider in node#NewNode
also, make running Prometheus server optional.

Closes #2019
2018-07-24 16:01:11 +04:00
Anton Kaliaev
ad580e2734 fix acceptDeadline
before: 1.000000003s
after: 3.000000000s

Refs #2027
2018-07-24 10:51:12 +04:00
Ethan Buchman
f6705f02c7 fixes post merge 2018-07-23 23:39:22 -04:00
Ethan Buchman
ea31c4836a Merge branch 'develop' into jae/literefactor4 2018-07-23 23:28:14 -04:00
Ethan Buchman
f1093edbe2 remove accidental files 2018-07-23 23:11:53 -04:00
Ethan Buchman
b92860b6c4 Merge pull request #1805 from tendermint/jae/optimize_blockchain
Optimizing blockchain reactor.
2018-07-23 22:46:51 -04:00
Ethan Buchman
54d753e64e fix Gopkg, add changelog 2018-07-23 22:48:38 -04:00
Ethan Buchman
e1b48b16c4 Merge branch 'develop' into jae/optimize_blockchain 2018-07-23 22:16:34 -04:00
Ethan Buchman
7c07235649 Merge pull request #2036 from tendermint/master
Merge master to develop
2018-07-23 22:05:44 -04:00
Ethan Buchman
05a76fb517 Merge pull request #2029 from tendermint/release/0.22.5
Release/0.22.5
2018-07-23 21:57:43 -04:00
Ethan Buchman
15b112e669 mempool: chan bool -> chan struct{} 2018-07-23 21:06:47 -04:00
Ethan Buchman
2aef80bcff Merge pull request #2034 from tendermint/dev/fix_pkg_names
crypto: Fix package imports from the refactor
2018-07-23 20:34:34 -04:00
ValarDragon
f3d519c966 crypto: Fix package imports from the refactor 2018-07-23 16:14:21 -07:00
Anton Kaliaev
9962e598a0 reconnect to self-reported address if persistent peer is inbound (#2031)
* reconnect to self-reported address if persistent peer is inbound

* add a fixme
2018-07-23 21:15:08 +04:00
Anton Kaliaev
948b91e62e add missing changelog entries 2018-07-23 17:16:43 +04:00
Anton Kaliaev
1e05242297 update changelog and bump version to 0.22.5 2018-07-23 17:07:14 +04:00
Anton Kaliaev
94e8252607 #2021 follow up (#2028)
* update changelog

* txAvailable is always true

Refs #2021, #1920

* remove debug message

No additional value. `enterPropose` log message should be enough.

Refs #2021, #1920
2018-07-23 16:47:15 +04:00
Dev Ojha
eb7dea1b0d crypto/ed25519: Remove privkey.Generate method (#2022)
The privkey.Generate method here was a custom-made method for deriving
a private key from another private key. This function is currently
not used anywhere in our codebase, and has not been reviewed enough
that it would be secure to use. This removes that method. We should
adopt the official ed25519 HD derivation once that has been standardized,
in order to fulfill this need.

closes #2000
2018-07-23 15:35:13 +04:00
srmo
e36ce6f893 fix race condition on proposal height for published txs (#2021)
* #1920 try to fix race condition on proposal height for published txs

- related to create_empty_blocks=false
- published height for accepted tx can be wrong (too low)
- use the actual mempool height + 1 for the proposal
- expose Height() on mempool

* #1920 add initial test for mempool.Height()

- not sure how to test the lock
- can the mutex reference be of type Locker?
-- this way, we can use a "mock" of the mutex to test triggering

* #1920 use the ConsensusState height in favor of mempool

- gets rid of indirections
- doesn't need any "+1" magic

* #1920 cosmetic

- if we use cs.Height, it's enough to evaluate right before propose

* #1920 cleanup TODO and non-needed code

* #1920 add changelog entry
2018-07-23 15:34:45 +04:00
Dev Ojha
c5c1689591 crypto/secp256k1: Add godocs, remove indirection in privkeys (#2017)
* crypto/secp256k1: Add godocs, remove indirection in privkeys

The following was previously done for creating secp256k1 private keys:

First obtain privkey bytes. Then create a private key in the
underlying library, with scalar exponent equal to privKeyBytes.
(The method called was secp256k1.PrivKeyFromBytes,
fb90c334df/btcec/privkey.go (L21))

Then the private key was serialized using the underlying library, which just
returns back the bytes that comprised the scalar exponent, but padded to be
exactly 32 bytes.
fb90c334df/btcec/privkey.go (L70)

Thus the entire indirection of calling the underlying library can be avoided
by just ensuring that we pass in a 32 byte value. A test case has even been written
to show this more clearly in review.

* crypto/secp256k1: Address PR comments

Squash this commit

* crypto: Remove note about re-registering amino paths when unnecessary.

This commit should be squashed.
2018-07-21 08:52:04 +04:00
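The indirection removed above existed only to left-pad the scalar to exactly 32 bytes on the way back out. A small sketch of the equivalent padding (hypothetical helper, not the tendermint API):

```
package main

import "fmt"

// padTo32 left-pads a big-endian scalar to exactly 32 bytes, which is all the
// round-trip through the underlying secp256k1 library accomplished.
func padTo32(scalar []byte) [32]byte {
	if len(scalar) > 32 {
		panic("scalar longer than 32 bytes")
	}
	var out [32]byte
	copy(out[32-len(scalar):], scalar)
	return out
}

func main() {
	short := []byte{0x01, 0x02}
	fmt.Printf("%x\n", padTo32(short)) // 0000...0102
}
```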
Jae Kwon
b41b89732d Update store.go
Revert to SetSync for saveABCIResponses() as per Ethan's feedback
2018-07-20 14:38:27 -07:00
Anton Kaliaev
5e96421d44 Merge pull request #1966 from tendermint/dev/refactor_crypto
crypto: Refactor to move files out of the top level directory
2018-07-20 22:48:41 +04:00
ValarDragon
c798702764 crypto: Remove Ed25519 and Secp256k1 suffix on GenPrivKey 2018-07-20 10:44:21 -07:00
ValarDragon
17c0029233 Merge remote-tracking branch 'origin/develop' into dev/refactor_crypto 2018-07-20 08:59:41 -07:00
Alexander Simmerl
0f2d97dffe Merge pull request #1742 from Liamsi/proto_files
Add Proto files for types.Header (incl. BlockId, Time, PartsSetHeader)
2018-07-20 17:43:25 +02:00
Alexander Simmerl
ed8714e40c Merge pull request #1965 from tendermint/693-part-2
make Block Header and Data non-pointers
2018-07-20 17:42:42 +02:00
Alexander Simmerl
6017d817e5 Merge pull request #2008 from tendermint/1772-rwmutex-cs
use RWMutex for consensus state
2018-07-20 17:24:55 +02:00
Alexander Simmerl
63835c0360 Merge pull request #2009 from tendermint/1772-call-validators-with-timeout
make `/status` RPC endpoint resistant to consensus halt
2018-07-20 17:22:16 +02:00
Alexander Simmerl
c82c60df11 rpc: Test Validator retrevial timeout 2018-07-20 17:08:55 +02:00
Dev Ojha
67762aec73 crypto/ed25519: Update the godocs (#2002)
This commit updates the godocs for the package, and adds an optimization
to the privkey.Pubkey() method.

The optimization is that in golang, the private key (due to interface
compatibility reasons) has a copy of the public key stored inside of it.
Therefore if this copy has already been computed, there is no need to
recompute it.
2018-07-20 10:09:30 +04:00
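The optimization mentioned above relies on the fact that an Ed25519 private key already carries a copy of its public half, so returning that copy avoids redoing the scalar multiplication. The standard library exposes the same property; this is a stdlib illustration, not tendermint's key type:

```
package main

import (
	"bytes"
	"crypto/ed25519"
	"crypto/rand"
	"fmt"
)

func main() {
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}

	// The last 32 bytes of the private key are the public key, so Public()
	// just copies them out instead of re-deriving the curve point.
	cached := priv.Public().(ed25519.PublicKey)
	fmt.Println(bytes.Equal(pub, cached)) // true
}
```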
Anton Kaliaev
0fbb465b8f add protoc_all and protoc_grpc to .PHONY 2018-07-20 01:27:41 +04:00
Anton Kaliaev
2e75214316 update gogo to 1.1.1 and other misc. updates
Refs #1883
2018-07-20 01:27:41 +04:00
Anton Kaliaev
5be456e5b1 update grpc version to 1.13.0
Refs #1883
2018-07-20 01:27:41 +04:00
Anton Kaliaev
1bd5476854 make /status RPC endpoint resistant to consensus halt
Refs #1772
2018-07-19 11:26:50 +04:00
Anton Kaliaev
5037dd40c5 use RWMutex for consensus state
allows multiple RPC requests to query consensus state

Refs #1772
2018-07-19 10:49:12 +04:00
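The point of the switch above is that read-only RPC queries can hold the lock concurrently while consensus transitions still take the exclusive write lock. A minimal sketch with placeholder fields (not the actual ConsensusState):

```
package consensus

import "sync"

type State struct {
	mtx    sync.RWMutex
	height int64
	round  int
}

// Read path: many RPC queries may hold RLock at once.
func (s *State) HeightRound() (int64, int) {
	s.mtx.RLock()
	defer s.mtx.RUnlock()
	return s.height, s.round
}

// State transitions still take the exclusive lock.
func (s *State) enterNewRound(height int64, round int) {
	s.mtx.Lock()
	defer s.mtx.Unlock()
	s.height, s.round = height, round
}
```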
Liamsi
96818af9d5 fix protos to make all tests pass, document differences 2018-07-18 19:06:38 +02:00
ValarDragon
571e602f07 Merge remote-tracking branch 'origin/develop' into dev/refactor_crypto 2018-07-18 08:54:51 -07:00
ValarDragon
99e582d79a crypto: Refactor to move files out of the top level directory
Currently the top level directory contains basically all of the code
for the crypto package. This PR moves the crypto code into submodules
in a similar manner to what `golang/x/crypto` does. This improves code
organization.

Ref discussion: https://github.com/tendermint/tendermint/pull/1966

Closes #1956
2018-07-18 08:38:44 -07:00
Liamsi
a81ca93139 update to new (timestamp & empty structs) encoding in amino
- timestamps no longer have fixed length encoding
-
2018-07-18 16:37:15 +02:00
Liamsi
2744682e77 update to latest amino (pre) release v0.11.1
- also reformat code and order imports
2018-07-18 15:53:53 +02:00
Liamsi
d665c79cc9 WIP: more empty struct examples 2018-07-18 15:18:10 +02:00
Liamsi
3c38a25bbb add empty struct examples 2018-07-18 15:17:51 +02:00
Liamsi
0cd82fa166 add empty struct examples 2018-07-18 15:14:41 +02:00
Liamsi
99fa7f8132 everything works with https://github.com/tendermint/go-amino/pull/178 2018-07-18 15:14:41 +02:00
Liamsi
82104c9329 almost 2018-07-18 15:14:41 +02:00
Zach
40342bfa4a Update DOCS_README.md (#1985) 2018-07-18 13:32:17 +04:00
Anton Kaliaev
912fe477a4 Merge pull request #1987 from silasdavis/static-marshaler
Generate static marshalling methods for ABCI types to make compatible with downstream GRPC usage
2018-07-18 13:26:44 +04:00
Anton Kaliaev
b31ee798bd preserve original address and dial it instead of self-reported address (#1994)
Refs #1720
2018-07-18 13:23:29 +04:00
Jeremiah Andrews
6c4ca140ed Add private peer ID tracking to AddrBook (#1989)
* Add private peer ID tracking to AddrBook

* Remove private peer tracking/blocking from pex

* debug level msg when we fail to add private address
2018-07-18 13:22:09 +04:00
needkane
449846ccb2 NodeInfo version check: delete redundant code 2018-07-18 13:12:52 +04:00
Dev Ojha
3353bb99ae tools: Remove redundant grep -v vendors/ (#1996)
* tools: Remove redundant grep -v vendors/

This was used in conjunction with `go list <path>`, however `go list`
already ignores the vendor directory. This made this `grep -v` redundant.

* Missed an apostrophe
2018-07-17 21:33:00 +04:00
Silas Davis
b7e5cbeb3b Remove pb.go files from codecov
Signed-off-by: Silas Davis <silas@monax.io>
2018-07-17 17:42:30 +01:00
Silas Davis
21b900dceb Add gogo generated tests for pb.go files
Signed-off-by: Silas Davis <silas@monax.io>
2018-07-17 14:56:11 +01:00
Silas Davis
c9f92f465b Use pattern rule for protoc building and \\nolint in generated pb.go files
Signed-off-by: Silas Davis <silas@monax.io>
2018-07-17 14:20:49 +01:00
Silas Davis
398f3779cc Add gogoproto marshallers to proto files in order to make use of
gogoproto.nullable compatible with GRPC downstream of ABCI and libs
protobuf types
2018-07-17 11:51:38 +01:00
Zach
257622cf6b update rpc/core/doc.go
Closes #1932 (#1986)
2018-07-17 10:52:49 +04:00
Max Levy
07ad325b1a A link fixed (#1991)
To address the relocation of terraform-and-ansible.md under networks/, as per e54c0f804f (diff-95ac35ad12aa33ed70e9ff5db2229771)
2018-07-17 10:49:38 +04:00
Max Levy
76f5e92528 Fixed a link (#1992)
Broken by #e54c0f8
2018-07-17 10:44:59 +04:00
Dev Ojha
71859f8f3b common/rand: Remove exponential distribution functions (#1979)
We were computing these functions incorrectly.
I'm not sure what distribution these numbers are, but it isn't the
normal exponential distribution. (We're making the probability of
getting a number of a particular bitlength equal, but the number in
that bitlength that gets chosen is chosen uniformly.)

We weren't using these functions anywhere in our codebase, and they
had a nomenclature error. (There aren't exponentially distributed
integers, instead they would be geometrically distributed)
2018-07-16 11:38:04 +04:00
ValarDragon
a3df06d081 libs/common/rand: Update godocs
The godocs fell out of sync with the code here. Additionally, every
function had a warning that these randomness functions weren't for
cryptographic use. However these warnings are confusing, since
there was no implication that they would be secure there, and a
single warning on the actual Rand type would suffice. (This is what
is done in golang's math/rand godoc)

Additionally we indicated that rand.Bytes() was reading OS randomness
but in fact that had been changed.
2018-07-16 11:38:04 +04:00
Dev Ojha
dae7dc30e0 Switch usage of math/rand to cmn's rand (#1980)
This commit switches all usage of math/rand to cmn's rand. The only
exceptions are within the random file itself, the tools package, and the
crypto package. In tools you don't want it to lock between the go-routines.
The crypto package doesn't use it so the crypto package have no other
dependencies within tendermint/tendermint for easier portability.

Crypto/rand usage is unadjusted.

Closes #1343
2018-07-16 11:20:37 +04:00
Dev Ojha
14cebd181d config: 10x default send/recv rate (#1978)
* config: 10x default send/recv rate

This increases the default send/recv rate from 512 kB/s to 5.12 MB/s

Closes #1752

* Fix typo
2018-07-16 11:17:27 +04:00
Ethan Buchman
522a425708 Merge pull request #1975 from tendermint/bucky/1951-fix-protoc-libs
makefile: fix protoc_libs
2018-07-15 13:19:00 +01:00
Ethan Buchman
0fbcbb3aeb makefile: fix protoc_libs 2018-07-14 18:33:18 +01:00
Ethan Buchman
8a5930ad72 Merge pull request #1974 from tendermint/master
Merge master back to develop
2018-07-14 15:13:52 +01:00
Anton Kaliaev
270659f03f make Block Header and Data non-pointers
make BlockMeta Header a non-pointer

Refs #693
2018-07-13 12:05:54 +04:00
Ethan Buchman
9018acde5f tmlibs -> tendermint/libs 2018-07-02 14:58:07 -04:00
Ethan Buchman
5453aa6169 Merge branch 'develop' into jae/literefactor4 2018-07-02 14:57:30 -04:00
Jae Kwon
b51ed132f7 Fix test/p2p/pex circle tests; update consensus 2018-06-27 16:24:21 -07:00
Jae Kwon
8524a8da7f Try to fix circle... 2018-06-27 04:22:30 -07:00
Jae Kwon
cfcbc61449 oops 2018-06-27 04:04:33 -07:00
Jae Kwon
9184733261 try it with new consensus? 2018-06-27 02:34:11 -07:00
Jae Kwon
363146dacf just print node1 2018-06-27 02:03:15 -07:00
Jae Kwon
ad1b722898 bump for circle 2018-06-27 00:41:50 -07:00
Jae Kwon
8163b99a75 print docker output to console to debug circle 2018-06-27 00:37:53 -07:00
Jae Kwon
835c2ee74a Print 2018-06-27 00:09:04 -07:00
Jae Kwon
19fc4ac47c remove abci from gopkg.toml 2018-06-26 23:58:47 -07:00
Jae Kwon
acd976ad5b bump circle 2018-06-26 23:42:00 -07:00
Jae Kwon
37ef5485b4 Add logs to lite/*; Fix rpc status to return consensus height, not blockstore height 2018-06-26 16:53:06 -07:00
Anton Kaliaev
7f4498f8b1 remove no longer needed install_abci_apps script
Fixes
https://circleci.com/gh/tendermint/tendermint/12923?utm_campaign=vcs-integration-link&utm_medium=referral&utm_source=github-build-link
2018-06-26 11:10:54 +04:00
Jae Kwon
538c410bcd Fixes from review 2018-06-25 18:16:16 -07:00
Jae Kwon
c3296f2e01 Garbage collect DBProvider (unoptimized); Certifier creation takes a client 2018-06-25 17:13:00 -07:00
Jae Kwon
242a6037e8 Fixes from review 2018-06-25 17:12:52 -07:00
Jae Kwon
bf0ff212b9 Refactor "lite" to handle delayed validator set changes.
Also, fix consensus liveness issue.
2018-06-25 17:12:25 -07:00
Jae Kwon
a5b7ea93c4 Delay validator set changes by 1 block. 2018-06-25 16:59:00 -07:00
Jae Kwon
8128627f08 Optimizing blockchain reactor.
Should be paired with https://github.com/tendermint/iavl/pull/65.
2018-06-22 21:47:48 -07:00
424 changed files with 34069 additions and 9509 deletions

.circleci/codecov.sh — new file, 1550 lines (diff suppressed because it is too large)

@@ -16,7 +16,7 @@ jobs:
       - checkout
       - restore_cache:
           keys:
-            - v2-pkg-cache
+            - v3-pkg-cache
       - run:
           name: tools
           command: |
@@ -38,13 +38,13 @@ jobs:
             - bin
             - profiles
       - save_cache:
-          key: v2-pkg-cache
+          key: v3-pkg-cache
           paths:
             - /go/pkg
-      - save_cache:
-          key: v2-tree-{{ .Environment.CIRCLE_SHA1 }}
-          paths:
-            - /go/src/github.com/tendermint/tendermint
+      # - save_cache:
+      #     key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
+      #     paths:
+      #       - /go/src/github.com/tendermint/tendermint
   build_slate:
     <<: *defaults
@@ -52,9 +52,24 @@ jobs:
       - attach_workspace:
           at: /tmp/workspace
       - restore_cache:
-          key: v2-pkg-cache
-      - restore_cache:
-          key: v2-tree-{{ .Environment.CIRCLE_SHA1 }}
+          key: v3-pkg-cache
+      # https://discuss.circleci.com/t/saving-cache-stopped-working-warning-skipping-this-step-disabled-in-configuration/24423/2
+      # - restore_cache:
+      #     key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
+      - checkout
+      - run:
+          name: tools
+          command: |
+            export PATH="$GOBIN:$PATH"
+            make get_tools
+      - run:
+          name: dependencies
+          command: |
+            export PATH="$GOBIN:$PATH"
+            make get_vendor_deps
+      - run: mkdir -p $GOPATH/src/github.com/tendermint
+      - run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint
       - run:
           name: slate docs
           command: |
@@ -68,15 +83,35 @@ jobs:
       - attach_workspace:
           at: /tmp/workspace
       - restore_cache:
-          key: v2-pkg-cache
-      - restore_cache:
-          key: v2-tree-{{ .Environment.CIRCLE_SHA1 }}
+          key: v3-pkg-cache
+      # - restore_cache:
+      #     key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
+      - checkout
+      - run:
+          name: tools
+          command: |
+            export PATH="$GOBIN:$PATH"
+            make get_tools
+      - run:
+          name: dependencies
+          command: |
+            export PATH="$GOBIN:$PATH"
+            make get_vendor_deps
+      - run: mkdir -p $GOPATH/src/github.com/tendermint
+      - run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint
       - run:
           name: metalinter
           command: |
             set -ex
             export PATH="$GOBIN:$PATH"
             make metalinter
+      - run:
+          name: check_dep
+          command: |
+            set -ex
+            export PATH="$GOBIN:$PATH"
+            make check_dep
   test_abci_apps:
     <<: *defaults
@@ -84,9 +119,23 @@ jobs:
       - attach_workspace:
           at: /tmp/workspace
       - restore_cache:
-          key: v2-pkg-cache
-      - restore_cache:
-          key: v2-tree-{{ .Environment.CIRCLE_SHA1 }}
+          key: v3-pkg-cache
+      # - restore_cache:
+      #     key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
+      - checkout
+      - run:
+          name: tools
+          command: |
+            export PATH="$GOBIN:$PATH"
+            make get_tools
+      - run:
+          name: dependencies
+          command: |
+            export PATH="$GOBIN:$PATH"
+            make get_vendor_deps
+      - run: mkdir -p $GOPATH/src/github.com/tendermint
+      - run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint
       - run:
           name: Run abci apps tests
           command: |
@@ -101,9 +150,23 @@ jobs:
       - attach_workspace:
           at: /tmp/workspace
       - restore_cache:
-          key: v2-pkg-cache
-      - restore_cache:
-          key: v2-tree-{{ .Environment.CIRCLE_SHA1 }}
+          key: v3-pkg-cache
+      # - restore_cache:
+      #     key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
+      - checkout
+      - run:
+          name: tools
+          command: |
+            export PATH="$GOBIN:$PATH"
+            make get_tools
+      - run:
+          name: dependencies
+          command: |
+            export PATH="$GOBIN:$PATH"
+            make get_vendor_deps
+      - run: mkdir -p $GOPATH/src/github.com/tendermint
+      - run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint
       - run:
           name: Run abci-cli tests
           command: |
@@ -116,9 +179,23 @@ jobs:
       - attach_workspace:
           at: /tmp/workspace
       - restore_cache:
-          key: v2-pkg-cache
-      - restore_cache:
-          key: v2-tree-{{ .Environment.CIRCLE_SHA1 }}
+          key: v3-pkg-cache
+      # - restore_cache:
+      #     key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
+      - checkout
+      - run:
+          name: tools
+          command: |
+            export PATH="$GOBIN:$PATH"
+            make get_tools
+      - run:
+          name: dependencies
+          command: |
+            export PATH="$GOBIN:$PATH"
+            make get_vendor_deps
+      - run: mkdir -p $GOPATH/src/github.com/tendermint
+      - run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint
       - run: sudo apt-get update && sudo apt-get install -y --no-install-recommends bsdmainutils
       - run:
           name: Run tests
@@ -131,17 +208,31 @@ jobs:
       - attach_workspace:
           at: /tmp/workspace
       - restore_cache:
-          key: v2-pkg-cache
-      - restore_cache:
-          key: v2-tree-{{ .Environment.CIRCLE_SHA1 }}
+          key: v3-pkg-cache
+      # - restore_cache:
+      #     key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
+      - checkout
+      - run:
+          name: tools
+          command: |
+            export PATH="$GOBIN:$PATH"
+            make get_tools
+      - run:
+          name: dependencies
+          command: |
+            export PATH="$GOBIN:$PATH"
+            make get_vendor_deps
+      - run: mkdir -p $GOPATH/src/github.com/tendermint
+      - run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint
       - run: mkdir -p /tmp/logs
       - run:
           name: Run tests
command: |
for pkg in $(go list github.com/tendermint/tendermint/... | grep -v /vendor/ | circleci tests split --split-by=timings); do
for pkg in $(go list github.com/tendermint/tendermint/... | circleci tests split --split-by=timings); do
id=$(basename "$pkg")
GOCACHE=off go test -v -timeout 5m -race -coverprofile=/tmp/workspace/profiles/$id.out -covermode=atomic "$pkg" | tee "/tmp/logs/$id-$RANDOM.log"
GOCACHE=off go test -timeout 5m -race -coverprofile=/tmp/workspace/profiles/$id.out -covermode=atomic "$pkg" | tee "/tmp/logs/$id-$RANDOM.log"
done
- persist_to_workspace:
root: /tmp/workspace
@@ -156,13 +247,49 @@ jobs:
- attach_workspace:
at: /tmp/workspace
- restore_cache:
key: v2-pkg-cache
- restore_cache:
key: v2-tree-{{ .Environment.CIRCLE_SHA1 }}
key: v3-pkg-cache
# - restore_cache:
# key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- checkout
- run:
name: tools
command: |
export PATH="$GOBIN:$PATH"
make get_tools
- run:
name: dependencies
command: |
export PATH="$GOBIN:$PATH"
make get_vendor_deps
- run: mkdir -p $GOPATH/src/github.com/tendermint
- run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint
- run:
name: Run tests
command: bash test/persist/test_failure_indices.sh
localnet:
working_directory: /home/circleci/.go_workspace/src/github.com/tendermint/tendermint
machine:
image: circleci/classic:latest
environment:
GOBIN: /home/circleci/.go_workspace/bin
GOPATH: /home/circleci/.go_workspace/
GOOS: linux
GOARCH: amd64
parallelism: 1
steps:
- checkout
- run:
name: run localnet and exit on failure
command: |
set -x
make get_tools
make get_vendor_deps
make build-linux
make localnet-start &
./scripts/localnet-blocks-test.sh 40 5 10 localhost
test_p2p:
environment:
GOBIN: /home/circleci/.go_workspace/bin
@@ -180,8 +307,22 @@ jobs:
steps:
- attach_workspace:
at: /tmp/workspace
- restore_cache:
key: v2-tree-{{ .Environment.CIRCLE_SHA1 }}
# - restore_cache:
# key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- checkout
- run:
name: tools
command: |
export PATH="$GOBIN:$PATH"
make get_tools
- run:
name: dependencies
command: |
export PATH="$GOBIN:$PATH"
make get_vendor_deps
- run: mkdir -p $GOPATH/src/github.com/tendermint
- run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint
- run:
name: gather
command: |
@@ -193,7 +334,7 @@ jobs:
done
- run:
name: upload
command: bash <(curl -s https://codecov.io/bash) -f coverage.txt
command: bash .circleci/codecov.sh -f coverage.txt
workflows:
version: 2
@@ -218,6 +359,9 @@ workflows:
- test_persistence:
requires:
- setup_dependencies
- localnet:
requires:
- setup_dependencies
- test_p2p
- upload_coverage:
requires:

.github/CODEOWNERS (2 lines changed)

@@ -4,4 +4,4 @@
* @ebuchman @melekes @xla
# Precious documentation
/docs/ @zramsay @jolesbi
/docs/ @zramsay


@@ -1,42 +0,0 @@
<!-- Thanks for filing an issue! Before hitting the button, please answer these questions.-->
**Is this a BUG REPORT or FEATURE REQUEST?** (choose one):
<!--
If this is a BUG REPORT, please:
- Fill in as much of the template below as you can.
If this is a FEATURE REQUEST, please:
- Describe *in detail* the feature/behavior/change you'd like to see.
In both cases, be ready for followup questions, and please respond in a timely
manner. We might ask you to provide additional logs and data (tendermint & app)
in a case of bug.
-->
**Tendermint version** (use `tendermint version` or `git rev-parse --verify HEAD` if installed from source):
**ABCI app** (name for built-in, URL for self-written if it's publicly available):
**Environment**:
- **OS** (e.g. from /etc/os-release):
- **Install tools**:
- **Others**:
**What happened**:
**What you expected to happen**:
**How to reproduce it** (as minimally and precisely as possible):
**Logs (you can paste a small part showing an error or link a pastebin, gist, etc. containing more of the log file)**:
**Config (you can paste only the changes you've made)**:
**`/dump_consensus_state` output for consensus bugs**
**Anything else do we need to know**:

.github/ISSUE_TEMPLATE/bug-report.md (new file, 42 lines)

@@ -0,0 +1,42 @@
---
name: Bug Report
about: Create a report to help us squash bugs!
---
<!--
Please fill in as much of the template below as you can.
Be ready for followup questions, and please respond in a timely
manner. We might ask you to provide additional logs and data (tendermint & app).
-->
**Tendermint version** (use `tendermint version` or `git rev-parse --verify HEAD` if installed from source):
**ABCI app** (name for built-in, URL for self-written if it's publicly available):
**Environment**:
- **OS** (e.g. from /etc/os-release):
- **Install tools**:
- **Others**:
**What happened**:
**What you expected to happen**:
**Have you tried the latest version**: yes/no
**How to reproduce it** (as minimally and precisely as possible):
**Logs (paste a small part showing an error (< 10 lines) or link a pastebin, gist, etc. containing more of the log file)**:
**Config (you can paste only the changes you've made)**:
**node command runtime flags**:
**`/dump_consensus_state` output for consensus bugs**
**Anything else we need to know**:


@@ -0,0 +1,13 @@
---
name: Feature Request
about: Create a proposal to request a feature
---
<!--
Please describe *in detail* the feature/behavior/change you'd like to see.
Be ready for followup questions, and please respond in a timely
manner.
Word of caution: poorly thought out proposals may be rejected without deliberation
-->


@@ -3,4 +3,4 @@
* [ ] Updated all relevant documentation in docs
* [ ] Updated all code comments where relevant
* [ ] Wrote tests
* [ ] Updated CHANGELOG.md
* [ ] Updated CHANGELOG_PENDING.md

.gitignore (7 lines changed)

@@ -16,8 +16,8 @@ coverage.txt
docs/_build
*.log
abci-cli
abci/types/types.pb.go
docs/node_modules/
index.html.md
scripts/wal2json/wal2json
scripts/cutWALUntil/cutWALUntil
@@ -28,9 +28,14 @@ scripts/cutWALUntil/cutWALUntil
libs/pubsub/query/fuzz_test/output
shunit2
.tendermint-lite
addrbook.json
*/vendor
*/.glide
.terraform
terraform.tfstate
terraform.tfstate.backup
terraform.tfstate.d
.vscode


@@ -1,5 +1,220 @@
# Changelog
## 0.24.0
*September 6th, 2018*
Special thanks to external contributors with PRs included in this release: ackratos, james-ray, bradyjoestar,
peerlink, Ahmah2009, bluele, b00f.
This release includes breaking upgrades in the block header,
including the long awaited changes for delaying validator set updates by one
block to better support light clients.
It also fixes enforcement on the maximum size of blocks, and includes a BFT
timestamp in each block that can be safely used by applications.
There are also some minor breaking changes to the rpc, config, and ABCI.
See the [UPGRADING.md](UPGRADING.md#v0.24.0) for details on upgrading to the new
version.
From here on, breaking changes will be broken down to better reflect how users
are affected by a change.
A few more breaking changes are in the works - each will come with a clear
Architecture Decision Record (ADR) explaining the change. You can review ADRs
[here](https://github.com/tendermint/tendermint/tree/develop/docs/architecture)
or in the [open Pull Requests](https://github.com/tendermint/tendermint/pulls).
You can also check in on the [issues marked as
breaking](https://github.com/tendermint/tendermint/issues?q=is%3Aopen+is%3Aissue+label%3Abreaking).
BREAKING CHANGES:
* CLI/RPC/Config
- [config] [\#2169](https://github.com/tendermint/tendermint/issues/2169) Replace MaxNumPeers with MaxNumInboundPeers and MaxNumOutboundPeers
- [config] [\#2300](https://github.com/tendermint/tendermint/issues/2300) Reduce default mempool size from 100k to 5k, until ABCI rechecking is implemented.
- [rpc] [\#1815](https://github.com/tendermint/tendermint/issues/1815) `/commit` returns a `signed_header` field instead of everything being top-level
* Apps
- [abci] Added address of the original proposer of the block to Header
- [abci] Change ABCI Header to match Tendermint exactly
- [abci] [\#2159](https://github.com/tendermint/tendermint/issues/2159) Update use of `Validator` (see
[ADR-018](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-018-ABCI-Validators.md)):
- Remove PubKey from `Validator` (so it's just Address and Power)
- Introduce `ValidatorUpdate` (with just PubKey and Power)
- InitChain and EndBlock use ValidatorUpdate
- Update field names and types in BeginBlock
- [state] [\#1815](https://github.com/tendermint/tendermint/issues/1815) Validator set changes are now delayed by one block
- updates returned in ResponseEndBlock for block H will be included in RequestBeginBlock for block H+2
* Go API
- [lite] [\#1815](https://github.com/tendermint/tendermint/issues/1815) Complete refactor of the package
- [node] [\#2212](https://github.com/tendermint/tendermint/issues/2212) NewNode now accepts a `*p2p.NodeKey` (@bradyjoestar)
- [libs/common] [\#2199](https://github.com/tendermint/tendermint/issues/2199) Remove Fmt, in favor of fmt.Sprintf
- [libs/common] SplitAndTrim was deleted
- [libs/common] [\#2274](https://github.com/tendermint/tendermint/issues/2274) Remove unused Math functions like MaxInt, MaxInt64,
MinInt, MinInt64 (@Ahmah2009)
- [libs/clist] Panics if list extends beyond MaxLength
- [crypto] [\#2205](https://github.com/tendermint/tendermint/issues/2205) Rename AminoRoute variables to no longer be prefixed by signature type.
* Blockchain Protocol
- [state] [\#1815](https://github.com/tendermint/tendermint/issues/1815) Validator set changes are now delayed by one block (!)
- Add NextValidatorSet to State, changes on-disk representation of state
- [state] [\#2184](https://github.com/tendermint/tendermint/issues/2184) Enforce ConsensusParams.BlockSize.MaxBytes (See
[ADR-020](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-020-block-size.md)).
- Remove ConsensusParams.BlockSize.MaxTxs
- Introduce maximum sizes for all components of a block, including ChainID
- [types] Updates to the block Header:
- [\#1815](https://github.com/tendermint/tendermint/issues/1815) NextValidatorsHash - hash of the validator set for the next block,
so the current validators actually sign over the hash for the new
validators
- [\#2106](https://github.com/tendermint/tendermint/issues/2106) ProposerAddress - address of the block's original proposer
- [consensus] [\#2203](https://github.com/tendermint/tendermint/issues/2203) Implement BFT time
- Timestamp in block must be monotonic and equal the median of timestamps in block's LastCommit
- [crypto] [\#2239](https://github.com/tendermint/tendermint/issues/2239) Secp256k1 signature changes (See
[ADR-014](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-014-secp-malleability.md)):
- format changed from DER to `r || s`, both little endian encoded as 32 bytes.
- malleability removed by requiring `s` to be in canonical form.
* P2P Protocol
- [p2p] [\#2263](https://github.com/tendermint/tendermint/issues/2263) Update secret connection to use a little endian encoded nonce
- [blockchain] [\#2213](https://github.com/tendermint/tendermint/issues/2213) Fix Amino routes for blockchain reactor messages
(@peerlink)
FEATURES:
- [types] [\#2015](https://github.com/tendermint/tendermint/issues/2015) Allow genesis file to have 0 validators (@b00f)
- Initial validator set can be determined by the app in ResponseInitChain
- [rpc] [\#2161](https://github.com/tendermint/tendermint/issues/2161) New event `ValidatorSetUpdates` for when the validator set changes
- [crypto/multisig] [\#2164](https://github.com/tendermint/tendermint/issues/2164) Introduce multisig pubkey and signature format
- [libs/db] [\#2293](https://github.com/tendermint/tendermint/issues/2293) Allow passing options through when creating instances of leveldb dbs
IMPROVEMENTS:
- [docs] Lint documentation with `write-good` and `stop-words`.
- [docs] [\#2249](https://github.com/tendermint/tendermint/issues/2249) Refactor, deduplicate, and improve the ABCI docs and spec (with thanks to @ttmc).
- [scripts] [\#2196](https://github.com/tendermint/tendermint/issues/2196) Added json2wal tool, which is supposed to help our users restore
corrupted WAL files and compose test WAL files (@bradyjoestar)
- [mempool] [\#2234](https://github.com/tendermint/tendermint/issues/2234) Now stores txs by hash inside of the cache, to mitigate memory leakage
- [mempool] [\#2166](https://github.com/tendermint/tendermint/issues/2166) Set explicit capacity for map when updating txs (@bluele)
BUG FIXES:
- [config] [\#2284](https://github.com/tendermint/tendermint/issues/2284) Replace `db_path` with `db_dir` from automatically generated configuration files.
- [mempool] [\#2188](https://github.com/tendermint/tendermint/issues/2188) Fix OOM issue from cache map and list getting out of sync
- [state] [\#2051](https://github.com/tendermint/tendermint/issues/2051) KV store index supports searching by `tx.height` (@ackratos)
- [rpc] [\#2327](https://github.com/tendermint/tendermint/issues/2327) `/dial_peers` does not try to dial existing peers
- [node] [\#2323](https://github.com/tendermint/tendermint/issues/2323) Filter empty strings from config lists (@james-ray)
- [abci/client] [\#2236](https://github.com/tendermint/tendermint/issues/2236) Fix closing GRPC connection (@bradyjoestar)
## 0.23.1
*August 22nd, 2018*
BUG FIXES:
- [libs/autofile] \#2261 Fix log rotation so it actually happens.
- Fixes issues with consensus WAL growing unbounded ala \#2259
## 0.23.0
*August 5th, 2018*
This release includes breaking upgrades in our P2P encryption,
some ABCI messages, and how we encode time and signatures.
A few more changes are still coming to the Header, ABCI,
and validator set handling to better support light clients, BFT time, and
upgrades. Most notably, validator set changes will be delayed by one block (see
[#1815][i1815]).
We also removed `make ensure_deps` in favour of `make get_vendor_deps`.
BREAKING CHANGES:
- [abci] Changed time format from int64 to google.protobuf.Timestamp
- [abci] Changed Validators to LastCommitInfo in RequestBeginBlock
- [abci] Removed Fee from ResponseDeliverTx and ResponseCheckTx
- [crypto] Switch crypto.Signature from interface to []byte for space efficiency
[#2128](https://github.com/tendermint/tendermint/pull/2128)
- NOTE: this means signatures no longer have the prefix bytes in Amino
binary nor the `type` field in Amino JSON. They're just bytes.
- [p2p] Remove salsa and ripemd primitives, in favor of using chacha as a stream cipher, and hkdf [#2054](https://github.com/tendermint/tendermint/pull/2054)
- [tools] Removed `make ensure_deps` in favor of `make get_vendor_deps`
- [types] CanonicalTime uses nanoseconds instead of clipping to ms
- breaks serialization/signing of all messages with a timestamp
FEATURES:
- [tools] Added `make check_dep`
- ensures gopkg.lock is synced with gopkg.toml
- ensures no branches are used in the gopkg.toml
IMPROVEMENTS:
- [blockchain] Improve fast-sync logic
[#1805](https://github.com/tendermint/tendermint/pull/1805)
- tweak params
- only process one block at a time to avoid starving
- [common] bit array functions which take in another parameter are now thread safe
- [crypto] Switch hkdfchachapoly1305 to xchachapoly1305
- [p2p] begin connecting to peers as soon a seed node provides them to you ([#2093](https://github.com/tendermint/tendermint/issues/2093))
BUG FIXES:
- [common] Safely handle cases where atomic write files already exist [#2109](https://github.com/tendermint/tendermint/issues/2109)
- [privval] fix a deadline for accepting new connections in socket private
validator.
- [p2p] Allow startup if a configured seed node's IP can't be resolved ([#1716](https://github.com/tendermint/tendermint/issues/1716))
- [node] Fully exit when CTRL-C is pressed even if consensus state panics [#2072](https://github.com/tendermint/tendermint/issues/2072)
[i1815]: https://github.com/tendermint/tendermint/pull/1815
## 0.22.8
*July 26th, 2018*
BUG FIXES
- [consensus, blockchain] Fix 0.22.7 below.
## 0.22.7
*July 26th, 2018*
BUG FIXES
- [consensus, blockchain] Register the Evidence interface so it can be
marshalled/unmarshalled by the blockchain and consensus reactors
## 0.22.6
*July 24th, 2018*
BUG FIXES
- [rpc] Fix `/blockchain` endpoint
- (#2049) Fix OOM attack by returning error on negative input
- Fix result length to have max 20 (instead of 21) block metas
- [rpc] Validate height is non-negative in `/abci_query`
- [consensus] (#2050) Include evidence in proposal block parts (previously evidence was
not being included in blocks!)
- [p2p] (#2046) Close rejected inbound connections so file descriptor doesn't
leak
- [Gopkg] (#2053) Fix versions in the toml
## 0.22.5
*July 23rd, 2018*
BREAKING CHANGES:
- [crypto] Refactor `tendermint/crypto` into many subpackages
- [libs/common] remove exponentially distributed random numbers
IMPROVEMENTS:
- [abci, libs/common] Generated gogoproto static marshaller methods
- [config] Increase default send/recv rates to 5 mB/s
- [p2p] reject addresses coming from private peers
- [p2p] allow persistent peers to be private
BUG FIXES:
- [mempool] fixed a race condition when `create_empty_blocks=false` where a
transaction is published at an old height.
- [p2p] dial external IP setup by `persistent_peers`, not internal NAT IP
- [rpc] make `/status` RPC endpoint resistant to consensus halt
## 0.22.4
*July 14th, 2018*
@@ -14,7 +229,8 @@ FEATURES:
BUG FIXES:
- [tools/tm-bench] Various fixes
- [consensus] Wait for WAL to stop on shutdown
- [abci] Fix #1891, pending requests cannot hang when abci server dies. Previously a crash in BeginBlock could leave tendermint in broken state.
- [abci] Fix #1891, pending requests cannot hang when abci server dies.
Previously a crash in BeginBlock could leave tendermint in broken state.
## 0.22.3
@@ -534,7 +750,7 @@ BREAKING CHANGES:
- use scripts/wal2json to convert to json for debugging
FEATURES:
- new `certifiers` pkg contains the tendermint light-client library (name subject to change)!
- new `Verifiers` pkg contains the tendermint light-client library (name subject to change)!
- rpc: `/genesis` includes the `app_options` .
- rpc: `/abci_query` takes an additional `height` parameter to support historical queries.
- rpc/client: new ABCIQueryWithOptions supports options like `trusted` (set false to get a proof) and `height` to query a historical height.

CHANGELOG_PENDING.md (new file, 22 lines)

@@ -0,0 +1,22 @@
# Pending
Special thanks to external contributors with PRs included in this release:
BREAKING CHANGES:
* CLI/RPC/Config
* Apps
* Go API
* Blockchain Protocol
* P2P Protocol
FEATURES:
IMPROVEMENTS:
BUG FIXES:

Gopkg.lock (generated, 221 lines changed)

@@ -3,48 +3,63 @@
[[projects]]
branch = "master"
digest = "1:d6afaeed1502aa28e80a4ed0981d570ad91b2579193404256ce672ed0a609e0d"
name = "github.com/beorn7/perks"
packages = ["quantile"]
pruneopts = "UT"
revision = "3a771d992973f24aa725d07868b467d1ddfceafb"
[[projects]]
branch = "master"
digest = "1:2c00f064ba355903866cbfbf3f7f4c0fe64af6638cc7d1b8bdcf3181bc67f1d8"
name = "github.com/btcsuite/btcd"
packages = ["btcec"]
revision = "fdfc19097e7ac6b57035062056f5b7b4638b8898"
pruneopts = "UT"
revision = "f5e261fc9ec3437697fb31d8b38453c293204b29"
[[projects]]
digest = "1:1d8e1cb71c33a9470bbbae09bfec09db43c6bf358dfcae13cd8807c4e2a9a2bf"
name = "github.com/btcsuite/btcutil"
packages = [
"base58",
"bech32"
"bech32",
]
pruneopts = "UT"
revision = "d4cc87b860166d00d6b5b9e0d3b3d71d6088d4d4"
[[projects]]
digest = "1:a2c1d0e43bd3baaa071d1b9ed72c27d78169b2b269f71c105ac4ba34b1be4a39"
name = "github.com/davecgh/go-spew"
packages = ["spew"]
pruneopts = "UT"
revision = "346938d642f2ec3594ed81d874461961cd0faa76"
version = "v1.1.0"
[[projects]]
digest = "1:c7644c73a3d23741fdba8a99b1464e021a224b7e205be497271a8003a15ca41b"
name = "github.com/ebuchman/fail-test"
packages = ["."]
pruneopts = "UT"
revision = "95f809107225be108efcf10a3509e4ea6ceef3c4"
[[projects]]
digest = "1:544229a3ca0fb2dd5ebc2896d3d2ff7ce096d9751635301e44e37e761349ee70"
name = "github.com/fortytw2/leaktest"
packages = ["."]
pruneopts = "UT"
revision = "a5ef70473c97b71626b9abeda80ee92ba2a7de9e"
version = "v1.2.0"
[[projects]]
digest = "1:abeb38ade3f32a92943e5be54f55ed6d6e3b6602761d74b4aab4c9dd45c18abd"
name = "github.com/fsnotify/fsnotify"
packages = ["."]
pruneopts = "UT"
revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9"
version = "v1.4.7"
[[projects]]
digest = "1:fdf5169073fb0ad6dc12a70c249145e30f4058647bea25f0abd48b6d9f228a11"
name = "github.com/go-kit/kit"
packages = [
"log",
@@ -53,24 +68,30 @@
"metrics",
"metrics/discard",
"metrics/internal/lv",
"metrics/prometheus"
"metrics/prometheus",
]
pruneopts = "UT"
revision = "4dc7be5d2d12881735283bcab7352178e190fc71"
version = "v0.6.0"
[[projects]]
digest = "1:31a18dae27a29aa074515e43a443abfd2ba6deb6d69309d8d7ce789c45f34659"
name = "github.com/go-logfmt/logfmt"
packages = ["."]
pruneopts = "UT"
revision = "390ab7935ee28ec6b286364bba9b4dd6410cb3d5"
version = "v0.3.0"
[[projects]]
digest = "1:c4a2528ccbcabf90f9f3c464a5fc9e302d592861bbfd0b7135a7de8a943d0406"
name = "github.com/go-stack/stack"
packages = ["."]
pruneopts = "UT"
revision = "259ab82a6cad3992b4e21ff5cac294ccb06474bc"
version = "v1.7.0"
[[projects]]
digest = "1:35621fe20f140f05a0c4ef662c26c0ab4ee50bca78aa30fe87d33120bd28165e"
name = "github.com/gogo/protobuf"
packages = [
"gogoproto",
@@ -78,37 +99,45 @@
"proto",
"protoc-gen-gogo/descriptor",
"sortkeys",
"types"
"types",
]
revision = "1adfc126b41513cc696b209667c8656ea7aac67c"
version = "v1.0.0"
pruneopts = "UT"
revision = "636bf0302bc95575d69441b25a2603156ffdddf1"
version = "v1.1.1"
[[projects]]
digest = "1:17fe264ee908afc795734e8c4e63db2accabaf57326dbf21763a7d6b86096260"
name = "github.com/golang/protobuf"
packages = [
"proto",
"ptypes",
"ptypes/any",
"ptypes/duration",
"ptypes/timestamp"
"ptypes/timestamp",
]
revision = "925541529c1fa6821df4e44ce2723319eb2be768"
version = "v1.0.0"
pruneopts = "UT"
revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265"
version = "v1.1.0"
[[projects]]
branch = "master"
digest = "1:4a0c6bb4805508a6287675fac876be2ac1182539ca8a32468d8128882e9d5009"
name = "github.com/golang/snappy"
packages = ["."]
pruneopts = "UT"
revision = "2e65f85255dbc3072edf28d6b5b8efc472979f5a"
[[projects]]
digest = "1:43dd08a10854b2056e615d1b1d22ac94559d822e1f8b6fcc92c1a1057e85188e"
name = "github.com/gorilla/websocket"
packages = ["."]
pruneopts = "UT"
revision = "ea4d1f681babbce9545c9c5f3d5194a789c89f5b"
version = "v1.2.0"
[[projects]]
branch = "master"
digest = "1:12247a2e99a060cc692f6680e5272c8adf0b8f572e6bce0d7095e624c958a240"
name = "github.com/hashicorp/hcl"
packages = [
".",
@@ -119,154 +148,197 @@
"hcl/token",
"json/parser",
"json/scanner",
"json/token"
"json/token",
]
pruneopts = "UT"
revision = "ef8a98b0bbce4a65b5aa4c368430a80ddc533168"
[[projects]]
digest = "1:870d441fe217b8e689d7949fef6e43efbc787e50f200cb1e70dbca9204a1d6be"
name = "github.com/inconshreveable/mousetrap"
packages = ["."]
pruneopts = "UT"
revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
version = "v1.0"
[[projects]]
branch = "master"
digest = "1:39b27d1381a30421f9813967a5866fba35dc1d4df43a6eefe3b7a5444cb07214"
name = "github.com/jmhodges/levigo"
packages = ["."]
pruneopts = "UT"
revision = "c42d9e0ca023e2198120196f842701bb4c55d7b9"
[[projects]]
branch = "master"
digest = "1:a64e323dc06b73892e5bb5d040ced475c4645d456038333883f58934abbf6f72"
name = "github.com/kr/logfmt"
packages = ["."]
pruneopts = "UT"
revision = "b84e30acd515aadc4b783ad4ff83aff3299bdfe0"
[[projects]]
digest = "1:c568d7727aa262c32bdf8a3f7db83614f7af0ed661474b24588de635c20024c7"
name = "github.com/magiconair/properties"
packages = ["."]
pruneopts = "UT"
revision = "c2353362d570a7bfa228149c62842019201cfb71"
version = "v1.8.0"
[[projects]]
digest = "1:ff5ebae34cfbf047d505ee150de27e60570e8c394b3b8fdbb720ff6ac71985fc"
name = "github.com/matttproud/golang_protobuf_extensions"
packages = ["pbutil"]
pruneopts = "UT"
revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c"
version = "v1.0.1"
[[projects]]
branch = "master"
digest = "1:5ab79470a1d0fb19b041a624415612f8236b3c06070161a910562f2b2d064355"
name = "github.com/mitchellh/mapstructure"
packages = ["."]
revision = "bb74f1db0675b241733089d5a1faa5dd8b0ef57b"
pruneopts = "UT"
revision = "f15292f7a699fcc1a38a80977f80a046874ba8ac"
[[projects]]
digest = "1:95741de3af260a92cc5c7f3f3061e85273f5a81b5db20d4bd68da74bd521675e"
name = "github.com/pelletier/go-toml"
packages = ["."]
pruneopts = "UT"
revision = "c01d1270ff3e442a8a57cddc1c92dc1138598194"
version = "v1.2.0"
[[projects]]
digest = "1:40e195917a951a8bf867cd05de2a46aaf1806c50cf92eebf4c16f78cd196f747"
name = "github.com/pkg/errors"
packages = ["."]
pruneopts = "UT"
revision = "645ef00459ed84a119197bfb8d8205042c6df63d"
version = "v0.8.0"
[[projects]]
digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe"
name = "github.com/pmezard/go-difflib"
packages = ["difflib"]
pruneopts = "UT"
revision = "792786c7400a136282c1664665ae0a8db921c6c2"
version = "v1.0.0"
[[projects]]
digest = "1:c1a04665f9613e082e1209cf288bf64f4068dcd6c87a64bf1c4ff006ad422ba0"
name = "github.com/prometheus/client_golang"
packages = [
"prometheus",
"prometheus/promhttp"
"prometheus/promhttp",
]
pruneopts = "UT"
revision = "ae27198cdd90bf12cd134ad79d1366a6cf49f632"
[[projects]]
branch = "master"
digest = "1:2d5cd61daa5565187e1d96bae64dbbc6080dacf741448e9629c64fd93203b0d4"
name = "github.com/prometheus/client_model"
packages = ["go"]
revision = "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c"
pruneopts = "UT"
revision = "5c3871d89910bfb32f5fcab2aa4b9ec68e65a99f"
[[projects]]
branch = "master"
digest = "1:63b68062b8968092eb86bedc4e68894bd096ea6b24920faca8b9dcf451f54bb5"
name = "github.com/prometheus/common"
packages = [
"expfmt",
"internal/bitbucket.org/ww/goautoneg",
"model"
"model",
]
revision = "7600349dcfe1abd18d72d3a1770870d9800a7801"
pruneopts = "UT"
revision = "c7de2306084e37d54b8be01f3541a8464345e9a5"
[[projects]]
branch = "master"
digest = "1:8c49953a1414305f2ff5465147ee576dd705487c35b15918fcd4efdc0cb7a290"
name = "github.com/prometheus/procfs"
packages = [
".",
"internal/util",
"nfs",
"xfs"
"xfs",
]
revision = "ae68e2d4c00fed4943b5f6698d504a5fe083da8a"
pruneopts = "UT"
revision = "05ee40e3a273f7245e8777337fc7b46e533a9a92"
[[projects]]
digest = "1:c4556a44e350b50a490544d9b06e9fba9c286c21d6c0e47f54f3a9214597298c"
name = "github.com/rcrowley/go-metrics"
packages = ["."]
pruneopts = "UT"
revision = "e2704e165165ec55d062f5919b4b29494e9fa790"
[[projects]]
digest = "1:bd1ae00087d17c5a748660b8e89e1043e1e5479d0fea743352cda2f8dd8c4f84"
name = "github.com/spf13/afero"
packages = [
".",
"mem"
"mem",
]
pruneopts = "UT"
revision = "787d034dfe70e44075ccc060d346146ef53270ad"
version = "v1.1.1"
[[projects]]
digest = "1:516e71bed754268937f57d4ecb190e01958452336fa73dbac880894164e91c1f"
name = "github.com/spf13/cast"
packages = ["."]
pruneopts = "UT"
revision = "8965335b8c7107321228e3e3702cab9832751bac"
version = "v1.2.0"
[[projects]]
digest = "1:7ffc0983035bc7e297da3688d9fe19d60a420e9c38bef23f845c53788ed6a05e"
name = "github.com/spf13/cobra"
packages = ["."]
pruneopts = "UT"
revision = "7b2c5ac9fc04fc5efafb60700713d4fa609b777b"
version = "v0.0.1"
[[projects]]
branch = "master"
digest = "1:080e5f630945ad754f4b920e60b4d3095ba0237ebf88dc462eb28002932e3805"
name = "github.com/spf13/jwalterweatherman"
packages = ["."]
pruneopts = "UT"
revision = "7c0cea34c8ece3fbeb2b27ab9b59511d360fb394"
[[projects]]
digest = "1:9424f440bba8f7508b69414634aef3b2b3a877e522d8a4624692412805407bb7"
name = "github.com/spf13/pflag"
packages = ["."]
pruneopts = "UT"
revision = "583c0c0531f06d5278b7d917446061adc344b5cd"
version = "v1.0.1"
[[projects]]
digest = "1:f8e1a678a2571e265f4bf91a3e5e32aa6b1474a55cb0ea849750cc177b664d96"
name = "github.com/spf13/viper"
packages = ["."]
pruneopts = "UT"
revision = "25b30aa063fc18e48662b86996252eabdcf2f0c7"
version = "v1.0.0"
[[projects]]
digest = "1:7e8d267900c7fa7f35129a2a37596e38ed0f11ca746d6d9ba727980ee138f9f6"
name = "github.com/stretchr/testify"
packages = [
"assert",
"require"
"require",
]
pruneopts = "UT"
revision = "12b6f73e6084dad08a7c6e575284b177ecafbc71"
version = "v1.2.1"
[[projects]]
branch = "master"
digest = "1:b3cfb8d82b1601a846417c3f31c03a7961862cb2c98dcf0959c473843e6d9a2b"
name = "github.com/syndtr/goleveldb"
packages = [
"leveldb",
@@ -280,28 +352,41 @@
"leveldb/opt",
"leveldb/storage",
"leveldb/table",
"leveldb/util"
"leveldb/util",
]
pruneopts = "UT"
revision = "c4c61651e9e37fa117f53c5a906d3b63090d8445"
[[projects]]
digest = "1:605b6546f3f43745695298ec2d342d3e952b6d91cdf9f349bea9315f677d759f"
name = "github.com/tendermint/btcd"
packages = ["btcec"]
pruneopts = "UT"
revision = "e5840949ff4fff0c56f9b6a541e22b63581ea9df"
[[projects]]
branch = "master"
digest = "1:087aaa7920e5d0bf79586feb57ce01c35c830396ab4392798112e8aae8c47722"
name = "github.com/tendermint/ed25519"
packages = [
".",
"edwards25519",
"extra25519"
"extra25519",
]
pruneopts = "UT"
revision = "d8387025d2b9d158cf4efb07e7ebf814bcce2057"
[[projects]]
digest = "1:e0a2a4be1e20c305badc2b0a7a9ab7fef6da500763bec23ab81df3b5f9eec9ee"
name = "github.com/tendermint/go-amino"
packages = ["."]
revision = "2106ca61d91029c931fd54968c2bb02dc96b1412"
version = "0.10.1"
pruneopts = "UT"
revision = "a8328986c1608950fa5d3d1c0472cccc4f8fc02c"
version = "v0.12.0-rc0"
[[projects]]
branch = "master"
digest = "1:c31a37cafc12315b8bd745c8ad6a006ac25350472488162a821e557b3e739d67"
name = "golang.org/x/crypto"
packages = [
"bcrypt",
@@ -317,11 +402,13 @@
"openpgp/errors",
"poly1305",
"ripemd160",
"salsa20/salsa"
"salsa20/salsa",
]
revision = "a49355c7e3f8fe157a85be2f77e6e269a0f89602"
pruneopts = "UT"
revision = "56440b844dfe139a8ac053f4ecac0b20b79058f4"
[[projects]]
digest = "1:d36f55a999540d29b6ea3c2ea29d71c76b1d9853fdcd3e5c5cb4836f2ba118f1"
name = "golang.org/x/net"
packages = [
"context",
@@ -331,20 +418,24 @@
"idna",
"internal/timeseries",
"netutil",
"trace"
"trace",
]
pruneopts = "UT"
revision = "292b43bbf7cb8d35ddf40f8d5100ef3837cced3f"
[[projects]]
branch = "master"
digest = "1:bb0fe59917bdd5b89f49b9a8b26e5f465e325d9223b3a8e32254314bdf51e0f1"
name = "golang.org/x/sys"
packages = [
"cpu",
"unix"
"unix",
]
revision = "1b2967e3c290b7c545b3db0deeda16e9be4f98a2"
pruneopts = "UT"
revision = "3dc4335d56c789b04b0ba99b7a37249d9b614314"
[[projects]]
digest = "1:a2ab62866c75542dd18d2b069fec854577a20211d7c0ea6ae746072a1dccdd18"
name = "golang.org/x/text"
packages = [
"collate",
@@ -360,17 +451,22 @@
"unicode/bidi",
"unicode/cldr",
"unicode/norm",
"unicode/rangetable"
"unicode/rangetable",
]
pruneopts = "UT"
revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
version = "v0.3.0"
[[projects]]
branch = "master"
digest = "1:077c1c599507b3b3e9156d17d36e1e61928ee9b53a5b420f10f28ebd4a0b275c"
name = "google.golang.org/genproto"
packages = ["googleapis/rpc/status"]
revision = "7fd901a49ba6a7f87732eb344f6e3c5b19d1b200"
pruneopts = "UT"
revision = "daca94659cb50e9f37c1b834680f2e46358f10b0"
[[projects]]
digest = "1:2dab32a43451e320e49608ff4542fdfc653c95dcc35d0065ec9c6c3dd540ed74"
name = "google.golang.org/grpc"
packages = [
".",
@@ -382,9 +478,11 @@
"credentials",
"encoding",
"encoding/proto",
"grpclb/grpc_lb_v1/messages",
"grpclog",
"internal",
"internal/backoff",
"internal/channelz",
"internal/grpcrand",
"keepalive",
"metadata",
"naming",
@@ -395,20 +493,71 @@
"stats",
"status",
"tap",
"transport"
"transport",
]
revision = "d11072e7ca9811b1100b80ca0269ac831f06d024"
version = "v1.11.3"
pruneopts = "UT"
revision = "168a6198bcb0ef175f7dacec0b8691fc141dc9b8"
version = "v1.13.0"
[[projects]]
digest = "1:342378ac4dcb378a5448dd723f0784ae519383532f5e70ade24132c4c8693202"
name = "gopkg.in/yaml.v2"
packages = ["."]
pruneopts = "UT"
revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183"
version = "v2.2.1"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "b0718135d5ade0a75c6b8fe703f70eb9d8064ba871ec31abd9ace3c4ab944100"
input-imports = [
"github.com/btcsuite/btcutil/base58",
"github.com/btcsuite/btcutil/bech32",
"github.com/ebuchman/fail-test",
"github.com/fortytw2/leaktest",
"github.com/go-kit/kit/log",
"github.com/go-kit/kit/log/level",
"github.com/go-kit/kit/log/term",
"github.com/go-kit/kit/metrics",
"github.com/go-kit/kit/metrics/discard",
"github.com/go-kit/kit/metrics/prometheus",
"github.com/go-logfmt/logfmt",
"github.com/gogo/protobuf/gogoproto",
"github.com/gogo/protobuf/jsonpb",
"github.com/gogo/protobuf/proto",
"github.com/gogo/protobuf/types",
"github.com/golang/protobuf/proto",
"github.com/golang/protobuf/ptypes/timestamp",
"github.com/gorilla/websocket",
"github.com/jmhodges/levigo",
"github.com/pkg/errors",
"github.com/prometheus/client_golang/prometheus",
"github.com/prometheus/client_golang/prometheus/promhttp",
"github.com/rcrowley/go-metrics",
"github.com/spf13/cobra",
"github.com/spf13/viper",
"github.com/stretchr/testify/assert",
"github.com/stretchr/testify/require",
"github.com/syndtr/goleveldb/leveldb",
"github.com/syndtr/goleveldb/leveldb/errors",
"github.com/syndtr/goleveldb/leveldb/iterator",
"github.com/syndtr/goleveldb/leveldb/opt",
"github.com/tendermint/btcd/btcec",
"github.com/tendermint/ed25519",
"github.com/tendermint/ed25519/extra25519",
"github.com/tendermint/go-amino",
"golang.org/x/crypto/bcrypt",
"golang.org/x/crypto/chacha20poly1305",
"golang.org/x/crypto/curve25519",
"golang.org/x/crypto/hkdf",
"golang.org/x/crypto/nacl/box",
"golang.org/x/crypto/nacl/secretbox",
"golang.org/x/crypto/openpgp/armor",
"golang.org/x/crypto/ripemd160",
"golang.org/x/net/context",
"golang.org/x/net/netutil",
"google.golang.org/grpc",
"google.golang.org/grpc/credentials",
]
solver-name = "gps-cdcl"
solver-version = 1


@@ -10,11 +10,6 @@
# name = "github.com/user/project"
# version = "1.0.0"
#
# [[constraint]]
# name = "github.com/user/project2"
# branch = "dev"
# source = "github.com/myfork/project2"
#
# [[override]]
# name = "github.com/x/y"
# version = "2.4.0"
@@ -35,11 +30,11 @@
[[constraint]]
name = "github.com/gogo/protobuf"
version = "=1.0.0"
version = "=1.1.1"
[[constraint]]
name = "github.com/golang/protobuf"
version = "=1.0.0"
version = "=1.1.0"
[[constraint]]
name = "github.com/gorilla/websocket"
@@ -63,11 +58,11 @@
[[constraint]]
name = "github.com/tendermint/go-amino"
version = "=0.10.1"
version = "v0.12.0-rc0"
[[constraint]]
name = "google.golang.org/grpc"
version = "=1.11.3"
version = "=1.13.0"
[[constraint]]
name = "github.com/fortytw2/leaktest"
@@ -77,19 +72,9 @@
## Some repos dont have releases.
## Pin to revision
## We can remove this one by updating protobuf to v1.1.0
## but then the grpc tests break with
#--- FAIL: TestBroadcastTx (0.01s)
#panic: message/group field common.KVPair:bytes without pointer [recovered]
# panic: message/group field common.KVPair:bytes without pointer
#
# ...
#
# github.com/tendermint/tendermint/rpc/grpc_test.TestBroadcastTx(0xc420a5ab40)
# /go/src/github.com/tendermint/tendermint/rpc/grpc/grpc_test.go:29 +0x141
[[override]]
name = "google.golang.org/genproto"
revision = "7fd901a49ba6a7f87732eb344f6e3c5b19d1b200"
name = "github.com/jmhodges/levigo"
revision = "c42d9e0ca023e2198120196f842701bb4c55d7b9"
[[constraint]]
name = "github.com/ebuchman/fail-test"
@@ -100,6 +85,10 @@
name = "github.com/btcsuite/btcutil"
revision = "d4cc87b860166d00d6b5b9e0d3b3d71d6088d4d4"
[[constraint]]
name = "github.com/tendermint/btcd"
revision = "e5840949ff4fff0c56f9b6a541e22b63581ea9df"
# Haven't made a release since 2016.
[[constraint]]
name = "github.com/prometheus/client_golang"


@@ -3,41 +3,50 @@ GOTOOLS = \
github.com/golang/dep/cmd/dep \
gopkg.in/alecthomas/gometalinter.v2 \
github.com/gogo/protobuf/protoc-gen-gogo \
github.com/gogo/protobuf/gogoproto \
github.com/square/certstrap
PACKAGES=$(shell go list ./... | grep -v '/vendor/')
PACKAGES=$(shell go list ./...)
INCLUDE = -I=. -I=${GOPATH}/src -I=${GOPATH}/src/github.com/gogo/protobuf/protobuf
BUILD_TAGS?=tendermint
BUILD_TAGS?='tendermint'
BUILD_FLAGS = -ldflags "-X github.com/tendermint/tendermint/version.GitCommit=`git rev-parse --short=8 HEAD`"
LINT_FLAGS = --exclude '.*\.pb\.go' --vendor --deadline=600s
all: check build test install
check: check_tools ensure_deps
check: check_tools get_vendor_deps
########################################
### Build Tendermint
build:
CGO_ENABLED=0 go build $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' -o build/tendermint ./cmd/tendermint/
CGO_ENABLED=0 go build $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o build/tendermint ./cmd/tendermint/
build_race:
CGO_ENABLED=0 go build -race $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' -o build/tendermint ./cmd/tendermint
CGO_ENABLED=0 go build -race $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o build/tendermint ./cmd/tendermint
install:
CGO_ENABLED=0 go install $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' ./cmd/tendermint
CGO_ENABLED=0 go install $(BUILD_FLAGS) -tags $(BUILD_TAGS) ./cmd/tendermint
########################################
### Protobuf
protoc_all: protoc_libs protoc_abci protoc_grpc
%.pb.go: %.proto
## If you get the following error,
## "error while loading shared libraries: libprotobuf.so.14: cannot open shared object file: No such file or directory"
## See https://stackoverflow.com/a/25518702
## Note the $< here is substituted for the %.proto
## Note the $@ here is substituted for the %.pb.go
protoc $(INCLUDE) $< --gogo_out=Mgoogle/protobuf/timestamp.proto=github.com/golang/protobuf/ptypes/timestamp,plugins=grpc:.
########################################
### Build ABCI
protoc_abci:
## If you get the following error,
## "error while loading shared libraries: libprotobuf.so.14: cannot open shared object file: No such file or directory"
## See https://stackoverflow.com/a/25518702
protoc $(INCLUDE) --gogo_out=plugins=grpc:. abci/types/*.proto
@echo "--> adding nolint declarations to protobuf generated files"
@awk '/package abci/types/ { print "//nolint: gas"; print; next }1' abci/types/types.pb.go > abci/types/types.pb.go.new
@mv abci/types/types.pb.go.new abci/types/types.pb.go
# see protobuf section above
protoc_abci: abci/types/types.pb.go
build_abci:
@go build -i ./abci/cmd/...
@@ -51,7 +60,7 @@ install_abci:
# dist builds binaries for all platforms and packages them for distribution
# TODO add abci to these scripts
dist:
@BUILD_TAGS='$(BUILD_TAGS)' sh -c "'$(CURDIR)/scripts/dist.sh'"
@BUILD_TAGS=$(BUILD_TAGS) sh -c "'$(CURDIR)/scripts/dist.sh'"
########################################
### Tools & dependencies
@@ -68,31 +77,25 @@ get_tools:
update_tools:
@echo "--> Updating tools"
@go get -u $(GOTOOLS)
go get -u -v $(GOTOOLS)
#Run this from CI
#Update dependencies
get_vendor_deps:
@rm -rf vendor/
@echo "--> Running dep"
@dep ensure -vendor-only
#Run this locally.
ensure_deps:
@rm -rf vendor/
@echo "--> Running dep"
@dep ensure
#For ABCI and libs
get_protoc:
@# https://github.com/google/protobuf/releases
curl -L https://github.com/google/protobuf/releases/download/v3.4.1/protobuf-cpp-3.4.1.tar.gz | tar xvz && \
cd protobuf-3.4.1 && \
curl -L https://github.com/google/protobuf/releases/download/v3.6.1/protobuf-cpp-3.6.1.tar.gz | tar xvz && \
cd protobuf-3.6.1 && \
DIST_LANG=cpp ./configure && \
make && \
make install && \
make check && \
sudo make install && \
sudo ldconfig && \
cd .. && \
rm -rf protobuf-3.4.1
rm -rf protobuf-3.6.1
draw_deps:
@# requires brew install graphviz or apt-get install graphviz
@@ -101,21 +104,14 @@ draw_deps:
get_deps_bin_size:
@# Copy of build recipe with additional flags to perform binary size analysis
$(eval $(shell go build -work -a $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' -o build/tendermint ./cmd/tendermint/ 2>&1))
$(eval $(shell go build -work -a $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o build/tendermint ./cmd/tendermint/ 2>&1))
@find $(WORK) -type f -name "*.a" | xargs -I{} du -hxs "{}" | sort -rh | sed -e s:${WORK}/::g > deps_bin_size.log
@echo "Results can be found here: $(CURDIR)/deps_bin_size.log"
########################################
### Libs
protoc_libs:
## If you get the following error,
## "error while loading shared libraries: libprotobuf.so.14: cannot open shared object file: No such file or directory"
## See https://stackoverflow.com/a/25518702
protoc $(INCLUDE) --go_out=plugins=grpc:. libs/common/*.proto
@echo "--> adding nolint declarations to protobuf generated files"
@awk '/package libs/common/ { print "//nolint: gas"; print; next }1' libs/common/types.pb.go > libs/common/types.pb.go.new
@mv libs/common/types.pb.go.new libs/common/types.pb.go
protoc_libs: libs/common/types.pb.go
gen_certs: clean_certs
## Generating certificates for TLS testing...
@@ -130,12 +126,14 @@ clean_certs:
rm -f db/remotedb/::.crt db/remotedb/::.key
test_libs: gen_certs
GOCACHE=off go test -tags gcc $(shell go list ./... | grep -v vendor)
GOCACHE=off go test -tags gcc $(PACKAGES)
make clean_certs
grpc_dbserver:
protoc -I db/remotedb/proto/ db/remotedb/proto/defs.proto --go_out=plugins=grpc:db/remotedb/proto
protoc_grpc: rpc/grpc/types.pb.go
########################################
### Testing
@@ -206,11 +204,11 @@ vagrant_test:
### go tests
test:
@echo "--> Running go test"
@go test $(PACKAGES)
@GOCACHE=off go test -p 1 $(PACKAGES)
test_race:
@echo "--> Running go test --race"
@go test -v -race $(PACKAGES)
@GOCACHE=off go test -p 1 -v -race $(PACKAGES)
########################################
@@ -221,7 +219,7 @@ fmt:
metalinter:
@echo "--> Running linter"
@gometalinter.v2 --vendor --deadline=600s --disable-all \
@gometalinter.v2 $(LINT_FLAGS) --disable-all \
--enable=deadcode \
--enable=gosimple \
--enable=misspell \
@@ -250,7 +248,17 @@ metalinter:
metalinter_all:
@echo "--> Running linter (all)"
gometalinter.v2 --vendor --deadline=600s --enable-all --disable=lll ./...
gometalinter.v2 $(LINT_FLAGS) --enable-all --disable=lll ./...
DESTINATION = ./index.html.md
rpc-docs:
cat rpc/core/slate_header.txt > $(DESTINATION)
godoc2md -template rpc/core/doc_template.txt github.com/tendermint/tendermint/rpc/core | grep -v -e "pipe.go" -e "routes.go" -e "dev.go" | sed 's,/src/target,https://github.com/tendermint/tendermint/tree/master/rpc/core,' >> $(DESTINATION)
check_dep:
dep status >> /dev/null
!(grep -n branch Gopkg.toml)
###########################################################
### Docker image
@@ -307,4 +315,4 @@ build-slate:
# To avoid unintended conflicts with file names, always add to .PHONY
# unless there is a reason not to.
# https://www.gnu.org/software/make/manual/html_node/Phony-Targets.html
.PHONY: check build build_race build_abci dist install install_abci check_tools get_tools update_tools get_vendor_deps draw_deps get_protoc protoc_abci protoc_libs gen_certs clean_certs grpc_dbserver test_cover test_apps test_persistence test_p2p test test_race test_integrations test_release test100 vagrant_test fmt build-linux localnet-start localnet-stop build-docker build-docker-localnode sentry-start sentry-config sentry-stop build-slate
.PHONY: check build build_race build_abci dist install install_abci check_dep check_tools get_tools update_tools get_vendor_deps draw_deps get_protoc protoc_abci protoc_libs gen_certs clean_certs grpc_dbserver test_cover test_apps test_persistence test_p2p test test_race test_integrations test_release test100 vagrant_test fmt rpc-docs build-linux localnet-start localnet-stop build-docker build-docker-localnode sentry-start sentry-config sentry-stop build-slate protoc_grpc protoc_all


@@ -22,7 +22,7 @@ develop | [![CircleCI](https://circleci.com/gh/tendermint/tendermint/tree/deve
Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine - written in any programming language -
and securely replicates it on many machines.
For protocol details, see [the specification](/docs/spec).
For protocol details, see [the specification](/docs/spec). For a consensus proof and detailed protocol analysis, check out our recent paper, "[The latest gossip on BFT consensus](https://arxiv.org/abs/1807.04938)".
## A Note on Production Readiness
@@ -50,13 +50,13 @@ Go version | Go1.9 or higher
## Install
See the [install instructions](/docs/install.md)
See the [install instructions](/docs/introduction/install.md)
## Quick Start
- [Single node](/docs/using-tendermint.md)
- [Local cluster using docker-compose](/networks/local)
- [Remote cluster using terraform and ansible](/docs/terraform-and-ansible.md)
- [Remote cluster using terraform and ansible](/docs/networks/terraform-and-ansible.md)
- [Join the public testnet](https://cosmos.network/testnet)
## Resources

UPGRADING.md (new file, 66 lines)

@@ -0,0 +1,66 @@
# Upgrading Tendermint Core
This guide provides steps to be followed when you upgrade your applications to
a newer version of Tendermint Core.
## v0.24.0
The new 0.24.0 release contains a lot of changes to the state and types. It is not
compatible with older versions, and there is no straightforward way to update
old data to be compatible with the new version.
To reset the state do:
```
$ tendermint unsafe_reset_all
```
Here we summarize some other notable changes to be mindful of.
### Config changes
`p2p.max_num_peers` was removed in favor of `p2p.max_num_inbound_peers` and
`p2p.max_num_outbound_peers`.
```
# Maximum number of inbound peers
max_num_inbound_peers = 40
# Maximum number of outbound peers to connect to, excluding persistent peers
max_num_outbound_peers = 10
```
As you can see, the default ratio of inbound/outbound peers is 4/1. The reason
is we want it to be easier for new nodes to connect to the network. You can
tweak these parameters to alter the network topology.
### RPC Changes
The result of `/commit` used to contain `header` and `commit` fields at the top level. These are now contained under the `signed_header` field.
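For illustration, here is a minimal Go sketch of the new nesting when decoding the `/commit` result; the struct and field names below are simplified stand-ins for this guide, not the exact RPC result types:
```
package main

import (
	"encoding/json"
	"fmt"
)

// Simplified shapes for illustration only; the real result types carry
// many more fields (validator hashes, signatures, etc.).
type header struct {
	ChainID string `json:"chain_id"`
	Height  string `json:"height"`
}

type resultCommit struct {
	SignedHeader struct {
		Header header          `json:"header"`
		Commit json.RawMessage `json:"commit"`
	} `json:"signed_header"` // previously `header` and `commit` were top-level
}

func main() {
	raw := []byte(`{"signed_header":{"header":{"chain_id":"test-chain","height":"10"},"commit":{}}}`)
	var res resultCommit
	if err := json.Unmarshal(raw, &res); err != nil {
		panic(err)
	}
	fmt.Println(res.SignedHeader.Header.ChainID, res.SignedHeader.Header.Height)
}
```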
### ABCI Changes
The header has been upgraded and contains new fields, but none of the existing
fields were changed, except their order.
The `Validator` type was split into two, one containing an `Address` and one
containing a `PubKey`. When processing `RequestBeginBlock`, use the `Validator`
type, which contains just the `Address`. When returning `ResponseEndBlock`, use
the `ValidatorUpdate` type, which contains just the `PubKey`.
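As a rough sketch of what this means for an app (simplified stand-in types below, not the generated ABCI structs): `BeginBlock` now only carries validator addresses, while `EndBlock` updates carry pubkeys, so an app that needs pubkeys for past voters typically maintains its own address-to-pubkey map.
```
package main

import "fmt"

// Simplified stand-ins for the types described above; the real ones are
// generated protobuf structs in abci/types.
type validator struct { // what BeginBlock sees: address + power only
	Address []byte
	Power   int64
}

type validatorUpdate struct { // what EndBlock returns: pubkey + power only
	PubKey []byte // the real type wraps a PubKey message, not raw bytes
	Power  int64
}

type app struct {
	// Maintained by the app itself, since addresses and pubkeys now
	// arrive through different messages.
	pubKeyByAddr map[string][]byte
}

func (a *app) beginBlock(lastVotes []validator) {
	for _, v := range lastVotes {
		pk := a.pubKeyByAddr[string(v.Address)]
		fmt.Printf("voter %X (pubkey %X), power %d\n", v.Address, pk, v.Power)
	}
}

func (a *app) endBlock() []validatorUpdate {
	// Power 0 removes a validator; any other power adds or updates it.
	return []validatorUpdate{{PubKey: []byte{0x01}, Power: 10}}
}

func main() {
	a := &app{pubKeyByAddr: map[string][]byte{string([]byte{0xAA}): {0x01}}}
	a.beginBlock([]validator{{Address: []byte{0xAA}, Power: 10}})
	fmt.Println(a.endBlock())
}
```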
### Validator Set Updates
Validator set updates returned in ResponseEndBlock for height `H` used to take
effect immediately at height `H+1`. Now they will be delayed one block, to take
effect at height `H+2`. Note this means that the change will be seen by the ABCI
app in the `RequestBeginBlock.LastCommitInfo` at block `H+3`. Apps were already
required to maintain a map from validator addresses to pubkeys since v0.23 (when
pubkeys were removed from RequestBeginBlock), but now they may need to track
multiple validator sets at once to accommodate this delay.
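A minimal bookkeeping sketch for that delay, using hypothetical helper types rather than anything from the Tendermint API: updates returned at height `H` are queued to become active at `H+2`, so the app can always tell which set is in effect for the block it is processing.
```
package main

import "fmt"

// Hypothetical bookkeeping, not a Tendermint type: tracks which validator
// set (address -> power) is active at each height, given the one-block delay.
type valSetTracker struct {
	pending map[int64]map[string]int64 // height -> set that becomes active there
	current map[string]int64           // set active for the block being processed
	latest  map[string]int64           // newest computed set (tail of the queue)
}

func newTracker(genesis map[string]int64) *valSetTracker {
	return &valSetTracker{
		pending: make(map[int64]map[string]int64),
		current: genesis,
		latest:  genesis,
	}
}

// Record the updates the app returned in ResponseEndBlock at height h.
func (t *valSetTracker) recordEndBlock(h int64, updates map[string]int64) {
	next := make(map[string]int64, len(t.latest))
	for addr, power := range t.latest {
		next[addr] = power
	}
	for addr, power := range updates {
		if power == 0 {
			delete(next, addr) // power 0 removes a validator
		} else {
			next[addr] = power
		}
	}
	t.pending[h+2] = next // delayed by one block: active starting at h+2
	t.latest = next
}

// Advance to height h and return the validator set in effect there.
func (t *valSetTracker) beginBlock(h int64) map[string]int64 {
	if set, ok := t.pending[h]; ok {
		t.current = set
		delete(t.pending, h)
	}
	return t.current
}

func main() {
	t := newTracker(map[string]int64{"val1": 10})
	t.recordEndBlock(5, map[string]int64{"val2": 20}) // returned at H=5
	fmt.Println(t.beginBlock(6))                      // still only val1
	fmt.Println(t.beginBlock(7))                      // now includes val2
}
```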
### Block Size
The `ConsensusParams.BlockSize.MaxTxs` was removed in favour of
`ConsensusParams.BlockSize.MaxBytes`, which is now enforced. This means blocks
are limited only by byte size, not by number of transactions.


@@ -17,19 +17,19 @@ The community has provided a number of addtional implementations, see the [Tende
A detailed description of the ABCI methods and message types is contained in:
- [A prose specification](specification.md)
- [A protobuf file](https://github.com/tendermint/abci/blob/master/types/types.proto)
- [A Go interface](https://github.com/tendermint/abci/blob/master/types/application.go).
- [A protobuf file](https://github.com/tendermint/tendermint/blob/master/abci/types/types.proto)
- [A Go interface](https://github.com/tendermint/tendermint/blob/master/abci/types/application.go).
For more background information on ABCI, motivations, and tendermint, please visit [the documentation](http://tendermint.readthedocs.io/en/master/).
For more background information on ABCI, motivations, and tendermint, please visit [the documentation](https://tendermint.com/docs/).
The two guides to focus on are the `Application Development Guide` and `Using ABCI-CLI`.
## Protocl Buffers
## Protocol Buffers
To compile the protobuf file, run:
```
make protoc
cd $GOPATH/src/github.com/tendermint/tendermint/; make protoc_abci
```
See `protoc --help` and [the Protocol Buffers site](https://developers.google.com/protocol-buffers)
@@ -42,10 +42,13 @@ The `abci-cli` is a simple tool for debugging ABCI servers and running some
example apps. To install it:
```
go get github.com/tendermint/abci
cd $GOPATH/src/github.com/tendermint/abci
mkdir -p $GOPATH/src/github.com/tendermint
cd $GOPATH/src/github.com/tendermint
git clone https://github.com/tendermint/tendermint.git
cd tendermint
make get_tools
make get_vendor_deps
make install
make install_abci
```
## Implementation
@@ -91,7 +94,7 @@ Note the length-prefixing used in the socket implementation does not apply for G
The `abci-cli` tool wraps an ABCI client and can be used for probing/testing an ABCI server.
For instance, `abci-cli test` will run a test sequence against a listening server running the Counter application (see below).
It can also be used to run some example applications.
See [the documentation](http://tendermint.readthedocs.io/en/master/) for more details.
See [the documentation](https://tendermint.com/docs/) for more details.
### Examples


@@ -22,6 +22,7 @@ type grpcClient struct {
mustConnect bool
client types.ABCIApplicationClient
conn *grpc.ClientConn
mtx sync.Mutex
addr string
@@ -60,10 +61,11 @@ RETRY_LOOP:
cli.Logger.Info("Dialed server. Waiting for echo.", "addr", cli.addr)
client := types.NewABCIApplicationClient(conn)
cli.conn = conn
ENSURE_CONNECTED:
for {
_, err := client.Echo(context.Background(), &types.RequestEcho{"hello"}, grpc.FailFast(true))
_, err := client.Echo(context.Background(), &types.RequestEcho{Message: "hello"}, grpc.FailFast(true))
if err == nil {
break ENSURE_CONNECTED
}
@@ -78,12 +80,10 @@ RETRY_LOOP:
func (cli *grpcClient) OnStop() {
cli.BaseService.OnStop()
cli.mtx.Lock()
defer cli.mtx.Unlock()
// TODO: how to close conn? its not a net.Conn and grpc doesn't expose a Close()
/*if cli.client.conn != nil {
cli.client.conn.Close()
}*/
if cli.conn != nil {
cli.conn.Close()
}
}
func (cli *grpcClient) StopForError(err error) {
@@ -129,7 +129,7 @@ func (cli *grpcClient) EchoAsync(msg string) *ReqRes {
if err != nil {
cli.StopForError(err)
}
return cli.finishAsyncCall(req, &types.Response{&types.Response_Echo{res}})
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Echo{res}})
}
func (cli *grpcClient) FlushAsync() *ReqRes {
@@ -138,7 +138,7 @@ func (cli *grpcClient) FlushAsync() *ReqRes {
if err != nil {
cli.StopForError(err)
}
return cli.finishAsyncCall(req, &types.Response{&types.Response_Flush{res}})
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Flush{res}})
}
func (cli *grpcClient) InfoAsync(params types.RequestInfo) *ReqRes {
@@ -147,7 +147,7 @@ func (cli *grpcClient) InfoAsync(params types.RequestInfo) *ReqRes {
if err != nil {
cli.StopForError(err)
}
return cli.finishAsyncCall(req, &types.Response{&types.Response_Info{res}})
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Info{res}})
}
func (cli *grpcClient) SetOptionAsync(params types.RequestSetOption) *ReqRes {
@@ -156,7 +156,7 @@ func (cli *grpcClient) SetOptionAsync(params types.RequestSetOption) *ReqRes {
if err != nil {
cli.StopForError(err)
}
return cli.finishAsyncCall(req, &types.Response{&types.Response_SetOption{res}})
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_SetOption{res}})
}
func (cli *grpcClient) DeliverTxAsync(tx []byte) *ReqRes {
@@ -165,7 +165,7 @@ func (cli *grpcClient) DeliverTxAsync(tx []byte) *ReqRes {
if err != nil {
cli.StopForError(err)
}
return cli.finishAsyncCall(req, &types.Response{&types.Response_DeliverTx{res}})
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_DeliverTx{res}})
}
func (cli *grpcClient) CheckTxAsync(tx []byte) *ReqRes {
@@ -174,7 +174,7 @@ func (cli *grpcClient) CheckTxAsync(tx []byte) *ReqRes {
if err != nil {
cli.StopForError(err)
}
return cli.finishAsyncCall(req, &types.Response{&types.Response_CheckTx{res}})
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_CheckTx{res}})
}
func (cli *grpcClient) QueryAsync(params types.RequestQuery) *ReqRes {
@@ -183,7 +183,7 @@ func (cli *grpcClient) QueryAsync(params types.RequestQuery) *ReqRes {
if err != nil {
cli.StopForError(err)
}
return cli.finishAsyncCall(req, &types.Response{&types.Response_Query{res}})
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Query{res}})
}
func (cli *grpcClient) CommitAsync() *ReqRes {
@@ -192,7 +192,7 @@ func (cli *grpcClient) CommitAsync() *ReqRes {
if err != nil {
cli.StopForError(err)
}
return cli.finishAsyncCall(req, &types.Response{&types.Response_Commit{res}})
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Commit{res}})
}
func (cli *grpcClient) InitChainAsync(params types.RequestInitChain) *ReqRes {
@@ -201,7 +201,7 @@ func (cli *grpcClient) InitChainAsync(params types.RequestInitChain) *ReqRes {
if err != nil {
cli.StopForError(err)
}
return cli.finishAsyncCall(req, &types.Response{&types.Response_InitChain{res}})
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_InitChain{res}})
}
func (cli *grpcClient) BeginBlockAsync(params types.RequestBeginBlock) *ReqRes {
@@ -210,7 +210,7 @@ func (cli *grpcClient) BeginBlockAsync(params types.RequestBeginBlock) *ReqRes {
if err != nil {
cli.StopForError(err)
}
return cli.finishAsyncCall(req, &types.Response{&types.Response_BeginBlock{res}})
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_BeginBlock{res}})
}
func (cli *grpcClient) EndBlockAsync(params types.RequestEndBlock) *ReqRes {
@@ -219,7 +219,7 @@ func (cli *grpcClient) EndBlockAsync(params types.RequestEndBlock) *ReqRes {
if err != nil {
cli.StopForError(err)
}
return cli.finishAsyncCall(req, &types.Response{&types.Response_EndBlock{res}})
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_EndBlock{res}})
}
func (cli *grpcClient) finishAsyncCall(req *types.Request, res *types.Response) *ReqRes {
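
The hunks above and below make the same mechanical change: composite literals such as types.Response{&types.Response_Echo{res}} gain explicit field names (Value:, Message:, and so on). A minimal, self-contained Go sketch of why keyed struct literals are preferred; the Echo and Response types here are illustrative stand-ins, not the real ABCI types:

package main

import "fmt"

type Echo struct {
	Message string
}

type Response struct {
	Value interface{} // the real ABCI Response wraps a oneof here
}

func main() {
	// Unkeyed literal: breaks silently if fields are added or reordered,
	// and `go vet` flags unkeyed fields for structs from other packages.
	r1 := Response{&Echo{"hello"}}

	// Keyed literal: robust to struct changes and vet-clean.
	r2 := Response{Value: &Echo{Message: "hello"}}

	fmt.Println(r1, r2)
}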


@@ -149,7 +149,7 @@ func (app *localClient) FlushSync() error {
}
func (app *localClient) EchoSync(msg string) (*types.ResponseEcho, error) {
return &types.ResponseEcho{msg}, nil
return &types.ResponseEcho{Message: msg}, nil
}
func (app *localClient) InfoSync(req types.RequestInfo) (*types.ResponseInfo, error) {


@@ -477,11 +477,8 @@ func muxOnCommands(cmd *cobra.Command, pArgs []string) error {
}
func cmdUnimplemented(cmd *cobra.Command, args []string) error {
// TODO: Print out all the sub-commands available
msg := "unimplemented command"
if err := cmd.Help(); err != nil {
msg = err.Error()
}
if len(args) > 0 {
msg += fmt.Sprintf(" args: [%s]", strings.Join(args, " "))
}
@@ -489,6 +486,17 @@ func cmdUnimplemented(cmd *cobra.Command, args []string) error {
Code: codeBad,
Log: msg,
})
fmt.Println("Available commands:")
fmt.Printf("%s: %s\n", echoCmd.Use, echoCmd.Short)
fmt.Printf("%s: %s\n", infoCmd.Use, infoCmd.Short)
fmt.Printf("%s: %s\n", checkTxCmd.Use, checkTxCmd.Short)
fmt.Printf("%s: %s\n", deliverTxCmd.Use, deliverTxCmd.Short)
fmt.Printf("%s: %s\n", queryCmd.Use, queryCmd.Short)
fmt.Printf("%s: %s\n", commitCmd.Use, commitCmd.Short)
fmt.Printf("%s: %s\n", setOptionCmd.Use, setOptionCmd.Short)
fmt.Println("Use \"[command] --help\" for more information about a command.")
return nil
}
@@ -514,7 +522,7 @@ func cmdInfo(cmd *cobra.Command, args []string) error {
if len(args) == 1 {
version = args[0]
}
res, err := client.InfoSync(types.RequestInfo{version})
res, err := client.InfoSync(types.RequestInfo{Version: version})
if err != nil {
return err
}
@@ -537,7 +545,7 @@ func cmdSetOption(cmd *cobra.Command, args []string) error {
}
key, val := args[0], args[1]
_, err := client.SetOptionSync(types.RequestSetOption{key, val})
_, err := client.SetOptionSync(types.RequestSetOption{Key: key, Value: val})
if err != nil {
return err
}


@@ -6,7 +6,6 @@ import (
"github.com/tendermint/tendermint/abci/example/code"
"github.com/tendermint/tendermint/abci/types"
cmn "github.com/tendermint/tendermint/libs/common"
)
type CounterApplication struct {
@@ -22,7 +21,7 @@ func NewCounterApplication(serial bool) *CounterApplication {
}
func (app *CounterApplication) Info(req types.RequestInfo) types.ResponseInfo {
return types.ResponseInfo{Data: cmn.Fmt("{\"hashes\":%v,\"txs\":%v}", app.hashCount, app.txCount)}
return types.ResponseInfo{Data: fmt.Sprintf("{\"hashes\":%v,\"txs\":%v}", app.hashCount, app.txCount)}
}
func (app *CounterApplication) SetOption(req types.RequestSetOption) types.ResponseSetOption {
@@ -34,7 +33,7 @@ func (app *CounterApplication) SetOption(req types.RequestSetOption) types.Respo
TODO Panic and have the ABCI server pass an exception.
The client can call SetOptionSync() and get an `error`.
return types.ResponseSetOption{
Error: cmn.Fmt("Unknown key (%s) or value (%s)", key, value),
Error: fmt.Sprintf("Unknown key (%s) or value (%s)", key, value),
}
*/
return types.ResponseSetOption{}
@@ -95,10 +94,10 @@ func (app *CounterApplication) Commit() (resp types.ResponseCommit) {
func (app *CounterApplication) Query(reqQuery types.RequestQuery) types.ResponseQuery {
switch reqQuery.Path {
case "hash":
return types.ResponseQuery{Value: []byte(cmn.Fmt("%v", app.hashCount))}
return types.ResponseQuery{Value: []byte(fmt.Sprintf("%v", app.hashCount))}
case "tx":
return types.ResponseQuery{Value: []byte(cmn.Fmt("%v", app.txCount))}
return types.ResponseQuery{Value: []byte(fmt.Sprintf("%v", app.txCount))}
default:
return types.ResponseQuery{Log: cmn.Fmt("Invalid query path. Expected hash or tx, got %v", reqQuery.Path)}
return types.ResponseQuery{Log: fmt.Sprintf("Invalid query path. Expected hash or tx, got %v", reqQuery.Path)}
}
}
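
The counter example above also swaps cmn.Fmt for the standard library's fmt.Sprintf. A rough sketch of why the substitution is mechanical, assuming cmn.Fmt was a thin wrapper over Sprintf (which is how the libs/common helper behaved):

package main

import "fmt"

// Fmt sketches the old libs/common helper: a plain wrapper around fmt.Sprintf.
func Fmt(format string, a ...interface{}) string {
	return fmt.Sprintf(format, a...)
}

func main() {
	hashes, txs := 3, 42
	oldStr := Fmt("{\"hashes\":%v,\"txs\":%v}", hashes, txs)
	newStr := fmt.Sprintf("{\"hashes\":%v,\"txs\":%v}", hashes, txs)
	fmt.Println(oldStr == newStr) // true: call sites can be rewritten one-for-one
}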


@@ -7,6 +7,8 @@ import (
"testing"
"time"
"github.com/stretchr/testify/require"
"google.golang.org/grpc"
"golang.org/x/net/context"
@@ -37,13 +39,13 @@ func TestGRPC(t *testing.T) {
}
func testStream(t *testing.T, app types.Application) {
numDeliverTxs := 200000
numDeliverTxs := 20000
// Start the listener
server := abciserver.NewSocketServer("unix://test.sock", app)
server.SetLogger(log.TestingLogger().With("module", "abci-server"))
if err := server.Start(); err != nil {
t.Fatalf("Error starting socket server: %v", err.Error())
require.NoError(t, err, "Error starting socket server")
}
defer server.Stop()
@@ -70,7 +72,7 @@ func testStream(t *testing.T, app types.Application) {
}
if counter == numDeliverTxs {
go func() {
time.Sleep(time.Second * 2) // Wait for a bit to allow counter overflow
time.Sleep(time.Second * 1) // Wait for a bit to allow counter overflow
close(done)
}()
return
@@ -132,7 +134,7 @@ func testGRPCSync(t *testing.T, app *types.GRPCApplication) {
// Write requests
for counter := 0; counter < numDeliverTxs; counter++ {
// Send request
response, err := client.DeliverTx(context.Background(), &types.RequestDeliverTx{[]byte("test")})
response, err := client.DeliverTx(context.Background(), &types.RequestDeliverTx{Tx: []byte("test")})
if err != nil {
t.Fatalf("Error in GRPC DeliverTx: %v", err.Error())
}
@@ -146,7 +148,7 @@ func testGRPCSync(t *testing.T, app *types.GRPCApplication) {
t.Log("response", counter)
if counter == numDeliverTxs {
go func() {
time.Sleep(time.Second * 2) // Wait for a bit to allow counter overflow
time.Sleep(time.Second * 1) // Wait for a bit to allow counter overflow
}()
}


@@ -7,12 +7,10 @@ import (
// RandVal creates one random validator, with a key derived
// from the input value
func RandVal(i int) types.Validator {
addr := cmn.RandBytes(20)
func RandVal(i int) types.ValidatorUpdate {
pubkey := cmn.RandBytes(32)
power := cmn.RandUint16() + 1
v := types.Ed25519Validator(pubkey, int64(power))
v.Address = addr
v := types.Ed25519ValidatorUpdate(pubkey, int64(power))
return v
}
@@ -20,8 +18,8 @@ func RandVal(i int) types.Validator {
// the application. Note that the keys are deterministically
// derived from the index in the array, while the power is
// random (Change this if not desired)
func RandVals(cnt int) []types.Validator {
res := make([]types.Validator, cnt)
func RandVals(cnt int) []types.ValidatorUpdate {
res := make([]types.ValidatorUpdate, cnt)
for i := 0; i < cnt; i++ {
res[i] = RandVal(i)
}


@@ -81,8 +81,8 @@ func (app *KVStoreApplication) DeliverTx(tx []byte) types.ResponseDeliverTx {
app.state.Size += 1
tags := []cmn.KVPair{
{[]byte("app.creator"), []byte("jae")},
{[]byte("app.key"), key},
{Key: []byte("app.creator"), Value: []byte("jae")},
{Key: []byte("app.key"), Value: key},
}
return types.ResponseDeliverTx{Code: code.CodeTypeOK, Tags: tags}
}


@@ -2,6 +2,7 @@ package kvstore
import (
"bytes"
"fmt"
"io/ioutil"
"sort"
"testing"
@@ -90,8 +91,8 @@ func TestPersistentKVStoreInfo(t *testing.T) {
header := types.Header{
Height: int64(height),
}
kvstore.BeginBlock(types.RequestBeginBlock{hash, header, nil, nil})
kvstore.EndBlock(types.RequestEndBlock{header.Height})
kvstore.BeginBlock(types.RequestBeginBlock{Hash: hash, Header: header})
kvstore.EndBlock(types.RequestEndBlock{Height: header.Height})
kvstore.Commit()
resInfo = kvstore.Info(types.RequestInfo{})
@@ -121,11 +122,11 @@ func TestValUpdates(t *testing.T) {
vals1, vals2 := vals[:nInit], kvstore.Validators()
valsEqual(t, vals1, vals2)
var v1, v2, v3 types.Validator
var v1, v2, v3 types.ValidatorUpdate
// add some validators
v1, v2 = vals[nInit], vals[nInit+1]
diff := []types.Validator{v1, v2}
diff := []types.ValidatorUpdate{v1, v2}
tx1 := MakeValSetChangeTx(v1.PubKey, v1.Power)
tx2 := MakeValSetChangeTx(v2.PubKey, v2.Power)
@@ -139,7 +140,7 @@ func TestValUpdates(t *testing.T) {
v1.Power = 0
v2.Power = 0
v3.Power = 0
diff = []types.Validator{v1, v2, v3}
diff = []types.ValidatorUpdate{v1, v2, v3}
tx1 = MakeValSetChangeTx(v1.PubKey, v1.Power)
tx2 = MakeValSetChangeTx(v2.PubKey, v2.Power)
tx3 := MakeValSetChangeTx(v3.PubKey, v3.Power)
@@ -157,18 +158,18 @@ func TestValUpdates(t *testing.T) {
} else {
v1.Power = 5
}
diff = []types.Validator{v1}
diff = []types.ValidatorUpdate{v1}
tx1 = MakeValSetChangeTx(v1.PubKey, v1.Power)
makeApplyBlock(t, kvstore, 3, diff, tx1)
vals1 = append([]types.Validator{v1}, vals1[1:]...)
vals1 = append([]types.ValidatorUpdate{v1}, vals1[1:]...)
vals2 = kvstore.Validators()
valsEqual(t, vals1, vals2)
}
func makeApplyBlock(t *testing.T, kvstore types.Application, heightInt int, diff []types.Validator, txs ...[]byte) {
func makeApplyBlock(t *testing.T, kvstore types.Application, heightInt int, diff []types.ValidatorUpdate, txs ...[]byte) {
// make and apply block
height := int64(heightInt)
hash := []byte("foo")
@@ -176,13 +177,13 @@ func makeApplyBlock(t *testing.T, kvstore types.Application, heightInt int, diff
Height: height,
}
kvstore.BeginBlock(types.RequestBeginBlock{hash, header, nil, nil})
kvstore.BeginBlock(types.RequestBeginBlock{Hash: hash, Header: header})
for _, tx := range txs {
if r := kvstore.DeliverTx(tx); r.IsErr() {
t.Fatal(r)
}
}
resEndBlock := kvstore.EndBlock(types.RequestEndBlock{header.Height})
resEndBlock := kvstore.EndBlock(types.RequestEndBlock{Height: header.Height})
kvstore.Commit()
valsEqual(t, diff, resEndBlock.ValidatorUpdates)
@@ -190,12 +191,12 @@ func makeApplyBlock(t *testing.T, kvstore types.Application, heightInt int, diff
}
// order doesn't matter
func valsEqual(t *testing.T, vals1, vals2 []types.Validator) {
func valsEqual(t *testing.T, vals1, vals2 []types.ValidatorUpdate) {
if len(vals1) != len(vals2) {
t.Fatalf("vals dont match in len. got %d, expected %d", len(vals2), len(vals1))
}
sort.Sort(types.Validators(vals1))
sort.Sort(types.Validators(vals2))
sort.Sort(types.ValidatorUpdates(vals1))
sort.Sort(types.ValidatorUpdates(vals2))
for i, v1 := range vals1 {
v2 := vals2[i]
if !bytes.Equal(v1.PubKey.Data, v2.PubKey.Data) ||
@@ -207,7 +208,7 @@ func valsEqual(t *testing.T, vals1, vals2 []types.Validator) {
func makeSocketClientServer(app types.Application, name string) (abcicli.Client, cmn.Service, error) {
// Start the listener
socket := cmn.Fmt("unix://%s.sock", name)
socket := fmt.Sprintf("unix://%s.sock", name)
logger := log.TestingLogger()
server := abciserver.NewSocketServer(socket, app)
@@ -229,7 +230,7 @@ func makeSocketClientServer(app types.Application, name string) (abcicli.Client,
func makeGRPCClientServer(app types.Application, name string) (abcicli.Client, cmn.Service, error) {
// Start the listener
socket := cmn.Fmt("unix://%s.sock", name)
socket := fmt.Sprintf("unix://%s.sock", name)
logger := log.TestingLogger()
gapp := types.NewGRPCApplication(app)


@@ -9,7 +9,6 @@ import (
"github.com/tendermint/tendermint/abci/example/code"
"github.com/tendermint/tendermint/abci/types"
cmn "github.com/tendermint/tendermint/libs/common"
dbm "github.com/tendermint/tendermint/libs/db"
"github.com/tendermint/tendermint/libs/log"
)
@@ -26,7 +25,7 @@ type PersistentKVStoreApplication struct {
app *KVStoreApplication
// validator set
ValUpdates []types.Validator
ValUpdates []types.ValidatorUpdate
logger log.Logger
}
@@ -102,7 +101,7 @@ func (app *PersistentKVStoreApplication) InitChain(req types.RequestInitChain) t
// Track the block hash and header information
func (app *PersistentKVStoreApplication) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock {
// reset valset changes
app.ValUpdates = make([]types.Validator, 0)
app.ValUpdates = make([]types.ValidatorUpdate, 0)
return types.ResponseBeginBlock{}
}
@@ -114,11 +113,11 @@ func (app *PersistentKVStoreApplication) EndBlock(req types.RequestEndBlock) typ
//---------------------------------------------
// update validators
func (app *PersistentKVStoreApplication) Validators() (validators []types.Validator) {
func (app *PersistentKVStoreApplication) Validators() (validators []types.ValidatorUpdate) {
itr := app.app.state.db.Iterator(nil, nil)
for ; itr.Valid(); itr.Next() {
if isValidatorTx(itr.Key()) {
validator := new(types.Validator)
validator := new(types.ValidatorUpdate)
err := types.ReadMessage(bytes.NewBuffer(itr.Value()), validator)
if err != nil {
panic(err)
@@ -130,7 +129,7 @@ func (app *PersistentKVStoreApplication) Validators() (validators []types.Valida
}
func MakeValSetChangeTx(pubkey types.PubKey, power int64) []byte {
return []byte(cmn.Fmt("val:%X/%d", pubkey.Data, power))
return []byte(fmt.Sprintf("val:%X/%d", pubkey.Data, power))
}
func isValidatorTx(tx []byte) bool {
@@ -168,11 +167,11 @@ func (app *PersistentKVStoreApplication) execValidatorTx(tx []byte) types.Respon
}
// update
return app.updateValidator(types.Ed25519Validator(pubkey, int64(power)))
return app.updateValidator(types.Ed25519ValidatorUpdate(pubkey, int64(power)))
}
// add, update, or remove a validator
func (app *PersistentKVStoreApplication) updateValidator(v types.Validator) types.ResponseDeliverTx {
func (app *PersistentKVStoreApplication) updateValidator(v types.ValidatorUpdate) types.ResponseDeliverTx {
key := []byte("val:" + string(v.PubKey.Data))
if v.Power == 0 {
// remove validator


@@ -12,11 +12,11 @@ import (
func InitChain(client abcicli.Client) error {
total := 10
vals := make([]types.Validator, total)
vals := make([]types.ValidatorUpdate, total)
for i := 0; i < total; i++ {
pubkey := cmn.RandBytes(33)
power := cmn.RandInt()
vals[i] = types.Ed25519Validator(pubkey, int64(power))
vals[i] = types.Ed25519ValidatorUpdate(pubkey, int64(power))
}
_, err := client.InitChainSync(types.RequestInitChain{
Validators: vals,


@@ -26,7 +26,7 @@ func startClient(abciType string) abcicli.Client {
}
func setOption(client abcicli.Client, key, value string) {
_, err := client.SetOptionSync(types.RequestSetOption{key, value})
_, err := client.SetOptionSync(types.RequestSetOption{Key: key, Value: value})
if err != nil {
panicf("setting %v=%v: \nerr: %v", key, value, err)
}


@@ -85,7 +85,7 @@ func NewGRPCApplication(app Application) *GRPCApplication {
}
func (app *GRPCApplication) Echo(ctx context.Context, req *RequestEcho) (*ResponseEcho, error) {
return &ResponseEcho{req.Message}, nil
return &ResponseEcho{Message: req.Message}, nil
}
func (app *GRPCApplication) Flush(ctx context.Context, req *RequestFlush) (*ResponseFlush, error) {


@@ -71,7 +71,7 @@ func encodeVarint(w io.Writer, i int64) (err error) {
func ToRequestEcho(message string) *Request {
return &Request{
Value: &Request_Echo{&RequestEcho{message}},
Value: &Request_Echo{&RequestEcho{Message: message}},
}
}
@@ -95,13 +95,13 @@ func ToRequestSetOption(req RequestSetOption) *Request {
func ToRequestDeliverTx(tx []byte) *Request {
return &Request{
Value: &Request_DeliverTx{&RequestDeliverTx{tx}},
Value: &Request_DeliverTx{&RequestDeliverTx{Tx: tx}},
}
}
func ToRequestCheckTx(tx []byte) *Request {
return &Request{
Value: &Request_CheckTx{&RequestCheckTx{tx}},
Value: &Request_CheckTx{&RequestCheckTx{Tx: tx}},
}
}
@@ -139,13 +139,13 @@ func ToRequestEndBlock(req RequestEndBlock) *Request {
func ToResponseException(errStr string) *Response {
return &Response{
Value: &Response_Exception{&ResponseException{errStr}},
Value: &Response_Exception{&ResponseException{Error: errStr}},
}
}
func ToResponseEcho(message string) *Response {
return &Response{
Value: &Response_Echo{&ResponseEcho{message}},
Value: &Response_Echo{&ResponseEcho{Message: message}},
}
}


@@ -22,7 +22,7 @@ func TestMarshalJSON(t *testing.T) {
Data: []byte("hello"),
GasWanted: 43,
Tags: []cmn.KVPair{
{[]byte("pho"), []byte("bo")},
{Key: []byte("pho"), Value: []byte("bo")},
},
}
b, err = json.Marshal(&r1)
@@ -83,9 +83,8 @@ func TestWriteReadMessage2(t *testing.T) {
Log: phrase,
GasWanted: 10,
Tags: []cmn.KVPair{
cmn.KVPair{[]byte("abc"), []byte("def")},
cmn.KVPair{Key: []byte("abc"), Value: []byte("def")},
},
// Fee: cmn.KI64Pair{
},
// TODO: add the rest
}


@@ -4,8 +4,8 @@ const (
PubKeyEd25519 = "ed25519"
)
func Ed25519Validator(pubkey []byte, power int64) Validator {
return Validator{
func Ed25519ValidatorUpdate(pubkey []byte, power int64) ValidatorUpdate {
return ValidatorUpdate{
// Address:
PubKey: PubKey{
Type: PubKeyEd25519,
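
Ed25519ValidatorUpdate above is the constructor applications use to describe validator-set changes. A hedged sketch of returning such updates from EndBlock; exampleApp and the placeholder key bytes are hypothetical, while ValidatorUpdate, Ed25519ValidatorUpdate and ResponseEndBlock.ValidatorUpdates come from this changeset:

package main

import (
	"fmt"

	"github.com/tendermint/tendermint/abci/types"
)

// exampleApp is a hypothetical application; only the EndBlock hook is sketched.
type exampleApp struct {
	types.BaseApplication
}

func (app *exampleApp) EndBlock(req types.RequestEndBlock) types.ResponseEndBlock {
	addKey := []byte("placeholder-ed25519-pubkey-bytes") // hypothetical key material
	delKey := []byte("another-placeholder-pubkey-bytes")
	updates := []types.ValidatorUpdate{
		types.Ed25519ValidatorUpdate(addKey, 10), // add or re-power a validator
		types.Ed25519ValidatorUpdate(delKey, 0),  // power 0 removes it from the set
	}
	return types.ResponseEndBlock{ValidatorUpdates: updates}
}

func main() {
	app := &exampleApp{}
	res := app.EndBlock(types.RequestEndBlock{Height: 1})
	fmt.Println("validator updates:", len(res.ValidatorUpdates))
}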

File diff suppressed because it is too large


@@ -4,12 +4,22 @@ package types;
// For more information on gogo.proto, see:
// https://github.com/gogo/protobuf/blob/master/extensions.md
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
import "github.com/tendermint/tmlibs/common/types.proto";
import "google/protobuf/timestamp.proto";
import "github.com/tendermint/tendermint/libs/common/types.proto";
// This file is copied from http://github.com/tendermint/abci
// NOTE: When using custom types, mind the warnings.
// https://github.com/gogo/protobuf/blob/master/custom_types.md#warnings-and-issues
option (gogoproto.marshaler_all) = true;
option (gogoproto.unmarshaler_all) = true;
option (gogoproto.sizer_all) = true;
option (gogoproto.goproto_registration) = true;
// Generate tests
option (gogoproto.populate_all) = true;
option (gogoproto.equal_all) = true;
option (gogoproto.testgen_all) = true;
//----------------------------------------
// Request types
@@ -47,10 +57,10 @@ message RequestSetOption {
}
message RequestInitChain {
int64 time = 1;
google.protobuf.Timestamp time = 1 [(gogoproto.nullable)=false, (gogoproto.stdtime)=true];
string chain_id = 2;
ConsensusParams consensus_params = 3;
repeated Validator validators = 4 [(gogoproto.nullable)=false];
repeated ValidatorUpdate validators = 4 [(gogoproto.nullable)=false];
bytes app_state_bytes = 5;
}
@@ -61,10 +71,11 @@ message RequestQuery {
bool prove = 4;
}
// NOTE: validators here have empty pubkeys.
message RequestBeginBlock {
bytes hash = 1;
Header header = 2 [(gogoproto.nullable)=false];
repeated SigningValidator validators = 3 [(gogoproto.nullable)=false];
LastCommitInfo last_commit_info = 3 [(gogoproto.nullable)=false];
repeated Evidence byzantine_validators = 4 [(gogoproto.nullable)=false];
}
@@ -132,7 +143,7 @@ message ResponseSetOption {
message ResponseInitChain {
ConsensusParams consensus_params = 1;
repeated Validator validators = 2 [(gogoproto.nullable)=false];
repeated ValidatorUpdate validators = 2 [(gogoproto.nullable)=false];
}
message ResponseQuery {
@@ -159,7 +170,6 @@ message ResponseCheckTx {
int64 gas_wanted = 5;
int64 gas_used = 6;
repeated common.KVPair tags = 7 [(gogoproto.nullable)=false, (gogoproto.jsontag)="tags,omitempty"];
common.KI64Pair fee = 8 [(gogoproto.nullable)=false];
}
message ResponseDeliverTx {
@@ -170,11 +180,10 @@ message ResponseDeliverTx {
int64 gas_wanted = 5;
int64 gas_used = 6;
repeated common.KVPair tags = 7 [(gogoproto.nullable)=false, (gogoproto.jsontag)="tags,omitempty"];
common.KI64Pair fee = 8 [(gogoproto.nullable)=false];
}
message ResponseEndBlock {
repeated Validator validator_updates = 1 [(gogoproto.nullable)=false];
repeated ValidatorUpdate validator_updates = 1 [(gogoproto.nullable)=false];
ConsensusParams consensus_param_updates = 2;
repeated common.KVPair tags = 3 [(gogoproto.nullable)=false, (gogoproto.jsontag)="tags,omitempty"];
}
@@ -195,14 +204,13 @@ message ConsensusParams {
BlockGossip block_gossip = 3;
}
// BlockSize contain limits on the block size.
// BlockSize contains limits on the block size.
message BlockSize {
int32 max_bytes = 1;
int32 max_txs = 2;
int64 max_gas = 3;
int64 max_gas = 2;
}
// TxSize contain limits on the tx size.
// TxSize contains limits on the tx size.
message TxSize {
int32 max_bytes = 1;
int64 max_gas = 2;
@@ -215,38 +223,66 @@ message BlockGossip {
int32 block_part_size_bytes = 1;
}
message LastCommitInfo {
int32 round = 1;
repeated VoteInfo votes = 2 [(gogoproto.nullable)=false];
}
//----------------------------------------
// Blockchain Types
// just the minimum the app might need
message Header {
// basics
// basic block info
string chain_id = 1 [(gogoproto.customname)="ChainID"];
int64 height = 2;
int64 time = 3;
// txs
int32 num_txs = 4;
google.protobuf.Timestamp time = 3 [(gogoproto.nullable)=false, (gogoproto.stdtime)=true];
int64 num_txs = 4;
int64 total_txs = 5;
// hashes
bytes last_block_hash = 6;
bytes validators_hash = 7;
bytes app_hash = 8;
// prev block info
BlockID last_block_id = 6 [(gogoproto.nullable)=false];
// consensus
Validator proposer = 9 [(gogoproto.nullable)=false];
// hashes of block data
bytes last_commit_hash = 7; // commit from validators from the last block
bytes data_hash = 8; // transactions
// hashes from the app output from the prev block
bytes validators_hash = 9; // validators for the current block
bytes next_validators_hash = 10; // validators for the next block
bytes consensus_hash = 11; // consensus params for current block
bytes app_hash = 12; // state after txs from the previous block
bytes last_results_hash = 13;// root hash of all results from the txs from the previous block
// consensus info
bytes evidence_hash = 14; // evidence included in the block
bytes proposer_address = 15; // original proposer of the block
}
message BlockID {
bytes hash = 1;
PartSetHeader parts_header = 2 [(gogoproto.nullable)=false];
}
message PartSetHeader {
int32 total = 1;
bytes hash = 2;
}
// Validator
message Validator {
bytes address = 1;
PubKey pub_key = 2 [(gogoproto.nullable)=false];
//PubKey pub_key = 2 [(gogoproto.nullable)=false];
int64 power = 3;
}
// Validator with an extra bool
message SigningValidator {
// ValidatorUpdate
message ValidatorUpdate {
PubKey pub_key = 1 [(gogoproto.nullable)=false];
int64 power = 2;
}
// VoteInfo
message VoteInfo {
Validator validator = 1 [(gogoproto.nullable)=false];
bool signed_last_block = 2;
}
@@ -260,7 +296,7 @@ message Evidence {
string type = 1;
Validator validator = 2 [(gogoproto.nullable)=false];
int64 height = 3;
int64 time = 4;
google.protobuf.Timestamp time = 4 [(gogoproto.nullable)=false, (gogoproto.stdtime)=true];
int64 total_voting_power = 5;
}
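
The new LastCommitInfo and VoteInfo messages replace the old SigningValidator list in RequestBeginBlock. A sketch of how an application could consume them, for example to tally which validators signed the previous block; the bookkeeping is illustrative, and the field names follow the Go types generated from the proto above:

package main

import (
	"fmt"

	"github.com/tendermint/tendermint/abci/types"
)

// rewardApp is a hypothetical app that tallies signers of the last block.
type rewardApp struct {
	types.BaseApplication
	signedPower map[string]int64
}

func (app *rewardApp) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock {
	for _, vote := range req.LastCommitInfo.Votes {
		if vote.SignedLastBlock {
			// Validator carries Address and Power (see the NOTE above about
			// validators having empty pubkeys in RequestBeginBlock).
			app.signedPower[string(vote.Validator.Address)] += vote.Validator.Power
		}
	}
	return types.ResponseBeginBlock{}
}

func main() {
	app := &rewardApp{signedPower: map[string]int64{}}
	req := types.RequestBeginBlock{
		LastCommitInfo: types.LastCommitInfo{
			Votes: []types.VoteInfo{
				{Validator: types.Validator{Address: []byte("addr1"), Power: 10}, SignedLastBlock: true},
				{Validator: types.Validator{Address: []byte("addr2"), Power: 5}, SignedLastBlock: false},
			},
		},
	}
	app.BeginBlock(req)
	fmt.Println(app.signedPower)
}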


@@ -1,31 +0,0 @@
package types
import (
"testing"
asrt "github.com/stretchr/testify/assert"
)
func TestConsensusParams(t *testing.T) {
assert := asrt.New(t)
params := &ConsensusParams{
BlockSize: &BlockSize{MaxGas: 12345},
BlockGossip: &BlockGossip{BlockPartSizeBytes: 54321},
}
var noParams *ConsensusParams // nil
// no error with nil fields
assert.Nil(noParams.GetBlockSize())
assert.EqualValues(noParams.GetBlockSize().GetMaxGas(), 0)
// get values with real fields
assert.NotNil(params.GetBlockSize())
assert.EqualValues(params.GetBlockSize().GetMaxTxs(), 0)
assert.EqualValues(params.GetBlockSize().GetMaxGas(), 12345)
assert.NotNil(params.GetBlockGossip())
assert.EqualValues(params.GetBlockGossip().GetBlockPartSizeBytes(), 54321)
assert.Nil(params.GetTxSize())
assert.EqualValues(params.GetTxSize().GetMaxBytes(), 0)
}

abci/types/typespb_test.go: 4737 lines changed (normal file)

File diff suppressed because it is too large


@@ -2,58 +2,33 @@ package types
import (
"bytes"
"encoding/json"
"sort"
cmn "github.com/tendermint/tendermint/libs/common"
)
//------------------------------------------------------------------------------
// Validators is a list of validators that implements the Sort interface
type Validators []Validator
// ValidatorUpdates is a list of validators that implements the Sort interface
type ValidatorUpdates []ValidatorUpdate
var _ sort.Interface = (Validators)(nil)
var _ sort.Interface = (ValidatorUpdates)(nil)
// All these methods for Validators:
// All these methods for ValidatorUpdates:
// Len, Less and Swap
// are for Validators to implement sort.Interface
// are for ValidatorUpdates to implement sort.Interface
// which will be used by the sort package.
// See Issue https://github.com/tendermint/abci/issues/212
func (v Validators) Len() int {
func (v ValidatorUpdates) Len() int {
return len(v)
}
// XXX: doesn't distinguish same validator with different power
func (v Validators) Less(i, j int) bool {
func (v ValidatorUpdates) Less(i, j int) bool {
return bytes.Compare(v[i].PubKey.Data, v[j].PubKey.Data) <= 0
}
func (v Validators) Swap(i, j int) {
func (v ValidatorUpdates) Swap(i, j int) {
v1 := v[i]
v[i] = v[j]
v[j] = v1
}
func ValidatorsString(vs Validators) string {
s := make([]validatorPretty, len(vs))
for i, v := range vs {
s[i] = validatorPretty{
Address: v.Address,
PubKey: v.PubKey.Data,
Power: v.Power,
}
}
b, err := json.Marshal(s)
if err != nil {
panic(err.Error())
}
return string(b)
}
type validatorPretty struct {
Address cmn.HexBytes `json:"address"`
PubKey []byte `json:"pub_key"`
Power int64 `json:"power"`
}
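
Because ValidatorUpdates implements sort.Interface keyed on the pubkey bytes, order-insensitive comparisons reduce to a sort followed by an element-wise check, which is what the kvstore test's valsEqual does above. A small usage sketch (the placeholder pubkey bytes are arbitrary):

package main

import (
	"fmt"
	"sort"

	"github.com/tendermint/tendermint/abci/types"
)

func main() {
	vals := []types.ValidatorUpdate{
		types.Ed25519ValidatorUpdate([]byte("bbbb-placeholder-pubkey"), 5),
		types.Ed25519ValidatorUpdate([]byte("aaaa-placeholder-pubkey"), 7),
	}
	// ValidatorUpdates sorts by PubKey.Data, so two update lists can be
	// compared regardless of the order in which they were produced.
	sort.Sort(types.ValidatorUpdates(vals))
	for _, v := range vals {
		fmt.Printf("%X -> %d\n", v.PubKey.Data, v.Power)
	}
}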


@@ -1,9 +1,9 @@
package version
// NOTE: we should probably be versioning the ABCI and the abci-cli separately
import (
"github.com/tendermint/tendermint/version"
)
const Maj = "0"
const Min = "12"
const Fix = "0"
// TODO: eliminate this after some version refactor
const Version = "0.12.0"
const Version = version.ABCIVersion


@@ -7,7 +7,7 @@ import (
"github.com/tendermint/go-amino"
proto "github.com/tendermint/tendermint/benchmarks/proto"
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/crypto/ed25519"
"github.com/tendermint/tendermint/p2p"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
)
@@ -16,7 +16,7 @@ func BenchmarkEncodeStatusWire(b *testing.B) {
b.StopTimer()
cdc := amino.NewCodec()
ctypes.RegisterAmino(cdc)
nodeKey := p2p.NodeKey{PrivKey: crypto.GenPrivKeyEd25519()}
nodeKey := p2p.NodeKey{PrivKey: ed25519.GenPrivKey()}
status := &ctypes.ResultStatus{
NodeInfo: p2p.NodeInfo{
ID: nodeKey.ID(),
@@ -52,7 +52,7 @@ func BenchmarkEncodeNodeInfoWire(b *testing.B) {
b.StopTimer()
cdc := amino.NewCodec()
ctypes.RegisterAmino(cdc)
nodeKey := p2p.NodeKey{PrivKey: crypto.GenPrivKeyEd25519()}
nodeKey := p2p.NodeKey{PrivKey: ed25519.GenPrivKey()}
nodeInfo := p2p.NodeInfo{
ID: nodeKey.ID(),
Moniker: "SOMENAME",
@@ -77,7 +77,7 @@ func BenchmarkEncodeNodeInfoBinary(b *testing.B) {
b.StopTimer()
cdc := amino.NewCodec()
ctypes.RegisterAmino(cdc)
nodeKey := p2p.NodeKey{PrivKey: crypto.GenPrivKeyEd25519()}
nodeKey := p2p.NodeKey{PrivKey: ed25519.GenPrivKey()}
nodeInfo := p2p.NodeInfo{
ID: nodeKey.ID(),
Moniker: "SOMENAME",
@@ -98,7 +98,7 @@ func BenchmarkEncodeNodeInfoBinary(b *testing.B) {
func BenchmarkEncodeNodeInfoProto(b *testing.B) {
b.StopTimer()
nodeKey := p2p.NodeKey{PrivKey: crypto.GenPrivKeyEd25519()}
nodeKey := p2p.NodeKey{PrivKey: ed25519.GenPrivKey()}
nodeID := string(nodeKey.ID())
someName := "SOMENAME"
someAddr := "SOMEADDR"


@@ -6,8 +6,8 @@ import (
"fmt"
"time"
rpcclient "github.com/tendermint/tendermint/rpc/lib/client"
cmn "github.com/tendermint/tendermint/libs/common"
rpcclient "github.com/tendermint/tendermint/rpc/lib/client"
)
func main() {


@@ -29,10 +29,10 @@ eg, L = latency = 0.1s
*/
const (
requestIntervalMS = 100
maxTotalRequesters = 1000
requestIntervalMS = 2
maxTotalRequesters = 600
maxPendingRequests = maxTotalRequesters
maxPendingRequestsPerPeer = 50
maxPendingRequestsPerPeer = 20
// Minimum recv rate to ensure we're receiving blocks from a peer fast
// enough. If a peer is not sending us data at at least that rate, we
@@ -219,14 +219,12 @@ func (pool *BlockPool) RedoRequest(height int64) p2p.ID {
defer pool.mtx.Unlock()
request := pool.requesters[height]
if request.block == nil {
panic("Expected block to be non-nil")
peerID := request.getPeerID()
if peerID != p2p.ID("") {
// RemovePeer will redo all requesters associated with this peer.
pool.removePeer(peerID)
}
// RemovePeer will redo all requesters associated with this peer.
pool.removePeer(request.peerID)
return request.peerID
return peerID
}
// TODO: ensure that blocks come in order for each peer.
@@ -367,10 +365,10 @@ func (pool *BlockPool) debug() string {
nextHeight := pool.height + pool.requestersLen()
for h := pool.height; h < nextHeight; h++ {
if pool.requesters[h] == nil {
str += cmn.Fmt("H(%v):X ", h)
str += fmt.Sprintf("H(%v):X ", h)
} else {
str += cmn.Fmt("H(%v):", h)
str += cmn.Fmt("B?(%v) ", pool.requesters[h].block != nil)
str += fmt.Sprintf("H(%v):", h)
str += fmt.Sprintf("B?(%v) ", pool.requesters[h].block != nil)
}
}
return str


@@ -1,7 +1,6 @@
package blockchain
import (
"math/rand"
"testing"
"time"
@@ -25,7 +24,7 @@ func makePeers(numPeers int, minHeight, maxHeight int64) map[p2p.ID]testPeer {
peers := make(map[p2p.ID]testPeer, numPeers)
for i := 0; i < numPeers; i++ {
peerID := p2p.ID(cmn.RandStr(12))
height := minHeight + rand.Int63n(maxHeight-minHeight)
height := minHeight + cmn.RandInt63n(maxHeight-minHeight)
peers[peerID] = testPeer{peerID, height}
}
return peers
@@ -80,7 +79,7 @@ func TestBasic(t *testing.T) {
}
// Request desired, pretend like we got the block immediately.
go func() {
block := &types.Block{Header: &types.Header{Height: request.Height}}
block := &types.Block{Header: types.Header{Height: request.Height}}
pool.AddBlock(request.PeerID, block, 123)
t.Logf("Added block from peer %v (height: %v)", request.PeerID, request.Height)
}()


@@ -18,7 +18,8 @@ const (
// BlockchainChannel is a channel for blocks and status updates (`BlockStore` height)
BlockchainChannel = byte(0x40)
trySyncIntervalMS = 50
trySyncIntervalMS = 10
// stop syncing when last block's time is
// within this much of the system time.
// stopSyncingDurationMinutes = 10
@@ -76,8 +77,9 @@ func NewBlockchainReactor(state sm.State, blockExec *sm.BlockExecutor, store *Bl
store.Height()))
}
const capacity = 1000 // must be bigger than peers count
requestsCh := make(chan BlockRequest, capacity)
requestsCh := make(chan BlockRequest, maxTotalRequesters)
const capacity = 1000 // must be bigger than peers count
errorsCh := make(chan peerError, capacity) // so we don't block in #Receive#pool.AddBlock
pool := NewBlockPool(
@@ -107,9 +109,6 @@ func (bcR *BlockchainReactor) SetLogger(l log.Logger) {
// OnStart implements cmn.Service.
func (bcR *BlockchainReactor) OnStart() error {
if err := bcR.BaseReactor.OnStart(); err != nil {
return err
}
if bcR.fastSync {
err := bcR.pool.Start()
if err != nil {
@@ -122,7 +121,6 @@ func (bcR *BlockchainReactor) OnStart() error {
// OnStop implements cmn.Service.
func (bcR *BlockchainReactor) OnStop() {
bcR.BaseReactor.OnStop()
bcR.pool.Stop()
}
@@ -203,13 +201,12 @@ func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
// Got a peer status. Unverified.
bcR.pool.SetPeerHeight(src.ID(), msg.Height)
default:
bcR.Logger.Error(cmn.Fmt("Unknown message type %v", reflect.TypeOf(msg)))
bcR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
}
}
// Handle messages from the poolReactor telling the reactor what to do.
// NOTE: Don't sleep in the FOR_LOOP or otherwise slow it down!
// (Except for the SYNC_LOOP, which is the primary purpose and must be synchronous.)
func (bcR *BlockchainReactor) poolRoutine() {
trySyncTicker := time.NewTicker(trySyncIntervalMS * time.Millisecond)
@@ -224,6 +221,8 @@ func (bcR *BlockchainReactor) poolRoutine() {
lastHundred := time.Now()
lastRate := 0.0
didProcessCh := make(chan struct{}, 1)
FOR_LOOP:
for {
select {
@@ -239,14 +238,17 @@ FOR_LOOP:
// The pool handles timeouts, just let it go.
continue FOR_LOOP
}
case err := <-bcR.errorsCh:
peer := bcR.Switch.Peers().Get(err.peerID)
if peer != nil {
bcR.Switch.StopPeerForError(peer, err)
}
case <-statusUpdateTicker.C:
// ask for status updates
go bcR.BroadcastStatusRequest() // nolint: errcheck
case <-switchToConsensusTicker.C:
height, numPending, lenRequesters := bcR.pool.GetStatus()
outbound, inbound, _ := bcR.Switch.NumPeers()
@@ -261,60 +263,78 @@ FOR_LOOP:
break FOR_LOOP
}
case <-trySyncTicker.C: // chan time
// This loop can be slow as long as it's doing syncing work.
SYNC_LOOP:
for i := 0; i < 10; i++ {
// See if there are any blocks to sync.
first, second := bcR.pool.PeekTwoBlocks()
//bcR.Logger.Info("TrySync peeked", "first", first, "second", second)
if first == nil || second == nil {
// We need both to sync the first block.
break SYNC_LOOP
select {
case didProcessCh <- struct{}{}:
default:
}
case <-didProcessCh:
// NOTE: It is a subtle mistake to process more than a single block
// at a time (e.g. 10) here, because we only TrySend 1 request per
// loop. The ratio mismatch can result in starving of blocks, a
// sudden burst of requests and responses, and repeat.
// Consequently, it is better to split these routines rather than
// coupling them as it's written here. TODO uncouple from request
// routine.
// See if there are any blocks to sync.
first, second := bcR.pool.PeekTwoBlocks()
//bcR.Logger.Info("TrySync peeked", "first", first, "second", second)
if first == nil || second == nil {
// We need both to sync the first block.
continue FOR_LOOP
} else {
// Try again quickly next loop.
didProcessCh <- struct{}{}
}
firstParts := first.MakePartSet(state.ConsensusParams.BlockPartSizeBytes)
firstPartsHeader := firstParts.Header()
firstID := types.BlockID{first.Hash(), firstPartsHeader}
// Finally, verify the first block using the second's commit
// NOTE: we can probably make this more efficient, but note that calling
// first.Hash() doesn't verify the tx contents, so MakePartSet() is
// currently necessary.
err := state.Validators.VerifyCommit(
chainID, firstID, first.Height, second.LastCommit)
if err != nil {
bcR.Logger.Error("Error in validation", "err", err)
peerID := bcR.pool.RedoRequest(first.Height)
peer := bcR.Switch.Peers().Get(peerID)
if peer != nil {
// NOTE: we've already removed the peer's request, but we
// still need to clean up the rest.
bcR.Switch.StopPeerForError(peer, fmt.Errorf("BlockchainReactor validation error: %v", err))
}
firstParts := first.MakePartSet(state.ConsensusParams.BlockPartSizeBytes)
firstPartsHeader := firstParts.Header()
firstID := types.BlockID{first.Hash(), firstPartsHeader}
// Finally, verify the first block using the second's commit
// NOTE: we can probably make this more efficient, but note that calling
// first.Hash() doesn't verify the tx contents, so MakePartSet() is
// currently necessary.
err := state.Validators.VerifyCommit(
chainID, firstID, first.Height, second.LastCommit)
continue FOR_LOOP
} else {
bcR.pool.PopRequest()
// TODO: batch saves so we dont persist to disk every block
bcR.store.SaveBlock(first, firstParts, second.LastCommit)
// TODO: same thing for app - but we would need a way to
// get the hash without persisting the state
var err error
state, err = bcR.blockExec.ApplyBlock(state, firstID, first)
if err != nil {
bcR.Logger.Error("Error in validation", "err", err)
peerID := bcR.pool.RedoRequest(first.Height)
peer := bcR.Switch.Peers().Get(peerID)
if peer != nil {
bcR.Switch.StopPeerForError(peer, fmt.Errorf("BlockchainReactor validation error: %v", err))
}
break SYNC_LOOP
} else {
bcR.pool.PopRequest()
// TODO This is bad, are we zombie?
cmn.PanicQ(fmt.Sprintf("Failed to process committed block (%d:%X): %v",
first.Height, first.Hash(), err))
}
blocksSynced++
// TODO: batch saves so we dont persist to disk every block
bcR.store.SaveBlock(first, firstParts, second.LastCommit)
// TODO: same thing for app - but we would need a way to
// get the hash without persisting the state
var err error
state, err = bcR.blockExec.ApplyBlock(state, firstID, first)
if err != nil {
// TODO This is bad, are we zombie?
cmn.PanicQ(cmn.Fmt("Failed to process committed block (%d:%X): %v",
first.Height, first.Hash(), err))
}
blocksSynced++
if blocksSynced%100 == 0 {
lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds())
bcR.Logger.Info("Fast Sync Rate", "height", bcR.pool.height,
"max_peer_height", bcR.pool.MaxPeerHeight(), "blocks/s", lastRate)
lastHundred = time.Now()
}
if blocksSynced%100 == 0 {
lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds())
bcR.Logger.Info("Fast Sync Rate", "height", bcR.pool.height,
"max_peer_height", bcR.pool.MaxPeerHeight(), "blocks/s", lastRate)
lastHundred = time.Now()
}
}
continue FOR_LOOP
case <-bcR.Quit():
break FOR_LOOP
}
@@ -336,11 +356,11 @@ type BlockchainMessage interface{}
func RegisterBlockchainMessages(cdc *amino.Codec) {
cdc.RegisterInterface((*BlockchainMessage)(nil), nil)
cdc.RegisterConcrete(&bcBlockRequestMessage{}, "tendermint/mempool/BlockRequest", nil)
cdc.RegisterConcrete(&bcBlockResponseMessage{}, "tendermint/mempool/BlockResponse", nil)
cdc.RegisterConcrete(&bcNoBlockResponseMessage{}, "tendermint/mempool/NoBlockResponse", nil)
cdc.RegisterConcrete(&bcStatusResponseMessage{}, "tendermint/mempool/StatusResponse", nil)
cdc.RegisterConcrete(&bcStatusRequestMessage{}, "tendermint/mempool/StatusRequest", nil)
cdc.RegisterConcrete(&bcBlockRequestMessage{}, "tendermint/blockchain/BlockRequest", nil)
cdc.RegisterConcrete(&bcBlockResponseMessage{}, "tendermint/blockchain/BlockResponse", nil)
cdc.RegisterConcrete(&bcNoBlockResponseMessage{}, "tendermint/blockchain/NoBlockResponse", nil)
cdc.RegisterConcrete(&bcStatusResponseMessage{}, "tendermint/blockchain/StatusResponse", nil)
cdc.RegisterConcrete(&bcStatusRequestMessage{}, "tendermint/blockchain/StatusRequest", nil)
}
func decodeMsg(bz []byte) (msg BlockchainMessage, err error) {
@@ -358,7 +378,7 @@ type bcBlockRequestMessage struct {
}
func (m *bcBlockRequestMessage) String() string {
return cmn.Fmt("[bcBlockRequestMessage %v]", m.Height)
return fmt.Sprintf("[bcBlockRequestMessage %v]", m.Height)
}
type bcNoBlockResponseMessage struct {
@@ -366,7 +386,7 @@ type bcNoBlockResponseMessage struct {
}
func (brm *bcNoBlockResponseMessage) String() string {
return cmn.Fmt("[bcNoBlockResponseMessage %d]", brm.Height)
return fmt.Sprintf("[bcNoBlockResponseMessage %d]", brm.Height)
}
//-------------------------------------
@@ -376,7 +396,7 @@ type bcBlockResponseMessage struct {
}
func (m *bcBlockResponseMessage) String() string {
return cmn.Fmt("[bcBlockResponseMessage %v]", m.Block.Height)
return fmt.Sprintf("[bcBlockResponseMessage %v]", m.Block.Height)
}
//-------------------------------------
@@ -386,7 +406,7 @@ type bcStatusRequestMessage struct {
}
func (m *bcStatusRequestMessage) String() string {
return cmn.Fmt("[bcStatusRequestMessage %v]", m.Height)
return fmt.Sprintf("[bcStatusRequestMessage %v]", m.Height)
}
//-------------------------------------
@@ -396,5 +416,5 @@ type bcStatusResponseMessage struct {
}
func (m *bcStatusResponseMessage) String() string {
return cmn.Fmt("[bcStatusResponseMessage %v]", m.Height)
return fmt.Sprintf("[bcStatusResponseMessage %v]", m.Height)
}
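
The poolRoutine rewrite above drops the inner SYNC_LOOP in favour of a one-element didProcessCh: the trySync ticker does a non-blocking send, and each processed block re-arms the channel, so block processing keeps draining without busy-looping and without being tied to the ticker interval. A self-contained sketch of that wake-up pattern; the queued-block bookkeeping is illustrative, not the reactor's actual logic:

package main

import (
	"fmt"
	"time"
)

func main() {
	didProcessCh := make(chan struct{}, 1) // capacity 1: pending wake-ups coalesce
	ticker := time.NewTicker(10 * time.Millisecond)
	defer ticker.Stop()

	pending := 5 // pretend five blocks are queued for processing
	deadline := time.After(500 * time.Millisecond)

	for {
		select {
		case <-ticker.C:
			// Non-blocking send: if a wake-up is already queued, do nothing.
			select {
			case didProcessCh <- struct{}{}:
			default:
			}
		case <-didProcessCh:
			if pending == 0 {
				continue // nothing to do; wait for the next tick
			}
			pending--
			fmt.Println("processed one block, remaining:", pending)
			// Re-arm immediately so the next queued block is handled without
			// waiting for another tick.
			didProcessCh <- struct{}{}
		case <-deadline:
			return
		}
	}
}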


@@ -156,7 +156,7 @@ func makeTxs(height int64) (txs []types.Tx) {
}
func makeBlock(height int64, state sm.State) *types.Block {
block, _ := state.MakeBlock(height, makeTxs(height), new(types.Commit))
block, _ := state.MakeBlock(height, makeTxs(height), new(types.Commit), nil, state.Validators.GetProposer().Address)
return block
}
@@ -206,3 +206,4 @@ func (tp *bcrTestPeer) IsPersistent() bool { return true }
func (tp *bcrTestPeer) Get(s string) interface{} { return s }
func (tp *bcrTestPeer) Set(string, interface{}) {}
func (tp *bcrTestPeer) RemoteIP() net.IP { return []byte{127, 0, 0, 1} }
func (tp *bcrTestPeer) OriginalAddr() *p2p.NetAddress { return nil }


@@ -148,10 +148,10 @@ func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, s
}
height := block.Height
if g, w := height, bs.Height()+1; g != w {
cmn.PanicSanity(cmn.Fmt("BlockStore can only save contiguous blocks. Wanted %v, got %v", w, g))
cmn.PanicSanity(fmt.Sprintf("BlockStore can only save contiguous blocks. Wanted %v, got %v", w, g))
}
if !blockParts.IsComplete() {
cmn.PanicSanity(cmn.Fmt("BlockStore can only save complete block part sets"))
cmn.PanicSanity(fmt.Sprintf("BlockStore can only save complete block part sets"))
}
// Save block meta
@@ -188,7 +188,7 @@ func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, s
func (bs *BlockStore) saveBlockPart(height int64, index int, part *types.Part) {
if height != bs.Height()+1 {
cmn.PanicSanity(cmn.Fmt("BlockStore can only save contiguous blocks. Wanted %v, got %v", bs.Height()+1, height))
cmn.PanicSanity(fmt.Sprintf("BlockStore can only save contiguous blocks. Wanted %v, got %v", bs.Height()+1, height))
}
partBytes := cdc.MustMarshalBinaryBare(part)
bs.db.Set(calcBlockPartKey(height, index), partBytes)
@@ -224,7 +224,7 @@ type BlockStoreStateJSON struct {
func (bsj BlockStoreStateJSON) Save(db dbm.DB) {
bytes, err := cdc.MarshalJSON(bsj)
if err != nil {
cmn.PanicSanity(cmn.Fmt("Could not marshal state bytes: %v", err))
cmn.PanicSanity(fmt.Sprintf("Could not marshal state bytes: %v", err))
}
db.SetSync(blockStoreKey, bytes)
}


@@ -6,7 +6,6 @@ import (
"runtime/debug"
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -14,6 +13,7 @@ import (
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/types"
tmtime "github.com/tendermint/tendermint/types/time"
)
func TestLoadBlockStoreStateJSON(t *testing.T) {
@@ -49,7 +49,7 @@ func TestNewBlockStore(t *testing.T) {
return nil, nil
})
require.NotNil(t, panicErr, "#%d panicCauser: %q expected a panic", i, tt.data)
assert.Contains(t, panicErr.Error(), tt.wantErr, "#%d data: %q", i, tt.data)
assert.Contains(t, fmt.Sprintf("%#v", panicErr), tt.wantErr, "#%d data: %q", i, tt.data)
}
db.Set(blockStoreKey, nil)
@@ -70,7 +70,7 @@ var (
part1 = partSet.GetPart(0)
part2 = partSet.GetPart(1)
seenCommit1 = &types.Commit{Precommits: []*types.Vote{{Height: 10,
Timestamp: time.Now().UTC()}}}
Timestamp: tmtime.Now()}}}
)
// TODO: This test should be simplified ...
@@ -91,7 +91,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
block := makeBlock(bs.Height()+1, state)
validPartSet := block.MakePartSet(2)
seenCommit := &types.Commit{Precommits: []*types.Vote{{Height: 10,
Timestamp: time.Now().UTC()}}}
Timestamp: tmtime.Now()}}}
bs.SaveBlock(block, partSet, seenCommit)
require.Equal(t, bs.Height(), block.Header.Height, "expecting the new height to be changed")
@@ -103,7 +103,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
Height: 1,
NumTxs: 100,
ChainID: "block_test",
Time: time.Now(),
Time: tmtime.Now(),
}
header2 := header1
header2.Height = 4
@@ -111,7 +111,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
// End of setup, test data
commitAtH10 := &types.Commit{Precommits: []*types.Vote{{Height: 10,
Timestamp: time.Now().UTC()}}}
Timestamp: tmtime.Now()}}}
tuples := []struct {
block *types.Block
parts *types.PartSet
@@ -126,7 +126,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
eraseSeenCommitInDB bool
}{
{
block: newBlock(&header1, commitAtH10),
block: newBlock(header1, commitAtH10),
parts: validPartSet,
seenCommit: seenCommit1,
},
@@ -137,19 +137,19 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
},
{
block: newBlock(&header2, commitAtH10),
block: newBlock(header2, commitAtH10),
parts: uncontiguousPartSet,
wantPanic: "only save contiguous blocks", // and incomplete and uncontiguous parts
},
{
block: newBlock(&header1, commitAtH10),
block: newBlock(header1, commitAtH10),
parts: incompletePartSet,
wantPanic: "only save complete block", // incomplete parts
},
{
block: newBlock(&header1, commitAtH10),
block: newBlock(header1, commitAtH10),
parts: validPartSet,
seenCommit: seenCommit1,
corruptCommitInDB: true, // Corrupt the DB's commit entry
@@ -157,7 +157,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
},
{
block: newBlock(&header1, commitAtH10),
block: newBlock(header1, commitAtH10),
parts: validPartSet,
seenCommit: seenCommit1,
wantPanic: "unmarshal to types.BlockMeta failed",
@@ -165,7 +165,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
},
{
block: newBlock(&header1, commitAtH10),
block: newBlock(header1, commitAtH10),
parts: validPartSet,
seenCommit: seenCommit1,
@@ -174,7 +174,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
},
{
block: newBlock(&header1, commitAtH10),
block: newBlock(header1, commitAtH10),
parts: validPartSet,
seenCommit: seenCommit1,
@@ -183,7 +183,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
},
{
block: newBlock(&header1, commitAtH10),
block: newBlock(header1, commitAtH10),
parts: validPartSet,
seenCommit: seenCommit1,
@@ -238,7 +238,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
if subStr := tuple.wantPanic; subStr != "" {
if panicErr == nil {
t.Errorf("#%d: want a non-nil panic", i)
} else if got := panicErr.Error(); !strings.Contains(got, subStr) {
} else if got := fmt.Sprintf("%#v", panicErr); !strings.Contains(got, subStr) {
t.Errorf("#%d:\n\tgotErr: %q\nwant substring: %q", i, got, subStr)
}
continue
@@ -335,7 +335,7 @@ func TestBlockFetchAtHeight(t *testing.T) {
partSet := block.MakePartSet(2)
seenCommit := &types.Commit{Precommits: []*types.Vote{{Height: 10,
Timestamp: time.Now().UTC()}}}
Timestamp: tmtime.Now()}}}
bs.SaveBlock(block, partSet, seenCommit)
require.Equal(t, bs.Height(), block.Header.Height, "expecting the new height to be changed")
@@ -375,7 +375,7 @@ func doFn(fn func() (interface{}, error)) (res interface{}, err error, panicErr
return res, err, panicErr
}
func newBlock(hdr *types.Header, lastCommit *types.Commit) *types.Block {
func newBlock(hdr types.Header, lastCommit *types.Commit) *types.Block {
return &types.Block{
Header: hdr,
LastCommit: lastCommit,


@@ -2,12 +2,12 @@ package blockchain
import (
"github.com/tendermint/go-amino"
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/types"
)
var cdc = amino.NewCodec()
func init() {
RegisterBlockchainMessages(cdc)
crypto.RegisterAmino(cdc)
types.RegisterBlockAmino(cdc)
}


@@ -4,7 +4,7 @@ import (
"flag"
"os"
crypto "github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/crypto/ed25519"
cmn "github.com/tendermint/tendermint/libs/common"
"github.com/tendermint/tendermint/libs/log"
@@ -37,7 +37,7 @@ func main() {
*chainID,
*addr,
pv,
crypto.GenPrivKeyEd25519(),
ed25519.GenPrivKey(),
)
err := rs.Start()
if err != nil {


@@ -5,8 +5,8 @@ import (
"github.com/spf13/cobra"
"github.com/tendermint/tendermint/p2p"
cmn "github.com/tendermint/tendermint/libs/common"
"github.com/tendermint/tendermint/p2p"
)
// GenNodeKeyCmd allows the generation of a node key. It prints node's ID to


@@ -1,15 +1,16 @@
package commands
import (
"time"
"fmt"
"github.com/spf13/cobra"
cfg "github.com/tendermint/tendermint/config"
cmn "github.com/tendermint/tendermint/libs/common"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/privval"
"github.com/tendermint/tendermint/types"
cmn "github.com/tendermint/tendermint/libs/common"
tmtime "github.com/tendermint/tendermint/types/time"
)
// InitFilesCmd initialises a fresh Tendermint Core instance.
@@ -52,8 +53,8 @@ func initFilesWithConfig(config *cfg.Config) error {
logger.Info("Found genesis file", "path", genFile)
} else {
genDoc := types.GenesisDoc{
ChainID: cmn.Fmt("test-chain-%v", cmn.RandStr(6)),
GenesisTime: time.Now(),
ChainID: fmt.Sprintf("test-chain-%v", cmn.RandStr(6)),
GenesisTime: tmtime.Now(),
ConsensusParams: types.DefaultConsensusParams(),
}
genDoc.Validators = []types.GenesisValidator{{


@@ -7,7 +7,6 @@ import (
"github.com/spf13/cobra"
cmn "github.com/tendermint/tendermint/libs/common"
"github.com/tendermint/tendermint/lite/proxy"
rpcclient "github.com/tendermint/tendermint/rpc/client"
)
@@ -66,17 +65,21 @@ func runProxy(cmd *cobra.Command, args []string) error {
}
// First, connect a client
logger.Info("Connecting to source HTTP client...")
node := rpcclient.NewHTTP(nodeAddr, "/websocket")
cert, err := proxy.GetCertifier(chainID, home, nodeAddr)
logger.Info("Constructing Verifier...")
cert, err := proxy.NewVerifier(chainID, home, node, logger)
if err != nil {
return err
return cmn.ErrorWrap(err, "constructing Verifier")
}
cert.SetLogger(logger)
sc := proxy.SecureClient(node, cert)
logger.Info("Starting proxy...")
err = proxy.StartProxy(sc, listenAddr, logger)
if err != nil {
return err
return cmn.ErrorWrap(err, "starting proxy")
}
cmn.TrapSignal(func() {


@@ -5,8 +5,8 @@ import (
"github.com/spf13/cobra"
"github.com/tendermint/tendermint/privval"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/privval"
)
// ResetAllCmd removes the database of this Tendermint core


@@ -6,15 +6,15 @@ import (
"os"
"path/filepath"
"strings"
"time"
"github.com/spf13/cobra"
cfg "github.com/tendermint/tendermint/config"
cmn "github.com/tendermint/tendermint/libs/common"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/privval"
"github.com/tendermint/tendermint/types"
cmn "github.com/tendermint/tendermint/libs/common"
tmtime "github.com/tendermint/tendermint/types/time"
)
var (
@@ -76,7 +76,7 @@ func testnetFiles(cmd *cobra.Command, args []string) error {
genVals := make([]types.GenesisValidator, nValidators)
for i := 0; i < nValidators; i++ {
nodeDirName := cmn.Fmt("%s%d", nodeDirPrefix, i)
nodeDirName := fmt.Sprintf("%s%d", nodeDirPrefix, i)
nodeDir := filepath.Join(outputDir, nodeDirName)
config.SetRoot(nodeDir)
@@ -98,7 +98,7 @@ func testnetFiles(cmd *cobra.Command, args []string) error {
}
for i := 0; i < nNonValidators; i++ {
nodeDir := filepath.Join(outputDir, cmn.Fmt("%s%d", nodeDirPrefix, i+nValidators))
nodeDir := filepath.Join(outputDir, fmt.Sprintf("%s%d", nodeDirPrefix, i+nValidators))
config.SetRoot(nodeDir)
err := os.MkdirAll(filepath.Join(nodeDir, "config"), nodeDirPerm)
@@ -112,14 +112,14 @@ func testnetFiles(cmd *cobra.Command, args []string) error {
// Generate genesis doc from generated validators
genDoc := &types.GenesisDoc{
GenesisTime: time.Now(),
GenesisTime: tmtime.Now(),
ChainID: "chain-" + cmn.RandStr(6),
Validators: genVals,
}
// Write genesis file.
for i := 0; i < nValidators+nNonValidators; i++ {
nodeDir := filepath.Join(outputDir, cmn.Fmt("%s%d", nodeDirPrefix, i))
nodeDir := filepath.Join(outputDir, fmt.Sprintf("%s%d", nodeDirPrefix, i))
if err := genDoc.SaveAs(filepath.Join(nodeDir, config.BaseConfig.Genesis)); err != nil {
_ = os.RemoveAll(outputDir)
return err
@@ -159,7 +159,7 @@ func hostnameOrIP(i int) string {
func populatePersistentPeersInConfigAndWriteIt(config *cfg.Config) error {
persistentPeers := make([]string, nValidators+nNonValidators)
for i := 0; i < nValidators+nNonValidators; i++ {
nodeDir := filepath.Join(outputDir, cmn.Fmt("%s%d", nodeDirPrefix, i))
nodeDir := filepath.Join(outputDir, fmt.Sprintf("%s%d", nodeDirPrefix, i))
config.SetRoot(nodeDir)
nodeKey, err := p2p.LoadNodeKey(config.NodeKeyFile())
if err != nil {
@@ -170,7 +170,7 @@ func populatePersistentPeersInConfigAndWriteIt(config *cfg.Config) error {
persistentPeersList := strings.Join(persistentPeers, ",")
for i := 0; i < nValidators+nNonValidators; i++ {
nodeDir := filepath.Join(outputDir, cmn.Fmt("%s%d", nodeDirPrefix, i))
nodeDir := filepath.Join(outputDir, fmt.Sprintf("%s%d", nodeDirPrefix, i))
config.SetRoot(nodeDir)
config.P2P.PersistentPeers = persistentPeersList
config.P2P.AddrBookStrict = false


@@ -2,11 +2,11 @@ package commands
import (
"github.com/tendermint/go-amino"
"github.com/tendermint/tendermint/crypto"
cryptoAmino "github.com/tendermint/tendermint/crypto/encoding/amino"
)
var cdc = amino.NewCodec()
func init() {
crypto.RegisterAmino(cdc)
cryptoAmino.RegisterAmino(cdc)
}


@@ -21,3 +21,4 @@ ignore:
- "docs"
- "DOCKER"
- "scripts"
- "**/*.pb.go"


@@ -94,7 +94,6 @@ func (cfg *Config) SetRoot(root string) *Config {
// BaseConfig defines the base configuration for a Tendermint node
type BaseConfig struct {
// chainID is unexposed and immutable but here for convenience
chainID string
@@ -102,49 +101,49 @@ type BaseConfig struct {
// This should be set in viper so it can unmarshal into this struct
RootDir string `mapstructure:"home"`
// Path to the JSON file containing the initial validator set and other meta data
Genesis string `mapstructure:"genesis_file"`
// Path to the JSON file containing the private key to use as a validator in the consensus protocol
PrivValidator string `mapstructure:"priv_validator_file"`
// A JSON file containing the private key to use for p2p authenticated encryption
NodeKey string `mapstructure:"node_key_file"`
// A custom human readable name for this node
Moniker string `mapstructure:"moniker"`
// TCP or UNIX socket address for Tendermint to listen on for
// connections from an external PrivValidator process
PrivValidatorListenAddr string `mapstructure:"priv_validator_laddr"`
// TCP or UNIX socket address of the ABCI application,
// or the name of an ABCI application compiled in with the Tendermint binary
ProxyApp string `mapstructure:"proxy_app"`
// Mechanism to connect to the ABCI application: socket | grpc
ABCI string `mapstructure:"abci"`
// Output level for logging
LogLevel string `mapstructure:"log_level"`
// TCP or UNIX socket address for the profiling server to listen on
ProfListenAddress string `mapstructure:"prof_laddr"`
// A custom human readable name for this node
Moniker string `mapstructure:"moniker"`
// If this node is many blocks behind the tip of the chain, FastSync
// allows them to catchup quickly by downloading blocks in parallel
// and verifying their commits
FastSync bool `mapstructure:"fast_sync"`
// If true, query the ABCI app on connecting to a new peer
// so the app can decide if we should keep the connection or not
FilterPeers bool `mapstructure:"filter_peers"` // false
// Database backend: leveldb | memdb
DBBackend string `mapstructure:"db_backend"`
// Database directory
DBPath string `mapstructure:"db_dir"`
// Output level for logging
LogLevel string `mapstructure:"log_level"`
// Path to the JSON file containing the initial validator set and other meta data
Genesis string `mapstructure:"genesis_file"`
// Path to the JSON file containing the private key to use as a validator in the consensus protocol
PrivValidator string `mapstructure:"priv_validator_file"`
// TCP or UNIX socket address for Tendermint to listen on for
// connections from an external PrivValidator process
PrivValidatorListenAddr string `mapstructure:"priv_validator_laddr"`
// A JSON file containing the private key to use for p2p authenticated encryption
NodeKey string `mapstructure:"node_key_file"`
// Mechanism to connect to the ABCI application: socket | grpc
ABCI string `mapstructure:"abci"`
// TCP or UNIX socket address for the profiling server to listen on
ProfListenAddress string `mapstructure:"prof_laddr"`
// If true, query the ABCI app on connecting to a new peer
// so the app can decide if we should keep the connection or not
FilterPeers bool `mapstructure:"filter_peers"` // false
}
// DefaultBaseConfig returns a default base configuration for a Tendermint node
@@ -239,6 +238,8 @@ type RPCConfig struct {
// If you want to accept more significant number than the default, make sure
// you increase your OS limits.
// 0 - unlimited.
// Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
// 1024 - 40 - 10 - 50 = 924 = ~900
MaxOpenConnections int `mapstructure:"max_open_connections"`
}
@@ -248,11 +249,9 @@ func DefaultRPCConfig() *RPCConfig {
ListenAddress: "tcp://0.0.0.0:26657",
GRPCListenAddress: "",
GRPCMaxOpenConnections: 900, // no ipv4
GRPCMaxOpenConnections: 900,
Unsafe: false,
// should be < {ulimit -Sn} - {MaxNumPeers} - {N of wal, db and other open files}
// 1024 - 50 - 50 = 924 = ~900
Unsafe: false,
MaxOpenConnections: 900,
}
}
@@ -284,7 +283,6 @@ type P2PConfig struct {
Seeds string `mapstructure:"seeds"`
// Comma separated list of nodes to keep persistent connections to
// Do not add private peers to this list if you don't want them advertised
PersistentPeers string `mapstructure:"persistent_peers"`
// UPNP port forwarding
@@ -294,10 +292,14 @@ type P2PConfig struct {
AddrBook string `mapstructure:"addr_book_file"`
// Set true for strict address routability rules
// Set false for private or local networks
AddrBookStrict bool `mapstructure:"addr_book_strict"`
// Maximum number of peers to connect to
MaxNumPeers int `mapstructure:"max_num_peers"`
// Maximum number of inbound peers
MaxNumInboundPeers int `mapstructure:"max_num_inbound_peers"`
// Maximum number of outbound peers to connect to, excluding persistent peers
MaxNumOutboundPeers int `mapstructure:"max_num_outbound_peers"`
// Time to wait before flushing messages out on the connection, in ms
FlushThrottleTimeout int `mapstructure:"flush_throttle_timeout"`
@@ -347,11 +349,12 @@ func DefaultP2PConfig() *P2PConfig {
UPNP: false,
AddrBook: defaultAddrBookPath,
AddrBookStrict: true,
MaxNumPeers: 50,
MaxNumInboundPeers: 40,
MaxNumOutboundPeers: 10,
FlushThrottleTimeout: 100,
MaxPacketMsgPayloadSize: 1024, // 1 kB
SendRate: 512000, // 500 kB/s
RecvRate: 512000, // 500 kB/s
MaxPacketMsgPayloadSize: 1024, // 1 kB
SendRate: 5120000, // 5 mB/s
RecvRate: 5120000, // 5 mB/s
PexReactor: true,
SeedMode: false,
AllowDuplicateIP: true, // so non-breaking yet
@@ -418,8 +421,10 @@ func DefaultMempoolConfig() *MempoolConfig {
RecheckEmpty: true,
Broadcast: true,
WalPath: filepath.Join(defaultDataDir, "mempool.wal"),
Size: 100000,
CacheSize: 100000,
// Each signature verification takes .5ms, size reduced until we implement
// ABCI Recheck
Size: 5000,
CacheSize: 10000,
}
}
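
A back-of-the-envelope reading of the sizing comment above (illustration only, not code from the diff): at roughly 0.5 ms per signature verification, checking a mempool's worth of transactions costs about size × 0.5 ms, which is why the default size was lowered until ABCI Recheck is implemented.

// Hypothetical helper illustrating the cost estimate behind the smaller defaults.
const assumedSigVerifyMs = 0.5 // assumed cost of one signature verification, in ms

func checkTxCostMs(mempoolSize int) float64 {
	return float64(mempoolSize) * assumedSigVerifyMs // 5000 txs -> ~2500 ms; 100000 txs -> ~50000 ms
}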
@@ -464,6 +469,9 @@ type ConsensusConfig struct {
// Reactor sleep duration parameters are in milliseconds
PeerGossipSleepDuration int `mapstructure:"peer_gossip_sleep_duration"`
PeerQueryMaj23SleepDuration int `mapstructure:"peer_query_maj23_sleep_duration"`
// Block time parameters in milliseconds. Corresponds to the minimum time increment between consecutive blocks.
BlockTimeIota int `mapstructure:"blocktime_iota"`
}
// DefaultConsensusConfig returns a default configuration for the consensus service
@@ -482,6 +490,7 @@ func DefaultConsensusConfig() *ConsensusConfig {
CreateEmptyBlocksInterval: 0,
PeerGossipSleepDuration: 100,
PeerQueryMaj23SleepDuration: 2000,
BlockTimeIota: 1000,
}
}
@@ -498,9 +507,17 @@ func TestConsensusConfig() *ConsensusConfig {
cfg.SkipTimeoutCommit = true
cfg.PeerGossipSleepDuration = 5
cfg.PeerQueryMaj23SleepDuration = 250
cfg.BlockTimeIota = 10
return cfg
}
// MinValidVoteTime returns the minimum acceptable vote time, given the last block's time.
// See the [BFT time spec](https://godoc.org/github.com/tendermint/tendermint/docs/spec/consensus/bft-time.md).
func (cfg *ConsensusConfig) MinValidVoteTime(lastBlockTime time.Time) time.Time {
return lastBlockTime.
Add(time.Duration(cfg.BlockTimeIota) * time.Millisecond)
}
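
A minimal usage sketch of MinValidVoteTime, assuming the default BlockTimeIota of 1000 ms set above (the timestamp is hypothetical):

// exampleMinValidVoteTime shows the earliest timestamp this node would put on
// its own vote after a block at 12:00:00 UTC (see voteTime in consensus/state.go further down).
func exampleMinValidVoteTime() time.Time {
	cfg := DefaultConsensusConfig()
	lastBlockTime := time.Date(2018, 9, 7, 12, 0, 0, 0, time.UTC) // hypothetical previous block time
	return cfg.MinValidVoteTime(lastBlockTime)                    // 2018-09-07 12:00:01 UTC
}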
// WaitForTxs returns true if the consensus should wait for transactions before entering the propose step
func (cfg *ConsensusConfig) WaitForTxs() bool {
return !cfg.CreateEmptyBlocks || cfg.CreateEmptyBlocksInterval > 0
@@ -557,8 +574,8 @@ func (cfg *ConsensusConfig) SetWalFile(walFile string) {
//-----------------------------------------------------------------------------
// TxIndexConfig
// TxIndexConfig defines the configuration for the transaction
// indexer, including tags to index.
// TxIndexConfig defines the configuration for the transaction indexer,
// including tags to index.
type TxIndexConfig struct {
// What indexer to use for transactions
//
@@ -567,16 +584,21 @@ type TxIndexConfig struct {
// 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
Indexer string `mapstructure:"indexer"`
// Comma-separated list of tags to index (by default the only tag is tx hash)
// Comma-separated list of tags to index (by default the only tag is "tx.hash")
//
// You can also index transactions by height by adding "tx.height" tag here.
//
// It's recommended to index only a subset of tags due to possible memory
// bloat. This, of course, depends on the indexer's DB and the volume of
// transactions.
IndexTags string `mapstructure:"index_tags"`
// When set to true, tells indexer to index all tags. Note this may not be
// desirable (see the comment above). IndexTags takes precedence over
// IndexAllTags (i.e. when given both, IndexTags will be indexed).
// When set to true, tells indexer to index all tags (predefined tags:
// "tx.hash", "tx.height" and all tags from DeliverTx responses).
//
// Note this may not be desirable (see the comment above). IndexTags takes
// precedence over IndexAllTags (i.e. when given both, IndexTags will be
// indexed).
IndexAllTags bool `mapstructure:"index_all_tags"`
}
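
A hedged example of filling in the indexer settings above; "account.owner" is a made-up application tag, not something Tendermint defines.

// Sketch: index by height plus one hypothetical app-defined tag, in addition
// to the default tx hash, without turning on IndexAllTags.
var exampleTxIndexConfig = &TxIndexConfig{
	Indexer:      "kv",
	IndexTags:    "tx.height,account.owner", // "account.owner" is a hypothetical DeliverTx tag
	IndexAllTags: false,                     // keep false to avoid indexing every DeliverTx tag
}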

View File

@@ -81,7 +81,7 @@ fast_sync = {{ .BaseConfig.FastSync }}
db_backend = "{{ .BaseConfig.DBBackend }}"
# Database directory
db_path = "{{ js .BaseConfig.DBPath }}"
db_dir = "{{ js .BaseConfig.DBPath }}"
# Output level for logging, including package level options
log_level = "{{ .BaseConfig.LogLevel }}"
@@ -94,6 +94,10 @@ genesis_file = "{{ js .BaseConfig.Genesis }}"
# Path to the JSON file containing the private key to use as a validator in the consensus protocol
priv_validator_file = "{{ js .BaseConfig.PrivValidator }}"
# TCP or UNIX socket address for Tendermint to listen on for
# connections from an external PrivValidator process
priv_validator_laddr = "{{ .BaseConfig.PrivValidatorListenAddr }}"
# Path to the JSON file containing the private key to use for node authentication in the p2p protocol
node_key_file = "{{ js .BaseConfig.NodeKey}}"
@@ -124,6 +128,8 @@ grpc_laddr = "{{ .RPC.GRPCListenAddress }}"
# If you want to accept a larger number of connections than the default, make sure
# you increase your OS limits.
# 0 - unlimited.
# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
# 1024 - 40 - 10 - 50 = 924 = ~900
grpc_max_open_connections = {{ .RPC.GRPCMaxOpenConnections }}
# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool
@@ -134,6 +140,8 @@ unsafe = {{ .RPC.Unsafe }}
# If you want to accept a larger number of connections than the default, make sure
# you increase your OS limits.
# 0 - unlimited.
# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
# 1024 - 40 - 10 - 50 = 924 = ~900
max_open_connections = {{ .RPC.MaxOpenConnections }}
##### peer to peer configuration options #####
@@ -152,7 +160,6 @@ external_address = "{{ .P2P.ExternalAddress }}"
seeds = "{{ .P2P.Seeds }}"
# Comma separated list of nodes to keep persistent connections to
# Do not add private peers to this list if you don't want them advertised
persistent_peers = "{{ .P2P.PersistentPeers }}"
# UPNP port forwarding
@@ -162,13 +169,17 @@ upnp = {{ .P2P.UPNP }}
addr_book_file = "{{ js .P2P.AddrBook }}"
# Set true for strict address routability rules
# Set false for private or local networks
addr_book_strict = {{ .P2P.AddrBookStrict }}
# Time to wait before flushing messages out on the connection, in ms
flush_throttle_timeout = {{ .P2P.FlushThrottleTimeout }}
# Maximum number of peers to connect to
max_num_peers = {{ .P2P.MaxNumPeers }}
# Maximum number of inbound peers
max_num_inbound_peers = {{ .P2P.MaxNumInboundPeers }}
# Maximum number of outbound peers to connect to, excluding persistent peers
max_num_outbound_peers = {{ .P2P.MaxNumOutboundPeers }}
# Maximum size of a message packet payload, in bytes
max_packet_msg_payload_size = {{ .P2P.MaxPacketMsgPayloadSize }}
@@ -240,16 +251,21 @@ peer_query_maj23_sleep_duration = {{ .Consensus.PeerQueryMaj23SleepDuration }}
# 2) "kv" - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
indexer = "{{ .TxIndex.Indexer }}"
# Comma-separated list of tags to index (by default the only tag is tx hash)
# Comma-separated list of tags to index (by default the only tag is "tx.hash")
#
# You can also index transactions by height by adding "tx.height" tag here.
#
# It's recommended to index only a subset of tags due to possible memory
# bloat. This, of course, depends on the indexer's DB and the volume of
# transactions.
index_tags = "{{ .TxIndex.IndexTags }}"
# When set to true, tells indexer to index all tags. Note this may not be
# desirable (see the comment above). IndexTags takes precedence over
# IndexAllTags (i.e. when given both, IndexTags will be indexed).
# When set to true, tells indexer to index all tags (predefined tags:
# "tx.hash", "tx.height" and all tags from DeliverTx responses).
#
# Note this may not be desirable (see the comment above). IndexTags takes
# precedence over IndexAllTags (i.e. when given both, IndexTags will be
# indexed).
index_all_tags = {{ .TxIndex.IndexAllTags }}
##### instrumentation configuration options #####

View File

@@ -2,14 +2,15 @@ package consensus
import (
"context"
"fmt"
"sync"
"testing"
"time"
"github.com/stretchr/testify/require"
cmn "github.com/tendermint/tendermint/libs/common"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/types"
cmn "github.com/tendermint/tendermint/libs/common"
)
func init() {
@@ -156,8 +157,8 @@ func TestByzantine(t *testing.T) {
case <-done:
case <-tick.C:
for i, reactor := range reactors {
t.Log(cmn.Fmt("Consensus Reactor %v", i))
t.Log(cmn.Fmt("%v", reactor))
t.Log(fmt.Sprintf("Consensus Reactor %v", i))
t.Log(fmt.Sprintf("%v", reactor))
}
t.Fatalf("Timed out waiting for all validators to commit first block")
}

View File

@@ -17,14 +17,15 @@ import (
bc "github.com/tendermint/tendermint/blockchain"
cfg "github.com/tendermint/tendermint/config"
cstypes "github.com/tendermint/tendermint/consensus/types"
cmn "github.com/tendermint/tendermint/libs/common"
dbm "github.com/tendermint/tendermint/libs/db"
"github.com/tendermint/tendermint/libs/log"
mempl "github.com/tendermint/tendermint/mempool"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/privval"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
cmn "github.com/tendermint/tendermint/libs/common"
dbm "github.com/tendermint/tendermint/libs/db"
"github.com/tendermint/tendermint/libs/log"
tmtime "github.com/tendermint/tendermint/types/time"
"github.com/tendermint/tendermint/abci/example/counter"
"github.com/tendermint/tendermint/abci/example/kvstore"
@@ -75,7 +76,7 @@ func (vs *validatorStub) signVote(voteType byte, hash []byte, header types.PartS
ValidatorAddress: vs.PrivValidator.GetAddress(),
Height: vs.Height,
Round: vs.Round,
Timestamp: time.Now().UTC(),
Timestamp: tmtime.Now(),
Type: voteType,
BlockID: types.BlockID{hash, header},
}
@@ -348,13 +349,13 @@ func randConsensusNet(nValidators int, testName string, tickerFunc func() Timeou
for i := 0; i < nValidators; i++ {
stateDB := dbm.NewMemDB() // each state needs its own db
state, _ := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc)
thisConfig := ResetConfig(cmn.Fmt("%s_%d", testName, i))
thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
for _, opt := range configOpts {
opt(thisConfig)
}
ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
app := appFunc()
vals := types.TM2PB.Validators(state.Validators)
vals := types.TM2PB.ValidatorUpdates(state.Validators)
app.InitChain(abci.RequestInitChain{Validators: vals})
css[i] = newConsensusStateWithConfig(thisConfig, state, privVals[i], app)
@@ -372,18 +373,21 @@ func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerF
for i := 0; i < nPeers; i++ {
stateDB := dbm.NewMemDB() // each state needs its own db
state, _ := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc)
thisConfig := ResetConfig(cmn.Fmt("%s_%d", testName, i))
thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
var privVal types.PrivValidator
if i < nValidators {
privVal = privVals[i]
} else {
_, tempFilePath := cmn.Tempfile("priv_validator_")
privVal = privval.GenFilePV(tempFilePath)
tempFile, err := ioutil.TempFile("", "priv_validator_")
if err != nil {
panic(err)
}
privVal = privval.GenFilePV(tempFile.Name())
}
app := appFunc()
vals := types.TM2PB.Validators(state.Validators)
vals := types.TM2PB.ValidatorUpdates(state.Validators)
app.InitChain(abci.RequestInitChain{Validators: vals})
css[i] = newConsensusStateWithConfig(thisConfig, state, privVal, app)
@@ -420,7 +424,7 @@ func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.G
sort.Sort(types.PrivValidatorsByAddress(privValidators))
return &types.GenesisDoc{
GenesisTime: time.Now(),
GenesisTime: tmtime.Now(),
ChainID: config.ChainID(),
Validators: validators,
}, privValidators
@@ -429,7 +433,7 @@ func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.G
func randGenesisState(numValidators int, randPower bool, minPower int64) (sm.State, []types.PrivValidator) {
genDoc, privValidators := randGenesisDoc(numValidators, randPower, minPower)
s0, _ := sm.MakeGenesisState(genDoc)
db := dbm.NewMemDB()
db := dbm.NewMemDB() // remove this ?
sm.SaveState(db, s0)
return s0, privValidators
}

View File

@@ -10,7 +10,6 @@ import (
"github.com/tendermint/tendermint/abci/example/code"
abci "github.com/tendermint/tendermint/abci/types"
cmn "github.com/tendermint/tendermint/libs/common"
"github.com/tendermint/tendermint/types"
)
@@ -89,7 +88,7 @@ func deliverTxsRange(cs *ConsensusState, start, end int) {
binary.BigEndian.PutUint64(txBytes, uint64(i))
err := cs.mempool.CheckTx(txBytes, nil)
if err != nil {
panic(cmn.Fmt("Error after CheckTx: %v", err))
panic(fmt.Sprintf("Error after CheckTx: %v", err))
}
}
}
@@ -100,7 +99,7 @@ func TestMempoolTxConcurrentWithCommit(t *testing.T) {
height, round := cs.Height, cs.Round
newBlockCh := subscribe(cs.eventBus, types.EventQueryNewBlock)
NTxs := 10000
NTxs := 3000
go deliverTxsRange(cs, 0, NTxs)
startTestRound(cs, height, round)
@@ -126,7 +125,7 @@ func TestMempoolRmBadTx(t *testing.T) {
binary.BigEndian.PutUint64(txBytes, uint64(0))
resDeliver := app.DeliverTx(txBytes)
assert.False(t, resDeliver.IsErr(), cmn.Fmt("expected no error. got %v", resDeliver))
assert.False(t, resDeliver.IsErr(), fmt.Sprintf("expected no error. got %v", resDeliver))
resCommit := app.Commit()
assert.True(t, len(resCommit.Data) > 0)
@@ -149,7 +148,7 @@ func TestMempoolRmBadTx(t *testing.T) {
// check for the tx
for {
txs := cs.mempool.Reap(1)
txs := cs.mempool.ReapMaxBytes(len(txBytes))
if len(txs) == 0 {
emptyMempoolCh <- struct{}{}
return
@@ -190,7 +189,7 @@ func NewCounterApplication() *CounterApplication {
}
func (app *CounterApplication) Info(req abci.RequestInfo) abci.ResponseInfo {
return abci.ResponseInfo{Data: cmn.Fmt("txs:%v", app.txCount)}
return abci.ResponseInfo{Data: fmt.Sprintf("txs:%v", app.txCount)}
}
func (app *CounterApplication) DeliverTx(tx []byte) abci.ResponseDeliverTx {

View File

@@ -17,6 +17,7 @@ import (
"github.com/tendermint/tendermint/p2p"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
tmtime "github.com/tendermint/tendermint/types/time"
)
const (
@@ -58,9 +59,6 @@ func NewConsensusReactor(consensusState *ConsensusState, fastSync bool) *Consens
// broadcasted to other peers and starting state if we're not in fast sync.
func (conR *ConsensusReactor) OnStart() error {
conR.Logger.Info("ConsensusReactor ", "fastSync", conR.FastSync())
if err := conR.BaseReactor.OnStart(); err != nil {
return err
}
conR.subscribeToBroadcastEvents()
@@ -77,7 +75,6 @@ func (conR *ConsensusReactor) OnStart() error {
// OnStop implements BaseService by unsubscribing from events and stopping
// state.
func (conR *ConsensusReactor) OnStop() {
conR.BaseReactor.OnStop()
conR.unsubscribeFromBroadcastEvents()
conR.conS.Stop()
if !conR.FastSync() {
@@ -245,7 +242,7 @@ func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
"height", hb.Height, "round", hb.Round, "sequence", hb.Sequence,
"valIdx", hb.ValidatorIndex, "valAddr", hb.ValidatorAddress)
default:
conR.Logger.Error(cmn.Fmt("Unknown message type %v", reflect.TypeOf(msg)))
conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
}
case DataChannel:
@@ -266,7 +263,7 @@ func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
}
conR.conS.peerMsgQueue <- msgInfo{msg, src.ID()}
default:
conR.Logger.Error(cmn.Fmt("Unknown message type %v", reflect.TypeOf(msg)))
conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
}
case VoteChannel:
@@ -291,7 +288,7 @@ func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
default:
// don't punish (leave room for soft upgrades)
conR.Logger.Error(cmn.Fmt("Unknown message type %v", reflect.TypeOf(msg)))
conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
}
case VoteSetBitsChannel:
@@ -323,11 +320,11 @@ func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
}
default:
// don't punish (leave room for soft upgrades)
conR.Logger.Error(cmn.Fmt("Unknown message type %v", reflect.TypeOf(msg)))
conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
}
default:
conR.Logger.Error(cmn.Fmt("Unknown chId %X", chID))
conR.Logger.Error(fmt.Sprintf("Unknown chId %X", chID))
}
if err != nil {
@@ -486,7 +483,7 @@ OUTER_LOOP:
if prs.ProposalBlockParts == nil {
blockMeta := conR.conS.blockStore.LoadBlockMeta(prs.Height)
if blockMeta == nil {
cmn.PanicCrisis(cmn.Fmt("Failed to load block %d when blockStore is at %d",
cmn.PanicCrisis(fmt.Sprintf("Failed to load block %d when blockStore is at %d",
prs.Height, conR.conS.blockStore.Height()))
}
ps.InitProposalBlockParts(blockMeta.BlockID.PartsHeader)
@@ -1038,7 +1035,7 @@ func (ps *PeerState) ensureCatchupCommitRound(height int64, round int, numValida
NOTE: This is wrong, 'round' could change.
e.g. if orig round is not the same as block LastCommit round.
if ps.CatchupCommitRound != -1 && ps.CatchupCommitRound != round {
cmn.PanicSanity(cmn.Fmt("Conflicting CatchupCommitRound. Height: %v, Orig: %v, New: %v", height, ps.CatchupCommitRound, round))
cmn.PanicSanity(fmt.Sprintf("Conflicting CatchupCommitRound. Height: %v, Orig: %v, New: %v", height, ps.CatchupCommitRound, round))
}
*/
if ps.PRS.CatchupCommitRound == round {
@@ -1142,7 +1139,7 @@ func (ps *PeerState) SetHasVote(vote *types.Vote) {
}
func (ps *PeerState) setHasVote(height int64, round int, type_ byte, index int) {
logger := ps.logger.With("peerH/R", cmn.Fmt("%d/%d", ps.PRS.Height, ps.PRS.Round), "H/R", cmn.Fmt("%d/%d", height, round))
logger := ps.logger.With("peerH/R", fmt.Sprintf("%d/%d", ps.PRS.Height, ps.PRS.Round), "H/R", fmt.Sprintf("%d/%d", height, round))
logger.Debug("setHasVote", "type", type_, "index", index)
// NOTE: some may be nil BitArrays -> no side effects.
@@ -1169,7 +1166,7 @@ func (ps *PeerState) ApplyNewRoundStepMessage(msg *NewRoundStepMessage) {
psCatchupCommitRound := ps.PRS.CatchupCommitRound
psCatchupCommit := ps.PRS.CatchupCommit
startTime := time.Now().Add(-1 * time.Duration(msg.SecondsSinceStartTime) * time.Second)
startTime := tmtime.Now().Add(-1 * time.Duration(msg.SecondsSinceStartTime) * time.Second)
ps.PRS.Height = msg.Height
ps.PRS.Round = msg.Round
ps.PRS.Step = msg.Step

View File

@@ -4,15 +4,23 @@ import (
"context"
"fmt"
"os"
"path"
"runtime"
"runtime/pprof"
"sync"
"testing"
"time"
abcicli "github.com/tendermint/tendermint/abci/client"
"github.com/tendermint/tendermint/abci/example/kvstore"
abci "github.com/tendermint/tendermint/abci/types"
bc "github.com/tendermint/tendermint/blockchain"
cmn "github.com/tendermint/tendermint/libs/common"
dbm "github.com/tendermint/tendermint/libs/db"
"github.com/tendermint/tendermint/libs/log"
mempl "github.com/tendermint/tendermint/mempool"
sm "github.com/tendermint/tendermint/state"
tmtime "github.com/tendermint/tendermint/types/time"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/p2p"
@@ -91,6 +99,121 @@ func TestReactorBasic(t *testing.T) {
}, css)
}
// Ensure we can process blocks with evidence
func TestReactorWithEvidence(t *testing.T) {
nValidators := 4
testName := "consensus_reactor_test"
tickerFunc := newMockTickerFunc(true)
appFunc := newCounter
// heed the advice from https://www.sandimetz.com/blog/2016/1/20/the-wrong-abstraction
// to unroll unwieldy abstractions. Here we duplicate the code from:
// css := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
genDoc, privVals := randGenesisDoc(nValidators, false, 30)
css := make([]*ConsensusState, nValidators)
logger := consensusLogger()
for i := 0; i < nValidators; i++ {
stateDB := dbm.NewMemDB() // each state needs its own db
state, _ := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc)
thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
app := appFunc()
vals := types.TM2PB.ValidatorUpdates(state.Validators)
app.InitChain(abci.RequestInitChain{Validators: vals})
pv := privVals[i]
// duplicate code from:
// css[i] = newConsensusStateWithConfig(thisConfig, state, privVals[i], app)
blockDB := dbm.NewMemDB()
blockStore := bc.NewBlockStore(blockDB)
// one for mempool, one for consensus
mtx := new(sync.Mutex)
proxyAppConnMem := abcicli.NewLocalClient(mtx, app)
proxyAppConnCon := abcicli.NewLocalClient(mtx, app)
// Make Mempool
mempool := mempl.NewMempool(thisConfig.Mempool, proxyAppConnMem, 0)
mempool.SetLogger(log.TestingLogger().With("module", "mempool"))
if thisConfig.Consensus.WaitForTxs() {
mempool.EnableTxsAvailable()
}
// mock the evidence pool
// everyone includes evidence of another double signing
vIdx := (i + 1) % nValidators
evpool := newMockEvidencePool(privVals[vIdx].GetAddress())
// Make ConsensusState
blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyAppConnCon, mempool, evpool)
cs := NewConsensusState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool)
cs.SetLogger(log.TestingLogger().With("module", "consensus"))
cs.SetPrivValidator(pv)
eventBus := types.NewEventBus()
eventBus.SetLogger(log.TestingLogger().With("module", "events"))
eventBus.Start()
cs.SetEventBus(eventBus)
cs.SetTimeoutTicker(tickerFunc())
cs.SetLogger(logger.With("validator", i, "module", "consensus"))
css[i] = cs
}
reactors, eventChans, eventBuses := startConsensusNet(t, css, nValidators)
defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
// wait till everyone makes the first new block with no evidence
timeoutWaitGroup(t, nValidators, func(j int) {
blockI := <-eventChans[j]
block := blockI.(types.EventDataNewBlock).Block
assert.True(t, len(block.Evidence.Evidence) == 0)
}, css)
// second block should have evidence
timeoutWaitGroup(t, nValidators, func(j int) {
blockI := <-eventChans[j]
block := blockI.(types.EventDataNewBlock).Block
assert.True(t, len(block.Evidence.Evidence) > 0)
}, css)
}
// mock evidence pool returns no evidence for block 1,
// and returns one piece for all higher blocks. The one piece
// is for a given validator at block 1.
type mockEvidencePool struct {
height int
ev []types.Evidence
}
func newMockEvidencePool(val []byte) *mockEvidencePool {
return &mockEvidencePool{
ev: []types.Evidence{types.NewMockGoodEvidence(1, 1, val)},
}
}
// NOTE: maxBytes is ignored
func (m *mockEvidencePool) PendingEvidence(maxBytes int) []types.Evidence {
if m.height > 0 {
return m.ev
}
return nil
}
func (m *mockEvidencePool) AddEvidence(types.Evidence) error { return nil }
func (m *mockEvidencePool) Update(block *types.Block, state sm.State) {
if m.height > 0 {
if len(block.Evidence.Evidence) == 0 {
panic("block has no evidence")
}
}
m.height++
}
//------------------------------------
// Ensure a testnet sends proposal heartbeats and makes blocks when there are txs
func TestReactorProposalHeartbeats(t *testing.T) {
N := 4
@@ -174,14 +297,14 @@ func TestReactorRecordsBlockParts(t *testing.T) {
require.Equal(t, 1, ps.BlockPartsSent(), "number of block parts sent should stay the same")
}
// Test we record votes from other peers
// Test we record votes from other peers.
func TestReactorRecordsVotes(t *testing.T) {
// create dummy peer
// Create dummy peer.
peer := p2pdummy.NewPeer()
ps := NewPeerState(peer).SetLogger(log.TestingLogger())
peer.Set(types.PeerStateKey, ps)
// create reactor
// Create reactor.
css := randConsensusNet(1, "consensus_reactor_records_votes_test", newMockTickerFunc(true), newPersistentKVStore)
reactor := NewConsensusReactor(css[0], false) // so we dont start the consensus states
reactor.SetEventBus(css[0].eventBus)
@@ -199,7 +322,7 @@ func TestReactorRecordsVotes(t *testing.T) {
ValidatorAddress: val.Address,
Height: 2,
Round: 0,
Timestamp: time.Now().UTC(),
Timestamp: tmtime.Now(),
Type: types.VoteTypePrevote,
BlockID: types.BlockID{},
}
@@ -419,7 +542,7 @@ func waitForAndValidateBlock(t *testing.T, n int, activeVals map[string]struct{}
err := validateBlock(newBlock, activeVals)
assert.Nil(t, err)
for _, tx := range txs {
css[j].mempool.CheckTx(tx, nil)
err := css[j].mempool.CheckTx(tx, nil)
assert.Nil(t, err)
}
}, css)

View File

@@ -227,7 +227,7 @@ func (h *Handshaker) NBlocks() int {
func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error {
// Handshake is done via ABCI Info on the query conn.
res, err := proxyApp.Query().InfoSync(abci.RequestInfo{version.Version})
res, err := proxyApp.Query().InfoSync(abci.RequestInfo{Version: version.Version})
if err != nil {
return fmt.Errorf("Error calling Info: %v", err)
}
@@ -264,15 +264,15 @@ func (h *Handshaker) ReplayBlocks(state sm.State, appHash []byte, appBlockHeight
stateBlockHeight := state.LastBlockHeight
h.logger.Info("ABCI Replay Blocks", "appHeight", appBlockHeight, "storeHeight", storeBlockHeight, "stateHeight", stateBlockHeight)
// If appBlockHeight == 0 it means that we are at genesis and hence should send InitChain
// If appBlockHeight == 0 it means that we are at genesis and hence should send InitChain.
if appBlockHeight == 0 {
validators := types.TM2PB.Validators(state.Validators)
nextVals := types.TM2PB.ValidatorUpdates(state.NextValidators) // state.Validators would work too.
csParams := types.TM2PB.ConsensusParams(h.genDoc.ConsensusParams)
req := abci.RequestInitChain{
Time: h.genDoc.GenesisTime.Unix(), // TODO
Time: h.genDoc.GenesisTime,
ChainId: h.genDoc.ChainID,
ConsensusParams: csParams,
Validators: validators,
Validators: nextVals,
AppStateBytes: h.genDoc.AppState,
}
res, err := proxyApp.Consensus().InitChainSync(req)
@@ -280,11 +280,9 @@ func (h *Handshaker) ReplayBlocks(state sm.State, appHash []byte, appBlockHeight
return nil, err
}
// if the app returned validators
// or consensus params, update the state
// with the them
// If the app returned validators or consensus params, update the state.
if len(res.Validators) > 0 {
vals, err := types.PB2TM.Validators(res.Validators)
vals, err := types.PB2TM.ValidatorUpdates(res.Validators)
if err != nil {
return nil, err
}
@@ -296,7 +294,7 @@ func (h *Handshaker) ReplayBlocks(state sm.State, appHash []byte, appBlockHeight
sm.SaveState(h.stateDB, state)
}
// First handle edge cases and constraints on the storeBlockHeight
// First handle edge cases and constraints on the storeBlockHeight.
if storeBlockHeight == 0 {
return appHash, checkAppHash(state, appHash)
@@ -306,11 +304,11 @@ func (h *Handshaker) ReplayBlocks(state sm.State, appHash []byte, appBlockHeight
} else if storeBlockHeight < stateBlockHeight {
// the state should never be ahead of the store (this is under tendermint's control)
cmn.PanicSanity(cmn.Fmt("StateBlockHeight (%d) > StoreBlockHeight (%d)", stateBlockHeight, storeBlockHeight))
cmn.PanicSanity(fmt.Sprintf("StateBlockHeight (%d) > StoreBlockHeight (%d)", stateBlockHeight, storeBlockHeight))
} else if storeBlockHeight > stateBlockHeight+1 {
// store should be at most one ahead of the state (this is under tendermint's control)
cmn.PanicSanity(cmn.Fmt("StoreBlockHeight (%d) > StateBlockHeight + 1 (%d)", storeBlockHeight, stateBlockHeight+1))
cmn.PanicSanity(fmt.Sprintf("StoreBlockHeight (%d) > StateBlockHeight + 1 (%d)", storeBlockHeight, stateBlockHeight+1))
}
var err error

View File

@@ -13,12 +13,12 @@ import (
bc "github.com/tendermint/tendermint/blockchain"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
cmn "github.com/tendermint/tendermint/libs/common"
dbm "github.com/tendermint/tendermint/libs/db"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
)
const (
@@ -34,7 +34,7 @@ func RunReplayFile(config cfg.BaseConfig, csConfig *cfg.ConsensusConfig, console
consensusState := newConsensusStateForReplay(config, csConfig)
if err := consensusState.ReplayFile(csConfig.WalFile(), console); err != nil {
cmn.Exit(cmn.Fmt("Error during consensus replay: %v", err))
cmn.Exit(fmt.Sprintf("Error during consensus replay: %v", err))
}
}
@@ -302,12 +302,12 @@ func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusCo
NewHandshaker(stateDB, state, blockStore, gdoc))
err = proxyApp.Start()
if err != nil {
cmn.Exit(cmn.Fmt("Error starting proxy app conns: %v", err))
cmn.Exit(fmt.Sprintf("Error starting proxy app conns: %v", err))
}
eventBus := types.NewEventBus()
if err := eventBus.Start(); err != nil {
cmn.Exit(cmn.Fmt("Failed to start event bus: %v", err))
cmn.Exit(fmt.Sprintf("Failed to start event bus: %v", err))
}
mempool, evpool := sm.MockMempool{}, sm.MockEvidencePool{}

View File

@@ -3,7 +3,6 @@ package consensus
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"io/ioutil"
@@ -20,15 +19,14 @@ import (
abci "github.com/tendermint/tendermint/abci/types"
crypto "github.com/tendermint/tendermint/crypto"
auto "github.com/tendermint/tendermint/libs/autofile"
cmn "github.com/tendermint/tendermint/libs/common"
dbm "github.com/tendermint/tendermint/libs/db"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/privval"
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tendermint/libs/log"
)
var consensusReplayConfig *cfg.Config
@@ -376,7 +374,7 @@ func testHandshakeReplay(t *testing.T, nBlocks int, mode uint) {
defer proxyApp.Stop()
// get the latest app hash from the app
res, err := proxyApp.Query().InfoSync(abci.RequestInfo{""})
res, err := proxyApp.Query().InfoSync(abci.RequestInfo{Version: ""})
if err != nil {
t.Fatal(err)
}
@@ -418,7 +416,7 @@ func buildAppStateFromChain(proxyApp proxy.AppConns, stateDB dbm.DB,
}
defer proxyApp.Stop()
validators := types.TM2PB.Validators(state.Validators)
validators := types.TM2PB.ValidatorUpdates(state.Validators)
if _, err := proxyApp.Consensus().InitChainSync(abci.RequestInitChain{
Validators: validators,
}); err != nil {
@@ -455,7 +453,7 @@ func buildTMStateFromChain(config *cfg.Config, stateDB dbm.DB, state sm.State, c
}
defer proxyApp.Stop()
validators := types.TM2PB.Validators(state.Validators)
validators := types.TM2PB.ValidatorUpdates(state.Validators)
if _, err := proxyApp.Consensus().InitChainSync(abci.RequestInitChain{
Validators: validators,
}); err != nil {
@@ -494,7 +492,7 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) {
return nil, nil, err
}
if !found {
return nil, nil, errors.New(cmn.Fmt("WAL does not contain height %d.", 1))
return nil, nil, fmt.Errorf("WAL does not contain height %d.", 1)
}
defer gr.Close() // nolint: errcheck
@@ -531,11 +529,11 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) {
panic(err)
}
if block.Height != height+1 {
panic(cmn.Fmt("read bad block from wal. got height %d, expected %d", block.Height, height+1))
panic(fmt.Sprintf("read bad block from wal. got height %d, expected %d", block.Height, height+1))
}
commitHeight := thisBlockCommit.Precommits[0].Height
if commitHeight != height+1 {
panic(cmn.Fmt("commit doesnt match. got height %d, expected %d", commitHeight, height+1))
panic(fmt.Sprintf("commit doesnt match. got height %d, expected %d", commitHeight, height+1))
}
blocks = append(blocks, block)
commits = append(commits, thisBlockCommit)
@@ -564,11 +562,11 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) {
panic(err)
}
if block.Height != height+1 {
panic(cmn.Fmt("read bad block from wal. got height %d, expected %d", block.Height, height+1))
panic(fmt.Sprintf("read bad block from wal. got height %d, expected %d", block.Height, height+1))
}
commitHeight := thisBlockCommit.Precommits[0].Height
if commitHeight != height+1 {
panic(cmn.Fmt("commit doesnt match. got height %d, expected %d", commitHeight, height+1))
panic(fmt.Sprintf("commit doesnt match. got height %d, expected %d", commitHeight, height+1))
}
blocks = append(blocks, block)
commits = append(commits, thisBlockCommit)
@@ -641,7 +639,7 @@ func (bs *mockBlockStore) LoadSeenCommit(height int64) *types.Commit {
func TestInitChainUpdateValidators(t *testing.T) {
val, _ := types.RandValidator(true, 10)
vals := types.NewValidatorSet([]*types.Validator{val})
app := &initChainApp{vals: types.TM2PB.Validators(vals)}
app := &initChainApp{vals: types.TM2PB.ValidatorUpdates(vals)}
clientCreator := proxy.NewLocalClientCreator(app)
config := ResetConfig("proxy_test_")
@@ -668,7 +666,7 @@ func TestInitChainUpdateValidators(t *testing.T) {
assert.Equal(t, newValAddr, expectValAddr)
}
func newInitChainApp(vals []abci.Validator) *initChainApp {
func newInitChainApp(vals []abci.ValidatorUpdate) *initChainApp {
return &initChainApp{
vals: vals,
}
@@ -677,7 +675,7 @@ func newInitChainApp(vals []abci.Validator) *initChainApp {
// returns the vals on InitChain
type initChainApp struct {
abci.BaseApplication
vals []abci.Validator
vals []abci.ValidatorUpdate
}
func (ica *initChainApp) InitChain(req abci.RequestInitChain) abci.ResponseInitChain {

View File

@@ -12,6 +12,7 @@ import (
fail "github.com/ebuchman/fail-test"
cmn "github.com/tendermint/tendermint/libs/common"
"github.com/tendermint/tendermint/libs/log"
tmtime "github.com/tendermint/tendermint/types/time"
cfg "github.com/tendermint/tendermint/config"
cstypes "github.com/tendermint/tendermint/consensus/types"
@@ -74,14 +75,13 @@ type ConsensusState struct {
privValidator types.PrivValidator // for signing votes
// services for creating and executing blocks
// TODO: encapsulate all of this in one "BlockManager"
blockExec *sm.BlockExecutor
blockStore sm.BlockStore
mempool sm.Mempool
evpool sm.EvidencePool
// internal state
mtx sync.Mutex
mtx sync.RWMutex
cstypes.RoundState
state sm.State // State until height-1.
@@ -154,6 +154,7 @@ func NewConsensusState(
cs.setProposal = cs.defaultSetProposal
cs.updateToState(state)
// Don't call scheduleRound0 yet.
// We do that upon Start().
cs.reconstructLastCommit(state)
@@ -187,20 +188,29 @@ func WithMetrics(metrics *Metrics) CSOption {
// String returns a string.
func (cs *ConsensusState) String() string {
// better not to access shared variables
return cmn.Fmt("ConsensusState") //(H:%v R:%v S:%v", cs.Height, cs.Round, cs.Step)
return fmt.Sprintf("ConsensusState") //(H:%v R:%v S:%v", cs.Height, cs.Round, cs.Step)
}
// GetState returns a copy of the chain state.
func (cs *ConsensusState) GetState() sm.State {
cs.mtx.RLock()
defer cs.mtx.RUnlock()
return cs.state.Copy()
}
// GetLastHeight returns the last height committed.
// If there were no blocks, returns 0.
func (cs *ConsensusState) GetLastHeight() int64 {
cs.mtx.Lock()
defer cs.mtx.Unlock()
return cs.state.Copy()
return cs.RoundState.Height - 1
}
// GetRoundState returns a shallow copy of the internal consensus state.
func (cs *ConsensusState) GetRoundState() *cstypes.RoundState {
cs.mtx.Lock()
defer cs.mtx.Unlock()
cs.mtx.RLock()
defer cs.mtx.RUnlock()
rs := cs.RoundState // copy
return &rs
@@ -208,24 +218,24 @@ func (cs *ConsensusState) GetRoundState() *cstypes.RoundState {
// GetRoundStateJSON returns a json of RoundState, marshalled using go-amino.
func (cs *ConsensusState) GetRoundStateJSON() ([]byte, error) {
cs.mtx.Lock()
defer cs.mtx.Unlock()
cs.mtx.RLock()
defer cs.mtx.RUnlock()
return cdc.MarshalJSON(cs.RoundState)
}
// GetRoundStateSimpleJSON returns a json of RoundStateSimple, marshalled using go-amino.
func (cs *ConsensusState) GetRoundStateSimpleJSON() ([]byte, error) {
cs.mtx.Lock()
defer cs.mtx.Unlock()
cs.mtx.RLock()
defer cs.mtx.RUnlock()
return cdc.MarshalJSON(cs.RoundState.RoundStateSimple())
}
// GetValidators returns a copy of the current validators.
func (cs *ConsensusState) GetValidators() (int64, []*types.Validator) {
cs.mtx.Lock()
defer cs.mtx.Unlock()
cs.mtx.RLock()
defer cs.mtx.RUnlock()
return cs.state.LastBlockHeight, cs.state.Validators.Copy().Validators
}
@@ -245,8 +255,8 @@ func (cs *ConsensusState) SetTimeoutTicker(timeoutTicker TimeoutTicker) {
// LoadCommit loads the commit for a given height.
func (cs *ConsensusState) LoadCommit(height int64) *types.Commit {
cs.mtx.Lock()
defer cs.mtx.Unlock()
cs.mtx.RLock()
defer cs.mtx.RUnlock()
if height == cs.blockStore.Height() {
return cs.blockStore.LoadSeenCommit(height)
}
@@ -413,8 +423,8 @@ func (cs *ConsensusState) updateRoundStep(round int, step cstypes.RoundStepType)
// enterNewRound(height, 0) at cs.StartTime.
func (cs *ConsensusState) scheduleRound0(rs *cstypes.RoundState) {
//cs.Logger.Info("scheduleRound0", "now", time.Now(), "startTime", cs.StartTime)
sleepDuration := rs.StartTime.Sub(time.Now()) // nolint: gotype, gosimple
//cs.Logger.Info("scheduleRound0", "now", tmtime.Now(), "startTime", cs.StartTime)
sleepDuration := rs.StartTime.Sub(tmtime.Now()) // nolint: gotype, gosimple
cs.scheduleTimeout(sleepDuration, rs.Height, 0, cstypes.RoundStepNewHeight)
}
@@ -451,7 +461,7 @@ func (cs *ConsensusState) reconstructLastCommit(state sm.State) {
}
added, err := lastPrecommits.AddVote(precommit)
if !added || err != nil {
cmn.PanicCrisis(cmn.Fmt("Failed to reconstruct LastCommit: %v", err))
cmn.PanicCrisis(fmt.Sprintf("Failed to reconstruct LastCommit: %v", err))
}
}
if !lastPrecommits.HasTwoThirdsMajority() {
@@ -464,13 +474,13 @@ func (cs *ConsensusState) reconstructLastCommit(state sm.State) {
// The round becomes 0 and cs.Step becomes cstypes.RoundStepNewHeight.
func (cs *ConsensusState) updateToState(state sm.State) {
if cs.CommitRound > -1 && 0 < cs.Height && cs.Height != state.LastBlockHeight {
cmn.PanicSanity(cmn.Fmt("updateToState() expected state height of %v but found %v",
cmn.PanicSanity(fmt.Sprintf("updateToState() expected state height of %v but found %v",
cs.Height, state.LastBlockHeight))
}
if !cs.state.IsEmpty() && cs.state.LastBlockHeight+1 != cs.Height {
// This might happen when someone else is mutating cs.state.
// Someone forgot to pass in state.Copy() somewhere?!
cmn.PanicSanity(cmn.Fmt("Inconsistent cs.state.LastBlockHeight+1 %v vs cs.Height %v",
cmn.PanicSanity(fmt.Sprintf("Inconsistent cs.state.LastBlockHeight+1 %v vs cs.Height %v",
cs.state.LastBlockHeight+1, cs.Height))
}
@@ -507,7 +517,7 @@ func (cs *ConsensusState) updateToState(state sm.State) {
// to be gathered for the first block.
// And alternative solution that relies on clocks:
// cs.StartTime = state.LastBlockTime.Add(timeoutCommit)
cs.StartTime = cs.config.Commit(time.Now())
cs.StartTime = cs.config.Commit(tmtime.Now())
} else {
cs.StartTime = cs.config.Commit(cs.CommitTime)
}
@@ -553,9 +563,30 @@ func (cs *ConsensusState) newStep() {
// Updates (state transitions) happen on timeouts, complete proposals, and 2/3 majorities.
// ConsensusState must be locked before any internal state is updated.
func (cs *ConsensusState) receiveRoutine(maxSteps int) {
onExit := func(cs *ConsensusState) {
// NOTE: the internalMsgQueue may have signed messages from our
// priv_val that haven't hit the WAL, but it's ok because
// priv_val tracks LastSig
// close wal now that we're done writing to it
cs.wal.Stop()
cs.wal.Wait()
close(cs.done)
}
defer func() {
if r := recover(); r != nil {
cs.Logger.Error("CONSENSUS FAILURE!!!", "err", r, "stack", string(debug.Stack()))
// stop gracefully
//
// NOTE: We most probably shouldn't be running any further when there is
// some unexpected panic. Some unknown error happened, and so we don't
// know if that will result in the validator signing an invalid thing. It
// might be worthwhile to explore a mechanism for manual resuming via
// some console or secure RPC system, but for now, halting the chain upon
// unexpected consensus bugs sounds like the better option.
onExit(cs)
}
}()
@@ -571,8 +602,8 @@ func (cs *ConsensusState) receiveRoutine(maxSteps int) {
var mi msgInfo
select {
case height := <-cs.mempool.TxsAvailable():
cs.handleTxsAvailable(height)
case <-cs.mempool.TxsAvailable():
cs.handleTxsAvailable()
case mi = <-cs.peerMsgQueue:
cs.wal.Write(mi)
// handles proposals, block parts, votes
@@ -588,16 +619,7 @@ func (cs *ConsensusState) receiveRoutine(maxSteps int) {
// go to the next step
cs.handleTimeout(ti, rs)
case <-cs.Quit():
// NOTE: the internalMsgQueue may have signed messages from our
// priv_val that haven't hit the WAL, but it's ok because
// priv_val tracks LastSig
// close wal now that we're done writing to it
cs.wal.Stop()
cs.wal.Wait()
close(cs.done)
onExit(cs)
return
}
}
@@ -678,16 +700,16 @@ func (cs *ConsensusState) handleTimeout(ti timeoutInfo, rs cstypes.RoundState) {
cs.eventBus.PublishEventTimeoutWait(cs.RoundStateEvent())
cs.enterNewRound(ti.Height, ti.Round+1)
default:
panic(cmn.Fmt("Invalid timeout step: %v", ti.Step))
panic(fmt.Sprintf("Invalid timeout step: %v", ti.Step))
}
}
func (cs *ConsensusState) handleTxsAvailable(height int64) {
func (cs *ConsensusState) handleTxsAvailable() {
cs.mtx.Lock()
defer cs.mtx.Unlock()
// we only need to do this for round 0
cs.enterPropose(height, 0)
cs.enterPropose(cs.Height, 0)
}
//-----------------------------------------------------------------------------
@@ -704,15 +726,15 @@ func (cs *ConsensusState) enterNewRound(height int64, round int) {
logger := cs.Logger.With("height", height, "round", round)
if cs.Height != height || round < cs.Round || (cs.Round == round && cs.Step != cstypes.RoundStepNewHeight) {
logger.Debug(cmn.Fmt("enterNewRound(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
logger.Debug(fmt.Sprintf("enterNewRound(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
return
}
if now := time.Now(); cs.StartTime.After(now) {
if now := tmtime.Now(); cs.StartTime.After(now) {
logger.Info("Need to set a buffer and log message here for sanity.", "startTime", cs.StartTime, "now", now)
}
logger.Info(cmn.Fmt("enterNewRound(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
logger.Info(fmt.Sprintf("enterNewRound(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
// Increment validators if necessary
validators := cs.Validators
@@ -799,10 +821,10 @@ func (cs *ConsensusState) enterPropose(height int64, round int) {
logger := cs.Logger.With("height", height, "round", round)
if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPropose <= cs.Step) {
logger.Debug(cmn.Fmt("enterPropose(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
logger.Debug(fmt.Sprintf("enterPropose(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
return
}
logger.Info(cmn.Fmt("enterPropose(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
logger.Info(fmt.Sprintf("enterPropose(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
defer func() {
// Done enterPropose:
@@ -882,7 +904,7 @@ func (cs *ConsensusState) defaultDecideProposal(height int64, round int) {
cs.sendInternalMessage(msgInfo{&BlockPartMessage{cs.Height, cs.Round, part}, ""})
}
cs.Logger.Info("Signed proposal", "height", height, "round", round, "proposal", proposal)
cs.Logger.Debug(cmn.Fmt("Signed proposal block: %v", block))
cs.Logger.Debug(fmt.Sprintf("Signed proposal block: %v", block))
} else {
if !cs.replayMode {
cs.Logger.Error("enterPropose: Error signing proposal", "height", height, "round", round, "err", err)
@@ -907,6 +929,8 @@ func (cs *ConsensusState) isProposalComplete() bool {
}
// Create the next block to propose and return it.
// We really only need to return the parts, but the block
// is returned for convenience so we can log the proposal block.
// Returns nil block upon error.
// NOTE: keep it side-effect free for clarity.
func (cs *ConsensusState) createProposalBlock() (block *types.Block, blockParts *types.PartSet) {
@@ -924,14 +948,25 @@ func (cs *ConsensusState) createProposalBlock() (block *types.Block, blockParts
return
}
maxBytes := cs.state.ConsensusParams.BlockSize.MaxBytes
// bound evidence to 1/10th of the block
evidence := cs.evpool.PendingEvidence(maxBytes / 10)
// Mempool validated transactions
txs := cs.mempool.Reap(cs.state.ConsensusParams.BlockSize.MaxTxs)
block, parts := cs.state.MakeBlock(cs.Height, txs, commit)
evidence := cs.evpool.PendingEvidence()
block.AddEvidence(evidence)
txs := cs.mempool.ReapMaxBytes(maxDataBytes(maxBytes, cs.state.Validators.Size(), len(evidence)))
proposerAddr := cs.privValidator.GetAddress()
block, parts := cs.state.MakeBlock(cs.Height, txs, commit, evidence, proposerAddr)
return block, parts
}
func maxDataBytes(maxBytes, valsCount, evidenceCount int) int {
return maxBytes -
types.MaxAminoOverheadForBlock -
types.MaxHeaderBytes -
(valsCount * types.MaxVoteBytes) -
(evidenceCount * types.MaxEvidenceBytes)
}
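
A hedged illustration of the new bound (the inputs are placeholders, not values from the diff): the mempool is asked for at most maxDataBytes of transactions once the amino overhead, header, votes, and evidence are budgeted out of MaxBytes.

// exampleMaxDataBytes: with a hypothetical 1 MB MaxBytes, 100 validators and
// 2 pieces of evidence, this is the most transaction data a proposal may carry.
func exampleMaxDataBytes() int {
	return maxDataBytes(1024*1024, 100, 2)
}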
// Enter: `timeoutPropose` after entering Propose.
// Enter: proposal block and POL is ready.
// Enter: any +2/3 prevotes for future round.
@@ -939,7 +974,7 @@ func (cs *ConsensusState) createProposalBlock() (block *types.Block, blockParts
// Otherwise vote nil.
func (cs *ConsensusState) enterPrevote(height int64, round int) {
if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrevote <= cs.Step) {
cs.Logger.Debug(cmn.Fmt("enterPrevote(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
cs.Logger.Debug(fmt.Sprintf("enterPrevote(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
return
}
@@ -957,7 +992,7 @@ func (cs *ConsensusState) enterPrevote(height int64, round int) {
// TODO: catchup event?
}
cs.Logger.Info(cmn.Fmt("enterPrevote(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
cs.Logger.Info(fmt.Sprintf("enterPrevote(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
// Sign and broadcast vote as necessary
cs.doPrevote(height, round)
@@ -1003,13 +1038,13 @@ func (cs *ConsensusState) enterPrevoteWait(height int64, round int) {
logger := cs.Logger.With("height", height, "round", round)
if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrevoteWait <= cs.Step) {
logger.Debug(cmn.Fmt("enterPrevoteWait(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
logger.Debug(fmt.Sprintf("enterPrevoteWait(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
return
}
if !cs.Votes.Prevotes(round).HasTwoThirdsAny() {
cmn.PanicSanity(cmn.Fmt("enterPrevoteWait(%v/%v), but Prevotes does not have any +2/3 votes", height, round))
cmn.PanicSanity(fmt.Sprintf("enterPrevoteWait(%v/%v), but Prevotes does not have any +2/3 votes", height, round))
}
logger.Info(cmn.Fmt("enterPrevoteWait(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
logger.Info(fmt.Sprintf("enterPrevoteWait(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
defer func() {
// Done enterPrevoteWait:
@@ -1031,11 +1066,11 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) {
logger := cs.Logger.With("height", height, "round", round)
if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrecommit <= cs.Step) {
logger.Debug(cmn.Fmt("enterPrecommit(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
logger.Debug(fmt.Sprintf("enterPrecommit(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
return
}
logger.Info(cmn.Fmt("enterPrecommit(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
logger.Info(fmt.Sprintf("enterPrecommit(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
defer func() {
// Done enterPrecommit:
@@ -1063,7 +1098,7 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) {
// the latest POLRound should be this round.
polRound, _ := cs.Votes.POLInfo()
if polRound < round {
cmn.PanicSanity(cmn.Fmt("This POLRound should be %v but got %", round, polRound))
cmn.PanicSanity(fmt.Sprintf("This POLRound should be %v but got %v", round, polRound))
}
// +2/3 prevoted nil. Unlock and precommit nil.
@@ -1097,7 +1132,7 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) {
logger.Info("enterPrecommit: +2/3 prevoted proposal block. Locking", "hash", blockID.Hash)
// Validate the block.
if err := cs.blockExec.ValidateBlock(cs.state, cs.ProposalBlock); err != nil {
cmn.PanicConsensus(cmn.Fmt("enterPrecommit: +2/3 prevoted for an invalid block: %v", err))
cmn.PanicConsensus(fmt.Sprintf("enterPrecommit: +2/3 prevoted for an invalid block: %v", err))
}
cs.LockedRound = round
cs.LockedBlock = cs.ProposalBlock
@@ -1127,13 +1162,13 @@ func (cs *ConsensusState) enterPrecommitWait(height int64, round int) {
logger := cs.Logger.With("height", height, "round", round)
if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrecommitWait <= cs.Step) {
logger.Debug(cmn.Fmt("enterPrecommitWait(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
logger.Debug(fmt.Sprintf("enterPrecommitWait(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
return
}
if !cs.Votes.Precommits(round).HasTwoThirdsAny() {
cmn.PanicSanity(cmn.Fmt("enterPrecommitWait(%v/%v), but Precommits does not have any +2/3 votes", height, round))
cmn.PanicSanity(fmt.Sprintf("enterPrecommitWait(%v/%v), but Precommits does not have any +2/3 votes", height, round))
}
logger.Info(cmn.Fmt("enterPrecommitWait(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
logger.Info(fmt.Sprintf("enterPrecommitWait(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
defer func() {
// Done enterPrecommitWait:
@@ -1151,17 +1186,17 @@ func (cs *ConsensusState) enterCommit(height int64, commitRound int) {
logger := cs.Logger.With("height", height, "commitRound", commitRound)
if cs.Height != height || cstypes.RoundStepCommit <= cs.Step {
logger.Debug(cmn.Fmt("enterCommit(%v/%v): Invalid args. Current step: %v/%v/%v", height, commitRound, cs.Height, cs.Round, cs.Step))
logger.Debug(fmt.Sprintf("enterCommit(%v/%v): Invalid args. Current step: %v/%v/%v", height, commitRound, cs.Height, cs.Round, cs.Step))
return
}
logger.Info(cmn.Fmt("enterCommit(%v/%v). Current: %v/%v/%v", height, commitRound, cs.Height, cs.Round, cs.Step))
logger.Info(fmt.Sprintf("enterCommit(%v/%v). Current: %v/%v/%v", height, commitRound, cs.Height, cs.Round, cs.Step))
defer func() {
// Done enterCommit:
// keep cs.Round the same, commitRound points to the right Precommits set.
cs.updateRoundStep(cs.Round, cstypes.RoundStepCommit)
cs.CommitRound = commitRound
cs.CommitTime = time.Now()
cs.CommitTime = tmtime.Now()
cs.newStep()
// Maybe finalize immediately.
@@ -1201,7 +1236,7 @@ func (cs *ConsensusState) tryFinalizeCommit(height int64) {
logger := cs.Logger.With("height", height)
if cs.Height != height {
cmn.PanicSanity(cmn.Fmt("tryFinalizeCommit() cs.Height: %v vs height: %v", cs.Height, height))
cmn.PanicSanity(fmt.Sprintf("tryFinalizeCommit() cs.Height: %v vs height: %v", cs.Height, height))
}
blockID, ok := cs.Votes.Precommits(cs.CommitRound).TwoThirdsMajority()
@@ -1223,7 +1258,7 @@ func (cs *ConsensusState) tryFinalizeCommit(height int64) {
// Increment height and goto cstypes.RoundStepNewHeight
func (cs *ConsensusState) finalizeCommit(height int64) {
if cs.Height != height || cs.Step != cstypes.RoundStepCommit {
cs.Logger.Debug(cmn.Fmt("finalizeCommit(%v): Invalid args. Current step: %v/%v/%v", height, cs.Height, cs.Round, cs.Step))
cs.Logger.Debug(fmt.Sprintf("finalizeCommit(%v): Invalid args. Current step: %v/%v/%v", height, cs.Height, cs.Round, cs.Step))
return
}
@@ -1231,21 +1266,21 @@ func (cs *ConsensusState) finalizeCommit(height int64) {
block, blockParts := cs.ProposalBlock, cs.ProposalBlockParts
if !ok {
cmn.PanicSanity(cmn.Fmt("Cannot finalizeCommit, commit does not have two thirds majority"))
cmn.PanicSanity(fmt.Sprintf("Cannot finalizeCommit, commit does not have two thirds majority"))
}
if !blockParts.HasHeader(blockID.PartsHeader) {
cmn.PanicSanity(cmn.Fmt("Expected ProposalBlockParts header to be commit header"))
cmn.PanicSanity(fmt.Sprintf("Expected ProposalBlockParts header to be commit header"))
}
if !block.HashesTo(blockID.Hash) {
cmn.PanicSanity(cmn.Fmt("Cannot finalizeCommit, ProposalBlock does not hash to commit hash"))
cmn.PanicSanity(fmt.Sprintf("Cannot finalizeCommit, ProposalBlock does not hash to commit hash"))
}
if err := cs.blockExec.ValidateBlock(cs.state, block); err != nil {
cmn.PanicConsensus(cmn.Fmt("+2/3 committed an invalid block: %v", err))
cmn.PanicConsensus(fmt.Sprintf("+2/3 committed an invalid block: %v", err))
}
cs.Logger.Info(cmn.Fmt("Finalizing commit of block with %d txs", block.NumTxs),
cs.Logger.Info(fmt.Sprintf("Finalizing commit of block with %d txs", block.NumTxs),
"height", block.Height, "hash", block.Hash(), "root", block.AppHash)
cs.Logger.Info(cmn.Fmt("%v", block))
cs.Logger.Info(fmt.Sprintf("%v", block))
fail.Fail() // XXX
@@ -1416,7 +1451,11 @@ func (cs *ConsensusState) addProposalBlockPart(msg *BlockPartMessage, peerID p2p
}
if added && cs.ProposalBlockParts.IsComplete() {
// Added and completed!
_, err = cdc.UnmarshalBinaryReader(cs.ProposalBlockParts.GetReader(), &cs.ProposalBlock, int64(cs.state.ConsensusParams.BlockSize.MaxBytes))
_, err = cdc.UnmarshalBinaryReader(
cs.ProposalBlockParts.GetReader(),
&cs.ProposalBlock,
int64(cs.state.ConsensusParams.BlockSize.MaxBytes),
)
if err != nil {
return true, err
}
@@ -1497,7 +1536,7 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool,
return added, err
}
cs.Logger.Info(cmn.Fmt("Added to lastPrecommits: %v", cs.LastCommit.StringShort()))
cs.Logger.Info(fmt.Sprintf("Added to lastPrecommits: %v", cs.LastCommit.StringShort()))
cs.eventBus.PublishEventVote(types.EventDataVote{vote})
cs.evsw.FireEvent(types.EventVote, vote)
@@ -1613,7 +1652,7 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool,
cs.enterPrecommitWait(height, vote.Round)
}
default:
panic(cmn.Fmt("Unexpected vote type %X", vote.Type)) // go-wire should prevent this.
panic(fmt.Sprintf("Unexpected vote type %X", vote.Type)) // go-wire should prevent this.
}
return
@@ -1622,12 +1661,13 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool,
func (cs *ConsensusState) signVote(type_ byte, hash []byte, header types.PartSetHeader) (*types.Vote, error) {
addr := cs.privValidator.GetAddress()
valIndex, _ := cs.Validators.GetByAddress(addr)
vote := &types.Vote{
ValidatorAddress: addr,
ValidatorIndex: valIndex,
Height: cs.Height,
Round: cs.Round,
Timestamp: time.Now().UTC(),
Timestamp: cs.voteTime(),
Type: type_,
BlockID: types.BlockID{hash, header},
}
@@ -1635,6 +1675,23 @@ func (cs *ConsensusState) signVote(type_ byte, hash []byte, header types.PartSet
return vote, err
}
func (cs *ConsensusState) voteTime() time.Time {
now := tmtime.Now()
minVoteTime := now
// TODO: We should remove next line in case we don't vote for v in case cs.ProposalBlock == nil,
// even if cs.LockedBlock != nil. See https://github.com/tendermint/spec.
if cs.LockedBlock != nil {
minVoteTime = cs.config.MinValidVoteTime(cs.LockedBlock.Time)
} else if cs.ProposalBlock != nil {
minVoteTime = cs.config.MinValidVoteTime(cs.ProposalBlock.Time)
}
if now.After(minVoteTime) {
return now
}
return minVoteTime
}
// sign the vote and publish on internalMsgQueue
func (cs *ConsensusState) signAddVote(type_ byte, hash []byte, header types.PartSetHeader) *types.Vote {
// if we don't have a key or we're not in the validator set, do nothing

View File

@@ -8,10 +8,9 @@ import (
"time"
cstypes "github.com/tendermint/tendermint/consensus/types"
"github.com/tendermint/tendermint/libs/log"
tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
"github.com/tendermint/tendermint/types"
cmn "github.com/tendermint/tendermint/libs/common"
"github.com/tendermint/tendermint/libs/log"
)
func init() {
@@ -84,7 +83,7 @@ func TestStateProposerSelection0(t *testing.T) {
prop = cs1.GetRoundState().Validators.GetProposer()
if !bytes.Equal(prop.Address, vss[1].GetAddress()) {
panic(cmn.Fmt("expected proposer to be validator %d. Got %X", 1, prop.Address))
panic(fmt.Sprintf("expected proposer to be validator %d. Got %X", 1, prop.Address))
}
}
@@ -104,8 +103,9 @@ func TestStateProposerSelection2(t *testing.T) {
// everyone just votes nil. we get a new proposer each round
for i := 0; i < len(vss); i++ {
prop := cs1.GetRoundState().Validators.GetProposer()
if !bytes.Equal(prop.Address, vss[(i+2)%len(vss)].GetAddress()) {
panic(cmn.Fmt("expected proposer to be validator %d. Got %X", (i+2)%len(vss), prop.Address))
correctProposer := vss[(i+2)%len(vss)].GetAddress()
if !bytes.Equal(prop.Address, correctProposer) {
panic(fmt.Sprintf("expected RoundState.Validators.GetProposer() to be validator %d. Got %X", (i+2)%len(vss), prop.Address))
}
rs := cs1.GetRoundState()
@@ -444,7 +444,7 @@ func TestStateLockNoPOL(t *testing.T) {
// now we're on a new round and are the proposer
if !bytes.Equal(rs.ProposalBlock.Hash(), rs.LockedBlock.Hash()) {
panic(cmn.Fmt("Expected proposal block to be locked block. Got %v, Expected %v", rs.ProposalBlock, rs.LockedBlock))
panic(fmt.Sprintf("Expected proposal block to be locked block. Got %v, Expected %v", rs.ProposalBlock, rs.LockedBlock))
}
<-voteCh // prevote

View File

@@ -6,9 +6,9 @@ import (
"strings"
"sync"
cmn "github.com/tendermint/tendermint/libs/common"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/types"
cmn "github.com/tendermint/tendermint/libs/common"
)
type RoundVoteSet struct {
@@ -169,7 +169,7 @@ func (hvs *HeightVoteSet) getVoteSet(round int, type_ byte) *types.VoteSet {
case types.VoteTypePrecommit:
return rvs.Precommits
default:
cmn.PanicSanity(cmn.Fmt("Unexpected vote type %X", type_))
cmn.PanicSanity(fmt.Sprintf("Unexpected vote type %X", type_))
return nil
}
}
@@ -219,7 +219,7 @@ func (hvs *HeightVoteSet) StringIndented(indent string) string {
voteSetString = roundVoteSet.Precommits.StringShort()
vsStrings = append(vsStrings, voteSetString)
}
return cmn.Fmt(`HeightVoteSet{H:%v R:0~%v
return fmt.Sprintf(`HeightVoteSet{H:%v R:0~%v
%s %v
%s}`,
hvs.height, hvs.round,

View File

@@ -1,12 +1,12 @@
package types
import (
"fmt"
"testing"
"time"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/types"
cmn "github.com/tendermint/tendermint/libs/common"
tmtime "github.com/tendermint/tendermint/types/time"
)
var config *cfg.Config // NOTE: must be reset for each _test.go file
@@ -55,14 +55,14 @@ func makeVoteHR(t *testing.T, height int64, round int, privVals []types.PrivVali
ValidatorIndex: valIndex,
Height: height,
Round: round,
Timestamp: time.Now().UTC(),
Timestamp: tmtime.Now(),
Type: types.VoteTypePrecommit,
BlockID: types.BlockID{[]byte("fakehash"), types.PartSetHeader{}},
}
chainID := config.ChainID()
err := privVal.SignVote(chainID, vote)
if err != nil {
panic(cmn.Fmt("Error signing vote: %v", err))
panic(fmt.Sprintf("Error signing vote: %v", err))
return nil
}
return vote

View File

@@ -4,8 +4,8 @@ import (
"fmt"
"time"
"github.com/tendermint/tendermint/types"
cmn "github.com/tendermint/tendermint/libs/common"
"github.com/tendermint/tendermint/types"
)
//-----------------------------------------------------------------------------

View File

@@ -5,8 +5,8 @@ import (
"fmt"
"time"
"github.com/tendermint/tendermint/types"
cmn "github.com/tendermint/tendermint/libs/common"
"github.com/tendermint/tendermint/types"
)
//-----------------------------------------------------------------------------

View File

@@ -2,12 +2,12 @@ package types
import (
"testing"
"time"
"github.com/tendermint/go-amino"
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tendermint/crypto/ed25519"
cmn "github.com/tendermint/tendermint/libs/common"
"github.com/tendermint/tendermint/types"
tmtime "github.com/tendermint/tendermint/types/time"
)
func BenchmarkRoundStateDeepCopy(b *testing.B) {
@@ -23,11 +23,11 @@ func BenchmarkRoundStateDeepCopy(b *testing.B) {
Hash: cmn.RandBytes(20),
},
}
sig := crypto.SignatureEd25519{}
sig := make([]byte, ed25519.SignatureSize)
for i := 0; i < nval; i++ {
precommits[i] = &types.Vote{
ValidatorAddress: types.Address(cmn.RandBytes(20)),
Timestamp: time.Now(),
Timestamp: tmtime.Now(),
BlockID: blockID,
Signature: sig,
}
@@ -38,9 +38,9 @@ func BenchmarkRoundStateDeepCopy(b *testing.B) {
}
// Random block
block := &types.Block{
Header: &types.Header{
Header: types.Header{
ChainID: cmn.RandStr(12),
Time: time.Now(),
Time: tmtime.Now(),
LastBlockID: blockID,
LastCommitHash: cmn.RandBytes(20),
DataHash: cmn.RandBytes(20),
@@ -50,7 +50,7 @@ func BenchmarkRoundStateDeepCopy(b *testing.B) {
LastResultsHash: cmn.RandBytes(20),
EvidenceHash: cmn.RandBytes(20),
},
Data: &types.Data{
Data: types.Data{
Txs: txs,
},
Evidence: types.EvidenceData{},
@@ -62,7 +62,7 @@ func BenchmarkRoundStateDeepCopy(b *testing.B) {
parts := block.MakePartSet(4096)
// Random Proposal
proposal := &types.Proposal{
Timestamp: time.Now(),
Timestamp: tmtime.Now(),
BlockPartsHeader: types.PartSetHeader{
Hash: cmn.RandBytes(20),
},
@@ -73,8 +73,8 @@ func BenchmarkRoundStateDeepCopy(b *testing.B) {
// TODO: hvs :=
rs := &RoundState{
StartTime: time.Now(),
CommitTime: time.Now(),
StartTime: tmtime.Now(),
CommitTime: tmtime.Now(),
Validators: vset,
Proposal: proposal,
ProposalBlock: block,

View File

@@ -2,11 +2,11 @@ package types
import (
"github.com/tendermint/go-amino"
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/types"
)
var cdc = amino.NewCodec()
func init() {
crypto.RegisterAmino(cdc)
types.RegisterBlockAmino(cdc)
}

View File

@@ -1,8 +1,6 @@
package consensus
import (
cmn "github.com/tendermint/tendermint/libs/common"
)
import "fmt"
// kind of arbitrary
var Spec = "1" // async
@@ -10,4 +8,4 @@ var Major = "0" //
var Minor = "2" // replay refactor
var Revision = "2" // validation -> commit
var Version = cmn.Fmt("v%s/%s.%s.%s", Spec, Major, Minor, Revision)
var Version = fmt.Sprintf("v%s/%s.%s.%s", Spec, Major, Minor, Revision)

View File

@@ -11,9 +11,10 @@ import (
"github.com/pkg/errors"
amino "github.com/tendermint/go-amino"
"github.com/tendermint/tendermint/types"
auto "github.com/tendermint/tendermint/libs/autofile"
cmn "github.com/tendermint/tendermint/libs/common"
"github.com/tendermint/tendermint/types"
tmtime "github.com/tendermint/tendermint/types/time"
)
const (
@@ -119,8 +120,8 @@ func (wal *baseWAL) Write(msg WALMessage) {
}
// Write the wal message
if err := wal.enc.Encode(&TimedWALMessage{time.Now(), msg}); err != nil {
panic(cmn.Fmt("Error writing msg to consensus wal: %v \n\nMessage: %v", err, msg))
if err := wal.enc.Encode(&TimedWALMessage{tmtime.Now(), msg}); err != nil {
panic(fmt.Sprintf("Error writing msg to consensus wal: %v \n\nMessage: %v", err, msg))
}
}
@@ -134,7 +135,7 @@ func (wal *baseWAL) WriteSync(msg WALMessage) {
wal.Write(msg)
if err := wal.group.Flush(); err != nil {
panic(cmn.Fmt("Error flushing consensus wal buf to file. Error: %v \n", err))
panic(fmt.Sprintf("Error flushing consensus wal buf to file. Error: %v \n", err))
}
}

View File

@@ -13,14 +13,14 @@ import (
"github.com/tendermint/tendermint/abci/example/kvstore"
bc "github.com/tendermint/tendermint/blockchain"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/privval"
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
auto "github.com/tendermint/tendermint/libs/autofile"
cmn "github.com/tendermint/tendermint/libs/common"
"github.com/tendermint/tendermint/libs/db"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/privval"
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
)
// WALWithNBlocks generates a consensus WAL. It does this by spinning up a

View File

@@ -3,20 +3,21 @@ package consensus
import (
"bytes"
"crypto/rand"
"fmt"
// "sync"
"testing"
"time"
"github.com/tendermint/tendermint/consensus/types"
tmtypes "github.com/tendermint/tendermint/types"
cmn "github.com/tendermint/tendermint/libs/common"
tmtime "github.com/tendermint/tendermint/types/time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestWALEncoderDecoder(t *testing.T) {
now := time.Now()
now := tmtime.Now()
msgs := []TimedWALMessage{
TimedWALMessage{Time: now, Msg: EndHeightMessage{0}},
TimedWALMessage{Time: now, Msg: timeoutInfo{Duration: time.Second, Height: 1, Round: 1, Step: types.RoundStepPropose}},
@@ -54,8 +55,8 @@ func TestWALSearchForEndHeight(t *testing.T) {
h := int64(3)
gr, found, err := wal.SearchForEndHeight(h, &WALSearchOptions{})
assert.NoError(t, err, cmn.Fmt("expected not to err on height %d", h))
assert.True(t, found, cmn.Fmt("expected to find end height for %d", h))
assert.NoError(t, err, fmt.Sprintf("expected not to err on height %d", h))
assert.True(t, found, fmt.Sprintf("expected to find end height for %d", h))
assert.NotNil(t, gr, "expected group not to be nil")
defer gr.Close()
@@ -64,7 +65,7 @@ func TestWALSearchForEndHeight(t *testing.T) {
assert.NoError(t, err, "expected to decode a message")
rs, ok := msg.Msg.(tmtypes.EventDataRoundState)
assert.True(t, ok, "expected message of type EventDataRoundState")
assert.Equal(t, rs.Height, h+1, cmn.Fmt("wrong height"))
assert.Equal(t, rs.Height, h+1, fmt.Sprintf("wrong height"))
}
/*
@@ -93,7 +94,7 @@ func benchmarkWalDecode(b *testing.B, n int) {
enc := NewWALEncoder(buf)
data := nBytes(n)
enc.Encode(&TimedWALMessage{Msg: data, Time: time.Now().Round(time.Second)})
enc.Encode(&TimedWALMessage{Msg: data, Time: time.Now().Round(time.Second).UTC()})
encoded := buf.Bytes()

View File

@@ -2,7 +2,7 @@ package consensus
import (
"github.com/tendermint/go-amino"
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/types"
)
var cdc = amino.NewCodec()
@@ -10,5 +10,5 @@ var cdc = amino.NewCodec()
func init() {
RegisterConsensusMessages(cdc)
RegisterWALMessages(cdc)
crypto.RegisterAmino(cdc)
types.RegisterBlockAmino(cdc)
}

View File

@@ -3,8 +3,15 @@
crypto is the cryptographic package adapted for Tendermint's uses
## Importing it
To get the interfaces,
`import "github.com/tendermint/tendermint/crypto"`
For any specific algorithm, use its specific module e.g.
`import "github.com/tendermint/tendermint/crypto/ed25519"`
If you want to decode bytes into one of the types, but don't care about the specific algorithm, use
`import "github.com/tendermint/tendermint/crypto/amino"`
## Binary encoding
For Binary encoding, please refer to the [Tendermint encoding spec](https://github.com/tendermint/tendermint/blob/master/docs/spec/blockchain/encoding.md).
@@ -16,10 +23,8 @@ crypto `.Bytes()` uses Amino:binary encoding, but Amino:JSON is also supported.
```go
Example Amino:JSON encodings:
crypto.PrivKeyEd25519 - {"type":"954568A3288910","value":"EVkqJO/jIXp3rkASXfh9YnyToYXRXhBr6g9cQVxPFnQBP/5povV4HTjvsy530kybxKHwEi85iU8YL0qQhSYVoQ=="}
crypto.SignatureEd25519 - {"type":"6BF5903DA1DB28","value":"77sQNZOrf7ltExpf7AV1WaYPCHbyRLgjBsoWVzcduuLk+jIGmYk+s5R6Emm29p12HeiNAuhUJgdFGmwkpeGJCA=="}
crypto.PubKeyEd25519 - {"type":"AC26791624DE60","value":"AT/+aaL1eB0477Mud9JMm8Sh8BIvOYlPGC9KkIUmFaE="}
ed25519.PrivKeyEd25519 - {"type":"954568A3288910","value":"EVkqJO/jIXp3rkASXfh9YnyToYXRXhBr6g9cQVxPFnQBP/5povV4HTjvsy530kybxKHwEi85iU8YL0qQhSYVoQ=="}
ed25519.PubKeyEd25519 - {"type":"AC26791624DE60","value":"AT/+aaL1eB0477Mud9JMm8Sh8BIvOYlPGC9KkIUmFaE="}
crypto.PrivKeySecp256k1 - {"type":"019E82E1B0F798","value":"zx4Pnh67N+g2V+5vZbQzEyRerX9c4ccNZOVzM9RvJ0Y="}
crypto.SignatureSecp256k1 - {"type":"6D1EA416E1FEE8","value":"MEUCIQCIg5TqS1l7I+MKTrSPIuUN2+4m5tA29dcauqn3NhEJ2wIgICaZ+lgRc5aOTVahU/XoLopXKn8BZcl0bnuYWLvohR8="}
crypto.PubKeySecp256k1 - {"type":"F8CCEAEB5AE980","value":"A8lPKJXcNl5VHt1FK8a244K9EJuS4WX1hFBnwisi0IJx"}
```

View File

@@ -1,37 +0,0 @@
package crypto
import (
amino "github.com/tendermint/go-amino"
)
var cdc = amino.NewCodec()
func init() {
// NOTE: It's important that there be no conflicts here,
// as that would change the canonical representations,
// and therefore change the address.
// TODO: Add feature to go-amino to ensure that there
// are no conflicts.
RegisterAmino(cdc)
}
// RegisterAmino registers all crypto related types in the given (amino) codec.
func RegisterAmino(cdc *amino.Codec) {
cdc.RegisterInterface((*PubKey)(nil), nil)
cdc.RegisterConcrete(PubKeyEd25519{},
"tendermint/PubKeyEd25519", nil)
cdc.RegisterConcrete(PubKeySecp256k1{},
"tendermint/PubKeySecp256k1", nil)
cdc.RegisterInterface((*PrivKey)(nil), nil)
cdc.RegisterConcrete(PrivKeyEd25519{},
"tendermint/PrivKeyEd25519", nil)
cdc.RegisterConcrete(PrivKeySecp256k1{},
"tendermint/PrivKeySecp256k1", nil)
cdc.RegisterInterface((*Signature)(nil), nil)
cdc.RegisterConcrete(SignatureEd25519{},
"tendermint/SignatureEd25519", nil)
cdc.RegisterConcrete(SignatureSecp256k1{},
"tendermint/SignatureSecp256k1", nil)
}

View File

@@ -1,4 +1,4 @@
package crypto
package armor
import (
"bytes"

View File

@@ -1,4 +1,4 @@
package crypto
package armor
import (
"testing"

crypto/crypto.go (new file, 30 lines)
View File

@@ -0,0 +1,30 @@
package crypto
import (
cmn "github.com/tendermint/tendermint/libs/common"
)
type PrivKey interface {
Bytes() []byte
Sign(msg []byte) ([]byte, error)
PubKey() PubKey
Equals(PrivKey) bool
}
// An address is a []byte, but hex-encoded even in JSON.
// []byte leaves us the option to change the address length.
// Use an alias so Unmarshal methods (with ptr receivers) are available too.
type Address = cmn.HexBytes
type PubKey interface {
Address() Address
Bytes() []byte
VerifyBytes(msg []byte, sig []byte) bool
Equals(PubKey) bool
}
type Symmetric interface {
Keygen() []byte
Encrypt(plaintext []byte, secret []byte) (ciphertext []byte)
Decrypt(ciphertext []byte, secret []byte) (plaintext []byte, err error)
}

View File

@@ -22,7 +22,7 @@
// pubKey := key.PubKey()
// For example:
// privKey, err := crypto.GenPrivKeyEd25519()
// privKey, err := ed25519.GenPrivKey()
// if err != nil {
// ...
// }

View File

@@ -0,0 +1,26 @@
package ed25519
import (
"io"
"testing"
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/crypto/internal/benchmarking"
)
func BenchmarkKeyGeneration(b *testing.B) {
benchmarkKeygenWrapper := func(reader io.Reader) crypto.PrivKey {
return genPrivKey(reader)
}
benchmarking.BenchmarkKeyGeneration(b, benchmarkKeygenWrapper)
}
func BenchmarkSigning(b *testing.B) {
priv := GenPrivKey()
benchmarking.BenchmarkSigning(b, priv)
}
func BenchmarkVerification(b *testing.B) {
priv := GenPrivKey()
benchmarking.BenchmarkVerification(b, priv)
}

crypto/ed25519/ed25519.go (new file, 196 lines)
View File

@@ -0,0 +1,196 @@
package ed25519
import (
"bytes"
"crypto/subtle"
"fmt"
"io"
"github.com/tendermint/ed25519"
"github.com/tendermint/ed25519/extra25519"
amino "github.com/tendermint/go-amino"
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/crypto/tmhash"
)
//-------------------------------------
var _ crypto.PrivKey = PrivKeyEd25519{}
const (
PrivKeyAminoRoute = "tendermint/PrivKeyEd25519"
PubKeyAminoRoute = "tendermint/PubKeyEd25519"
// Size of an Edwards25519 signature. Namely the size of a compressed
// Edwards25519 point and a field element, both of which are 32 bytes.
SignatureSize = 64
)
var cdc = amino.NewCodec()
func init() {
cdc.RegisterInterface((*crypto.PubKey)(nil), nil)
cdc.RegisterConcrete(PubKeyEd25519{},
PubKeyAminoRoute, nil)
cdc.RegisterInterface((*crypto.PrivKey)(nil), nil)
cdc.RegisterConcrete(PrivKeyEd25519{},
PrivKeyAminoRoute, nil)
}
// PrivKeyEd25519 implements crypto.PrivKey.
type PrivKeyEd25519 [64]byte
// Bytes marshals the privkey using amino encoding.
func (privKey PrivKeyEd25519) Bytes() []byte {
return cdc.MustMarshalBinaryBare(privKey)
}
// Sign produces a signature on the provided message.
func (privKey PrivKeyEd25519) Sign(msg []byte) ([]byte, error) {
privKeyBytes := [64]byte(privKey)
signatureBytes := ed25519.Sign(&privKeyBytes, msg)
return signatureBytes[:], nil
}
// PubKey gets the corresponding public key from the private key.
func (privKey PrivKeyEd25519) PubKey() crypto.PubKey {
privKeyBytes := [64]byte(privKey)
initialized := false
// If the latter 32 bytes of the privkey are all zero, compute the pubkey
// otherwise privkey is initialized and we can use the cached value inside
// of the private key.
for _, v := range privKeyBytes[32:] {
if v != 0 {
initialized = true
break
}
}
if initialized {
var pubkeyBytes [PubKeyEd25519Size]byte
copy(pubkeyBytes[:], privKeyBytes[32:])
return PubKeyEd25519(pubkeyBytes)
}
pubBytes := *ed25519.MakePublicKey(&privKeyBytes)
return PubKeyEd25519(pubBytes)
}
// Equals - you probably don't need to use this.
// Runs in constant time based on length of the keys.
func (privKey PrivKeyEd25519) Equals(other crypto.PrivKey) bool {
if otherEd, ok := other.(PrivKeyEd25519); ok {
return subtle.ConstantTimeCompare(privKey[:], otherEd[:]) == 1
} else {
return false
}
}
// ToCurve25519 takes a private key and returns its representation on
// Curve25519. Curve25519 is birationally equivalent to Edwards25519,
// which Ed25519 uses internally. This method is intended for use in
// an X25519 Diffie Hellman key exchange.
func (privKey PrivKeyEd25519) ToCurve25519() *[PubKeyEd25519Size]byte {
keyCurve25519 := new([32]byte)
privKeyBytes := [64]byte(privKey)
extra25519.PrivateKeyToCurve25519(keyCurve25519, &privKeyBytes)
return keyCurve25519
}
// GenPrivKey generates a new ed25519 private key.
// It uses OS randomness in conjunction with the current global random seed
// in tendermint/libs/common to generate the private key.
func GenPrivKey() PrivKeyEd25519 {
return genPrivKey(crypto.CReader())
}
// genPrivKey generates a new ed25519 private key using the provided reader.
func genPrivKey(rand io.Reader) PrivKeyEd25519 {
privKey := new([64]byte)
_, err := io.ReadFull(rand, privKey[:32])
if err != nil {
panic(err)
}
// ed25519.MakePublicKey(privKey) alters the last 32 bytes of privKey.
// It places the pubkey in the last 32 bytes of privKey, and returns the
// public key.
ed25519.MakePublicKey(privKey)
return PrivKeyEd25519(*privKey)
}
// GenPrivKeyFromSecret hashes the secret with SHA2, and uses
// that 32 byte output to create the private key.
// NOTE: secret should be the output of a KDF like bcrypt,
// if it's derived from user input.
func GenPrivKeyFromSecret(secret []byte) PrivKeyEd25519 {
privKey32 := crypto.Sha256(secret) // Not Ripemd160 because we want 32 bytes.
privKey := new([64]byte)
copy(privKey[:32], privKey32)
// ed25519.MakePublicKey(privKey) alters the last 32 bytes of privKey.
// It places the pubkey in the last 32 bytes of privKey, and returns the
// public key.
ed25519.MakePublicKey(privKey)
return PrivKeyEd25519(*privKey)
}
//-------------------------------------
var _ crypto.PubKey = PubKeyEd25519{}
// PubKeyEd25519Size is the number of bytes in an Ed25519 public key.
const PubKeyEd25519Size = 32
// PubKeyEd25519 implements crypto.PubKey for the Ed25519 signature scheme.
type PubKeyEd25519 [PubKeyEd25519Size]byte
// Address is the SHA256-20 of the raw pubkey bytes.
func (pubKey PubKeyEd25519) Address() crypto.Address {
return crypto.Address(tmhash.Sum(pubKey[:]))
}
// Bytes marshals the PubKey using amino encoding.
func (pubKey PubKeyEd25519) Bytes() []byte {
bz, err := cdc.MarshalBinaryBare(pubKey)
if err != nil {
panic(err)
}
return bz
}
func (pubKey PubKeyEd25519) VerifyBytes(msg []byte, sig_ []byte) bool {
// make sure we use the same algorithm to sign
if len(sig_) != SignatureSize {
return false
}
sig := new([SignatureSize]byte)
copy(sig[:], sig_)
pubKeyBytes := [PubKeyEd25519Size]byte(pubKey)
return ed25519.Verify(&pubKeyBytes, msg, sig)
}
// ToCurve25519 takes a public key and returns its representation on
// Curve25519. Curve25519 is birationally equivalent to Edwards25519,
// which Ed25519 uses internally. This method is intended for use in
// an X25519 Diffie Hellman key exchange.
//
// If there is an error, then this function returns nil.
func (pubKey PubKeyEd25519) ToCurve25519() *[PubKeyEd25519Size]byte {
keyCurve25519, pubKeyBytes := new([PubKeyEd25519Size]byte), [PubKeyEd25519Size]byte(pubKey)
ok := extra25519.PublicKeyToCurve25519(keyCurve25519, &pubKeyBytes)
if !ok {
return nil
}
return keyCurve25519
}
func (pubKey PubKeyEd25519) String() string {
return fmt.Sprintf("PubKeyEd25519{%X}", pubKey[:])
}
// nolint: golint
func (pubKey PubKeyEd25519) Equals(other crypto.PubKey) bool {
if otherEd, ok := other.(PubKeyEd25519); ok {
return bytes.Equal(pubKey[:], otherEd[:])
} else {
return false
}
}

View File

@@ -0,0 +1,29 @@
package ed25519_test
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/crypto/ed25519"
)
func TestSignAndValidateEd25519(t *testing.T) {
privKey := ed25519.GenPrivKey()
pubKey := privKey.PubKey()
msg := crypto.CRandBytes(128)
sig, err := privKey.Sign(msg)
require.Nil(t, err)
// Test the signature
assert.True(t, pubKey.VerifyBytes(msg, sig))
// Mutate the signature, just one bit.
// TODO: Replace this with a much better fuzzer, tendermint/ed25519/issues/10
sig[7] ^= byte(0x01)
assert.False(t, pubKey.VerifyBytes(msg, sig))
}

View File

@@ -0,0 +1,50 @@
package cryptoAmino
import (
amino "github.com/tendermint/go-amino"
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/crypto/ed25519"
"github.com/tendermint/tendermint/crypto/multisig"
"github.com/tendermint/tendermint/crypto/secp256k1"
)
var cdc = amino.NewCodec()
func init() {
// NOTE: It's important that there be no conflicts here,
// as that would change the canonical representations,
// and therefore change the address.
// TODO: Remove above note when
// https://github.com/tendermint/go-amino/issues/9
// is resolved
RegisterAmino(cdc)
}
// RegisterAmino registers all crypto related types in the given (amino) codec.
func RegisterAmino(cdc *amino.Codec) {
// These are all written here instead of
cdc.RegisterInterface((*crypto.PubKey)(nil), nil)
cdc.RegisterConcrete(ed25519.PubKeyEd25519{},
ed25519.PubKeyAminoRoute, nil)
cdc.RegisterConcrete(secp256k1.PubKeySecp256k1{},
secp256k1.PubKeyAminoRoute, nil)
cdc.RegisterConcrete(multisig.PubKeyMultisigThreshold{},
multisig.PubKeyMultisigThresholdAminoRoute, nil)
cdc.RegisterInterface((*crypto.PrivKey)(nil), nil)
cdc.RegisterConcrete(ed25519.PrivKeyEd25519{},
ed25519.PrivKeyAminoRoute, nil)
cdc.RegisterConcrete(secp256k1.PrivKeySecp256k1{},
secp256k1.PrivKeyAminoRoute, nil)
}
func PrivKeyFromBytes(privKeyBytes []byte) (privKey crypto.PrivKey, err error) {
err = cdc.UnmarshalBinaryBare(privKeyBytes, &privKey)
return
}
func PubKeyFromBytes(pubKeyBytes []byte) (pubKey crypto.PubKey, err error) {
err = cdc.UnmarshalBinaryBare(pubKeyBytes, &pubKey)
return
}

View File

@@ -1,4 +1,4 @@
package crypto
package cryptoAmino
import (
"os"
@@ -6,18 +6,23 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/crypto/ed25519"
"github.com/tendermint/tendermint/crypto/secp256k1"
)
type byter interface {
Bytes() []byte
}
func checkAminoBinary(t *testing.T, src byter, dst interface{}, size int) {
func checkAminoBinary(t *testing.T, src, dst interface{}, size int) {
// Marshal to binary bytes.
bz, err := cdc.MarshalBinaryBare(src)
require.Nil(t, err, "%+v", err)
// Make sure this is compatible with current (Bytes()) encoding.
assert.Equal(t, src.Bytes(), bz, "Amino binary vs Bytes() mismatch")
if byterSrc, ok := src.(byter); ok {
// Make sure this is compatible with current (Bytes()) encoding.
assert.Equal(t, byterSrc.Bytes(), bz, "Amino binary vs Bytes() mismatch")
}
// Make sure we have the expected length.
if size != -1 {
assert.Equal(t, size, len(bz), "Amino binary size mismatch")
@@ -48,72 +53,77 @@ func ExamplePrintRegisteredTypes() {
//| ---- | ---- | ------ | ----- | ------ |
//| PubKeyEd25519 | tendermint/PubKeyEd25519 | 0x1624DE64 | 0x20 | |
//| PubKeySecp256k1 | tendermint/PubKeySecp256k1 | 0xEB5AE987 | 0x21 | |
//| PubKeyMultisigThreshold | tendermint/PubKeyMultisigThreshold | 0x22C1F7E2 | variable | |
//| PrivKeyEd25519 | tendermint/PrivKeyEd25519 | 0xA3288910 | 0x40 | |
//| PrivKeySecp256k1 | tendermint/PrivKeySecp256k1 | 0xE1B0F79B | 0x20 | |
//| SignatureEd25519 | tendermint/SignatureEd25519 | 0x2031EA53 | 0x40 | |
//| SignatureSecp256k1 | tendermint/SignatureSecp256k1 | 0x7FC4A495 | variable | |
}
func TestKeyEncodings(t *testing.T) {
cases := []struct {
privKey PrivKey
privSize, pubSize int // binary sizes
privKey crypto.PrivKey
privSize, pubSize, sigSize int // binary sizes
}{
{
privKey: GenPrivKeyEd25519(),
privKey: ed25519.GenPrivKey(),
privSize: 69,
pubSize: 37,
sigSize: 65,
},
{
privKey: GenPrivKeySecp256k1(),
privKey: secp256k1.GenPrivKey(),
privSize: 37,
pubSize: 38,
sigSize: 65,
},
}
for _, tc := range cases {
for tcIndex, tc := range cases {
// Check (de/en)codings of PrivKeys.
var priv2, priv3 PrivKey
var priv2, priv3 crypto.PrivKey
checkAminoBinary(t, tc.privKey, &priv2, tc.privSize)
assert.EqualValues(t, tc.privKey, priv2)
assert.EqualValues(t, tc.privKey, priv2, "tc #%d", tcIndex)
checkAminoJSON(t, tc.privKey, &priv3, false) // TODO also check Prefix bytes.
assert.EqualValues(t, tc.privKey, priv3)
assert.EqualValues(t, tc.privKey, priv3, "tc #%d", tcIndex)
// Check (de/en)codings of Signatures.
var sig1, sig2, sig3 Signature
var sig1, sig2 []byte
sig1, err := tc.privKey.Sign([]byte("something"))
assert.NoError(t, err)
checkAminoBinary(t, sig1, &sig2, -1) // Signature size changes for Secp anyways.
assert.EqualValues(t, sig1, sig2)
checkAminoJSON(t, sig1, &sig3, false) // TODO also check Prefix bytes.
assert.EqualValues(t, sig1, sig3)
assert.NoError(t, err, "tc #%d", tcIndex)
checkAminoBinary(t, sig1, &sig2, tc.sigSize)
assert.EqualValues(t, sig1, sig2, "tc #%d", tcIndex)
// Check (de/en)codings of PubKeys.
pubKey := tc.privKey.PubKey()
var pub2, pub3 PubKey
var pub2, pub3 crypto.PubKey
checkAminoBinary(t, pubKey, &pub2, tc.pubSize)
assert.EqualValues(t, pubKey, pub2)
assert.EqualValues(t, pubKey, pub2, "tc #%d", tcIndex)
checkAminoJSON(t, pubKey, &pub3, false) // TODO also check Prefix bytes.
assert.EqualValues(t, pubKey, pub3)
assert.EqualValues(t, pubKey, pub3, "tc #%d", tcIndex)
}
}
func TestNilEncodings(t *testing.T) {
// Check nil Signature.
var a, b Signature
var a, b []byte
checkAminoJSON(t, &a, &b, true)
assert.EqualValues(t, a, b)
// Check nil PubKey.
var c, d PubKey
var c, d crypto.PubKey
checkAminoJSON(t, &c, &d, true)
assert.EqualValues(t, c, d)
// Check nil PrivKey.
var e, f PrivKey
var e, f crypto.PrivKey
checkAminoJSON(t, &e, &f, true)
assert.EqualValues(t, e, f)
}
func TestPubKeyInvalidDataProperReturnsEmpty(t *testing.T) {
pk, err := PubKeyFromBytes([]byte("foo"))
require.NotNil(t, err, "expecting a non-nil error")
require.Nil(t, pk, "expecting an empty public key on error")
}

View File

@@ -2,6 +2,7 @@ package crypto
import (
"crypto/sha256"
"golang.org/x/crypto/ripemd160"
)

View File

@@ -1,105 +0,0 @@
// Package hkdfchacha20poly1305 creates an AEAD using hkdf, chacha20, and poly1305
// When sealing and opening, the hkdf is used to obtain the nonce and subkey for
// chacha20. Other than the change in how the subkey and nonce for chacha
// are obtained, this is the same as chacha20poly1305
package hkdfchacha20poly1305
import (
"crypto/cipher"
"crypto/sha256"
"errors"
"io"
"golang.org/x/crypto/chacha20poly1305"
"golang.org/x/crypto/hkdf"
)
type hkdfchacha20poly1305 struct {
key [KeySize]byte
}
const (
// KeySize is the size of the key used by this AEAD, in bytes.
KeySize = 32
// NonceSize is the size of the nonce used with this AEAD, in bytes.
NonceSize = 24
// TagSize is the size added from poly1305
TagSize = 16
// MaxPlaintextSize is the max size that can be passed into a single call of Seal
MaxPlaintextSize = (1 << 38) - 64
// MaxCiphertextSize is the max size that can be passed into a single call of Open,
// this differs from plaintext size due to the tag
MaxCiphertextSize = (1 << 38) - 48
// HkdfInfo is the parameter used internally for Hkdf's info parameter.
HkdfInfo = "TENDERMINT_SECRET_CONNECTION_FRAME_KEY_DERIVE"
)
//New xChaChapoly1305 AEAD with 24 byte nonces
func New(key []byte) (cipher.AEAD, error) {
if len(key) != KeySize {
return nil, errors.New("chacha20poly1305: bad key length")
}
ret := new(hkdfchacha20poly1305)
copy(ret.key[:], key)
return ret, nil
}
func (c *hkdfchacha20poly1305) NonceSize() int {
return NonceSize
}
func (c *hkdfchacha20poly1305) Overhead() int {
return TagSize
}
func (c *hkdfchacha20poly1305) Seal(dst, nonce, plaintext, additionalData []byte) []byte {
if len(nonce) != NonceSize {
panic("hkdfchacha20poly1305: bad nonce length passed to Seal")
}
if uint64(len(plaintext)) > MaxPlaintextSize {
panic("hkdfchacha20poly1305: plaintext too large")
}
subKey, chachaNonce := getSubkeyAndChachaNonceFromHkdf(&c.key, &nonce)
aead, err := chacha20poly1305.New(subKey[:])
if err != nil {
panic("hkdfchacha20poly1305: failed to initialize chacha20poly1305")
}
return aead.Seal(dst, chachaNonce[:], plaintext, additionalData)
}
func (c *hkdfchacha20poly1305) Open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) {
if len(nonce) != NonceSize {
return nil, errors.New("hkdfchacha20poly1305: bad nonce length passed to Open")
}
if uint64(len(ciphertext)) > MaxCiphertextSize {
return nil, errors.New("hkdfchacha20poly1305: ciphertext too large")
}
subKey, chachaNonce := getSubkeyAndChachaNonceFromHkdf(&c.key, &nonce)
aead, err := chacha20poly1305.New(subKey[:])
if err != nil {
panic("hkdfchacha20poly1305: failed to initialize chacha20poly1305")
}
return aead.Open(dst, chachaNonce[:], ciphertext, additionalData)
}
func getSubkeyAndChachaNonceFromHkdf(cKey *[32]byte, nonce *[]byte) (
subKey [KeySize]byte, chachaNonce [chacha20poly1305.NonceSize]byte) {
hash := sha256.New
hkdf := hkdf.New(hash, (*cKey)[:], *nonce, []byte(HkdfInfo))
_, err := io.ReadFull(hkdf, subKey[:])
if err != nil {
panic("hkdfchacha20poly1305: failed to read subkey from hkdf")
}
_, err = io.ReadFull(hkdf, chachaNonce[:])
if err != nil {
panic("hkdfchacha20poly1305: failed to read chachaNonce from hkdf")
}
return
}

View File

@@ -0,0 +1,88 @@
package benchmarking
import (
"io"
"testing"
"github.com/tendermint/tendermint/crypto"
)
// The code in this file is adapted from agl/ed25519.
// As such it is under the following license.
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found at the bottom of this file.
type zeroReader struct{}
func (zeroReader) Read(buf []byte) (int, error) {
for i := range buf {
buf[i] = 0
}
return len(buf), nil
}
// BenchmarkKeyGeneration benchmarks the given key generation algorithm using
// a dummy reader.
func BenchmarkKeyGeneration(b *testing.B, GenerateKey func(reader io.Reader) crypto.PrivKey) {
var zero zeroReader
for i := 0; i < b.N; i++ {
GenerateKey(zero)
}
}
// BenchmarkSigning benchmarks the given signing algorithm using
// the provided privkey.
func BenchmarkSigning(b *testing.B, priv crypto.PrivKey) {
message := []byte("Hello, world!")
b.ResetTimer()
for i := 0; i < b.N; i++ {
priv.Sign(message)
}
}
// BenchmarkVerification benchmarks the given verification algorithm using
// the provided privkey on a constant message.
func BenchmarkVerification(b *testing.B, priv crypto.PrivKey) {
pub := priv.PubKey()
// use a short message, so this time doesn't get dominated by hashing.
message := []byte("Hello, world!")
signature, err := priv.Sign(message)
if err != nil {
b.Fatal(err)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
pub.VerifyBytes(message, signature)
}
}
// Below is the aforementioned license.
// Copyright (c) 2012 The Go Authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@@ -6,8 +6,9 @@ import (
cmn "github.com/tendermint/tendermint/libs/common"
. "github.com/tendermint/tendermint/libs/test"
"github.com/tendermint/tendermint/crypto/tmhash"
"testing"
"github.com/tendermint/tendermint/crypto/tmhash"
)
type testItem []byte

View File

@@ -0,0 +1,233 @@
package bitarray
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"regexp"
"strings"
)
// CompactBitArray is an implementation of a space efficient bit array.
// This is used to ensure that the encoded data takes up a minimal amount of
// space after amino encoding.
// This is not thread safe, and is not intended for concurrent usage.
type CompactBitArray struct {
ExtraBitsStored byte `json:"extra_bits"` // The number of extra bits in elems.
Elems []byte `json:"bits"`
}
// NewCompactBitArray returns a new compact bit array.
// It returns nil if the number of bits is zero.
func NewCompactBitArray(bits int) *CompactBitArray {
if bits <= 0 {
return nil
}
return &CompactBitArray{
ExtraBitsStored: byte(bits % 8),
Elems: make([]byte, (bits+7)/8),
}
}
// Size returns the number of bits in the bitarray
func (bA *CompactBitArray) Size() int {
if bA == nil {
return 0
} else if bA.ExtraBitsStored == byte(0) {
return len(bA.Elems) * 8
}
// num_bits = 8*num_full_bytes + overflow_in_last_byte
// num_full_bytes = (len(bA.Elems)-1)
return (len(bA.Elems)-1)*8 + int(bA.ExtraBitsStored)
}
// GetIndex returns the bit at index i within the bit array.
// The behavior is undefined if i >= bA.Size()
func (bA *CompactBitArray) GetIndex(i int) bool {
if bA == nil {
return false
}
if i >= bA.Size() {
return false
}
return bA.Elems[i>>3]&(uint8(1)<<uint8(7-(i%8))) > 0
}
// SetIndex sets the bit at index i within the bit array.
// The behavior is undefined if i >= bA.Size()
func (bA *CompactBitArray) SetIndex(i int, v bool) bool {
if bA == nil {
return false
}
if i >= bA.Size() {
return false
}
if v {
bA.Elems[i>>3] |= (uint8(1) << uint8(7-(i%8)))
} else {
bA.Elems[i>>3] &= ^(uint8(1) << uint8(7-(i%8)))
}
return true
}
// NumTrueBitsBefore returns the number of bits set to true before the
// given index. E.g. if bA = _XX__XX, NumTrueBitsBefore(4) = 2, since
// there are two bits set to true before index 4.
func (bA *CompactBitArray) NumTrueBitsBefore(index int) int {
numTrueValues := 0
for i := 0; i < index; i++ {
if bA.GetIndex(i) {
numTrueValues++
}
}
return numTrueValues
}
// Copy returns a copy of the provided bit array.
func (bA *CompactBitArray) Copy() *CompactBitArray {
if bA == nil {
return nil
}
c := make([]byte, len(bA.Elems))
copy(c, bA.Elems)
return &CompactBitArray{
ExtraBitsStored: bA.ExtraBitsStored,
Elems: c,
}
}
// String returns a string representation of CompactBitArray: BA{<bit-string>},
// where <bit-string> is a sequence of 'x' (1) and '_' (0).
// The <bit-string> includes spaces and newlines to aid readability.
// For a simple sequence of 'x' and '_' characters with no spaces or newlines,
// see the MarshalJSON() method.
// Example: "BA{_x_}" or "nil-BitArray" for nil.
func (bA *CompactBitArray) String() string {
return bA.StringIndented("")
}
// StringIndented returns the same thing as String(), but applies the indent
// at every 10th bit, and twice at every 50th bit.
func (bA *CompactBitArray) StringIndented(indent string) string {
if bA == nil {
return "nil-BitArray"
}
lines := []string{}
bits := ""
size := bA.Size()
for i := 0; i < size; i++ {
if bA.GetIndex(i) {
bits += "x"
} else {
bits += "_"
}
if i%100 == 99 {
lines = append(lines, bits)
bits = ""
}
if i%10 == 9 {
bits += indent
}
if i%50 == 49 {
bits += indent
}
}
if len(bits) > 0 {
lines = append(lines, bits)
}
return fmt.Sprintf("BA{%v:%v}", size, strings.Join(lines, indent))
}
// MarshalJSON implements json.Marshaler interface by marshaling bit array
// using a custom format: a string of '_' and 'x' characters, where 'x' denotes a 1 bit.
func (bA *CompactBitArray) MarshalJSON() ([]byte, error) {
if bA == nil {
return []byte("null"), nil
}
bits := `"`
size := bA.Size()
for i := 0; i < size; i++ {
if bA.GetIndex(i) {
bits += `x`
} else {
bits += `_`
}
}
bits += `"`
return []byte(bits), nil
}
var bitArrayJSONRegexp = regexp.MustCompile(`\A"([_x]*)"\z`)
// UnmarshalJSON implements json.Unmarshaler interface by unmarshaling a custom
// JSON description.
func (bA *CompactBitArray) UnmarshalJSON(bz []byte) error {
b := string(bz)
if b == "null" {
// This is required e.g. for encoding/json when decoding
// into a pointer with pre-allocated BitArray.
bA.ExtraBitsStored = 0
bA.Elems = nil
return nil
}
// Validate 'b'.
match := bitArrayJSONRegexp.FindStringSubmatch(b)
if match == nil {
return fmt.Errorf("BitArray in JSON should be a string of format %q but got %s", bitArrayJSONRegexp.String(), b)
}
bits := match[1]
// Construct new CompactBitArray and copy over.
numBits := len(bits)
bA2 := NewCompactBitArray(numBits)
for i := 0; i < numBits; i++ {
if bits[i] == 'x' {
bA2.SetIndex(i, true)
}
}
*bA = *bA2
return nil
}
// CompactMarshal is a space efficient encoding for CompactBitArray.
// It is not amino compatible.
func (bA *CompactBitArray) CompactMarshal() []byte {
size := bA.Size()
if size <= 0 {
return []byte("null")
}
bz := make([]byte, 0, size/8)
// The length prefix encodes the number of bits, not the number of bytes. This
// costs an extra 3-4 bits in the varint, as opposed to encoding the number of
// bytes (saving those 3-4 bits) but then needing a full extra byte for the offset.
bz = appendUvarint(bz, uint64(size))
bz = append(bz, bA.Elems...)
return bz
}
// CompactUnmarshal is a space efficient decoding for CompactBitArray.
// It is not amino compatible.
func CompactUnmarshal(bz []byte) (*CompactBitArray, error) {
if len(bz) < 2 {
return nil, errors.New("compact bit array: invalid compact unmarshal size")
} else if bytes.Equal(bz, []byte("null")) {
return NewCompactBitArray(0), nil
}
size, n := binary.Uvarint(bz)
bz = bz[n:]
if len(bz) != int(size+7)/8 {
return nil, errors.New("compact bit array: invalid compact unmarshal size")
}
bA := &CompactBitArray{byte(int(size % 8)), bz}
return bA, nil
}
func appendUvarint(b []byte, x uint64) []byte {
var a [binary.MaxVarintLen64]byte
n := binary.PutUvarint(a[:], x)
return append(b, a[:n]...)
}
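For reference, a small sketch of the compact encoding this file implements; the expected bytes mirror the table-driven test below, and the example name is illustrative:

```go
package bitarray_test

import (
	"fmt"

	"github.com/tendermint/tendermint/crypto/multisig/bitarray"
)

func ExampleCompactBitArray_CompactMarshal() {
	ba := bitarray.NewCompactBitArray(5)
	ba.SetIndex(0, true)
	ba.SetIndex(1, true)

	// The compact form is a uvarint bit count (5) followed by the packed
	// bits 0b11000000; the unused trailing bits stay zero.
	fmt.Println(ba.CompactMarshal())
	// Output: [5 192]
}
```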

View File

@@ -0,0 +1,196 @@
package bitarray
import (
"encoding/json"
"math/rand"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
cmn "github.com/tendermint/tendermint/libs/common"
)
func randCompactBitArray(bits int) (*CompactBitArray, []byte) {
numBytes := (bits + 7) / 8
src := cmn.RandBytes((bits + 7) / 8)
bA := NewCompactBitArray(bits)
for i := 0; i < numBytes-1; i++ {
for j := uint8(0); j < 8; j++ {
bA.SetIndex(i*8+int(j), src[i]&(uint8(1)<<(8-j)) > 0)
}
}
// Set remaining bits
for i := uint8(0); i < 8-uint8(bA.ExtraBitsStored); i++ {
bA.SetIndex(numBytes*8+int(i), src[numBytes-1]&(uint8(1)<<(8-i)) > 0)
}
return bA, src
}
func TestNewBitArrayNeverCrashesOnNegatives(t *testing.T) {
bitList := []int{-127, -128, -1 << 31}
for _, bits := range bitList {
bA := NewCompactBitArray(bits)
require.Nil(t, bA)
}
}
func TestJSONMarshalUnmarshal(t *testing.T) {
bA1 := NewCompactBitArray(0)
bA2 := NewCompactBitArray(1)
bA3 := NewCompactBitArray(1)
bA3.SetIndex(0, true)
bA4 := NewCompactBitArray(5)
bA4.SetIndex(0, true)
bA4.SetIndex(1, true)
bA5 := NewCompactBitArray(9)
bA5.SetIndex(0, true)
bA5.SetIndex(1, true)
bA5.SetIndex(8, true)
bA6 := NewCompactBitArray(16)
bA6.SetIndex(0, true)
bA6.SetIndex(1, true)
bA6.SetIndex(8, false)
bA6.SetIndex(15, true)
testCases := []struct {
bA *CompactBitArray
marshalledBA string
}{
{nil, `null`},
{bA1, `null`},
{bA2, `"_"`},
{bA3, `"x"`},
{bA4, `"xx___"`},
{bA5, `"xx______x"`},
{bA6, `"xx_____________x"`},
}
for _, tc := range testCases {
t.Run(tc.bA.String(), func(t *testing.T) {
bz, err := json.Marshal(tc.bA)
require.NoError(t, err)
assert.Equal(t, tc.marshalledBA, string(bz))
var unmarshalledBA *CompactBitArray
err = json.Unmarshal(bz, &unmarshalledBA)
require.NoError(t, err)
if tc.bA == nil {
require.Nil(t, unmarshalledBA)
} else {
require.NotNil(t, unmarshalledBA)
assert.EqualValues(t, tc.bA.Elems, unmarshalledBA.Elems)
if assert.EqualValues(t, tc.bA.String(), unmarshalledBA.String()) {
assert.EqualValues(t, tc.bA.Elems, unmarshalledBA.Elems)
}
}
})
}
}
func TestCompactMarshalUnmarshal(t *testing.T) {
bA1 := NewCompactBitArray(0)
bA2 := NewCompactBitArray(1)
bA3 := NewCompactBitArray(1)
bA3.SetIndex(0, true)
bA4 := NewCompactBitArray(5)
bA4.SetIndex(0, true)
bA4.SetIndex(1, true)
bA5 := NewCompactBitArray(9)
bA5.SetIndex(0, true)
bA5.SetIndex(1, true)
bA5.SetIndex(8, true)
bA6 := NewCompactBitArray(16)
bA6.SetIndex(0, true)
bA6.SetIndex(1, true)
bA6.SetIndex(8, false)
bA6.SetIndex(15, true)
testCases := []struct {
bA *CompactBitArray
marshalledBA []byte
}{
{nil, []byte("null")},
{bA1, []byte("null")},
{bA2, []byte{byte(1), byte(0)}},
{bA3, []byte{byte(1), byte(128)}},
{bA4, []byte{byte(5), byte(192)}},
{bA5, []byte{byte(9), byte(192), byte(128)}},
{bA6, []byte{byte(16), byte(192), byte(1)}},
}
for _, tc := range testCases {
t.Run(tc.bA.String(), func(t *testing.T) {
bz := tc.bA.CompactMarshal()
assert.Equal(t, tc.marshalledBA, bz)
unmarshalledBA, err := CompactUnmarshal(bz)
require.NoError(t, err)
if tc.bA == nil {
require.Nil(t, unmarshalledBA)
} else {
require.NotNil(t, unmarshalledBA)
assert.EqualValues(t, tc.bA.Elems, unmarshalledBA.Elems)
if assert.EqualValues(t, tc.bA.String(), unmarshalledBA.String()) {
assert.EqualValues(t, tc.bA.Elems, unmarshalledBA.Elems)
}
}
})
}
}
func TestCompactBitArrayNumOfTrueBitsBefore(t *testing.T) {
testCases := []struct {
marshalledBA string
bAIndex []int
trueValueIndex []int
}{
{`"_____"`, []int{0, 1, 2, 3, 4}, []int{0, 0, 0, 0, 0}},
{`"x"`, []int{0}, []int{0}},
{`"_x"`, []int{1}, []int{0}},
{`"x___xxxx"`, []int{0, 4, 5, 6, 7}, []int{0, 1, 2, 3, 4}},
{`"__x_xx_x__x_x___"`, []int{2, 4, 5, 7, 10, 12}, []int{0, 1, 2, 3, 4, 5}},
{`"______________xx"`, []int{14, 15}, []int{0, 1}},
}
for tcIndex, tc := range testCases {
t.Run(tc.marshalledBA, func(t *testing.T) {
var bA *CompactBitArray
err := json.Unmarshal([]byte(tc.marshalledBA), &bA)
require.NoError(t, err)
for i := 0; i < len(tc.bAIndex); i++ {
require.Equal(t, tc.trueValueIndex[i], bA.NumTrueBitsBefore(tc.bAIndex[i]), "tc %d, i %d", tcIndex, i)
}
})
}
}
func TestCompactBitArrayGetSetIndex(t *testing.T) {
r := rand.New(rand.NewSource(100))
numTests := 10
numBitsPerArr := 100
for i := 0; i < numTests; i++ {
bits := r.Intn(1000)
bA, _ := randCompactBitArray(bits)
for j := 0; j < numBitsPerArr; j++ {
copy := bA.Copy()
index := r.Intn(bits)
val := (r.Int63() % 2) == 0
bA.SetIndex(index, val)
require.Equal(t, val, bA.GetIndex(index), "bA.SetIndex(%d, %v) failed on bit array: %s", index, val, copy)
}
}
}

View File

@@ -0,0 +1,70 @@
package multisig
import (
"errors"
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/crypto/multisig/bitarray"
)
// Multisignature is used to represent the signature object used in the multisigs.
// Sigs is a list of signatures, sorted by corresponding index.
type Multisignature struct {
BitArray *bitarray.CompactBitArray
Sigs [][]byte
}
// NewMultisig returns a new Multisignature of size n.
func NewMultisig(n int) *Multisignature {
// Default the signature list to have a capacity of two, since we can
// expect that most multisigs will require multiple signers.
return &Multisignature{bitarray.NewCompactBitArray(n), make([][]byte, 0, 2)}
}
// getIndex returns the index of pk in keys, or -1 if it is not found.
func getIndex(pk crypto.PubKey, keys []crypto.PubKey) int {
for i := 0; i < len(keys); i++ {
if pk.Equals(keys[i]) {
return i
}
}
return -1
}
// AddSignature adds a signature to the multisig, at the corresponding index.
// If the signature already exists, replace it.
func (mSig *Multisignature) AddSignature(sig []byte, index int) {
newSigIndex := mSig.BitArray.NumTrueBitsBefore(index)
// Signature already exists, just replace the value there
if mSig.BitArray.GetIndex(index) {
mSig.Sigs[newSigIndex] = sig
return
}
mSig.BitArray.SetIndex(index, true)
// Optimization if the index is the greatest index
if newSigIndex == len(mSig.Sigs) {
mSig.Sigs = append(mSig.Sigs, sig)
return
}
// Expand slice by one with a dummy element, move all elements after newSigIndex
// over by one, then place the new signature in that gap.
mSig.Sigs = append(mSig.Sigs, make([]byte, 0))
copy(mSig.Sigs[newSigIndex+1:], mSig.Sigs[newSigIndex:])
mSig.Sigs[newSigIndex] = sig
}
// AddSignatureFromPubKey adds a signature to the multisig,
// at the index in keys corresponding to the provided pubkey.
func (mSig *Multisignature) AddSignatureFromPubKey(sig []byte, pubkey crypto.PubKey, keys []crypto.PubKey) error {
index := getIndex(pubkey, keys)
if index == -1 {
return errors.New("provided key didn't exist in pubkeys")
}
mSig.AddSignature(sig, index)
return nil
}
// Marshal the multisignature with amino
func (mSig *Multisignature) Marshal() []byte {
return cdc.MustMarshalBinaryBare(mSig)
}

View File

@@ -0,0 +1,92 @@
package multisig
import (
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/crypto/tmhash"
)
// PubKeyMultisigThreshold implements a K of N threshold multisig.
type PubKeyMultisigThreshold struct {
K uint `json:"threshold"`
PubKeys []crypto.PubKey `json:"pubkeys"`
}
var _ crypto.PubKey = &PubKeyMultisigThreshold{}
// NewPubKeyMultisigThreshold returns a new PubKeyMultisigThreshold.
// Panics if len(pubkeys) < k or 0 >= k.
func NewPubKeyMultisigThreshold(k int, pubkeys []crypto.PubKey) crypto.PubKey {
if k <= 0 {
panic("threshold k of n multisignature: k <= 0")
}
if len(pubkeys) < k {
panic("threshold k of n multisignature: len(pubkeys) < k")
}
return &PubKeyMultisigThreshold{uint(k), pubkeys}
}
// VerifyBytes expects sig to be an amino encoded version of a MultiSignature.
// Returns true iff the multisignature contains k or more signatures
// for the correct corresponding keys,
// and all signatures are valid. (Not just k of the signatures)
// The multisig uses a bitarray, so multiple signatures for the same key are not
// a concern.
func (pk *PubKeyMultisigThreshold) VerifyBytes(msg []byte, marshalledSig []byte) bool {
var sig *Multisignature
err := cdc.UnmarshalBinaryBare(marshalledSig, &sig)
if err != nil {
return false
}
size := sig.BitArray.Size()
// ensure bit array is the correct size
if len(pk.PubKeys) != size {
return false
}
// ensure size of signature list
if len(sig.Sigs) < int(pk.K) || len(sig.Sigs) > size {
return false
}
// ensure at least k signatures are set
if sig.BitArray.NumTrueBitsBefore(size) < int(pk.K) {
return false
}
// index in the list of signatures which we are concerned with.
sigIndex := 0
for i := 0; i < size; i++ {
if sig.BitArray.GetIndex(i) {
if !pk.PubKeys[i].VerifyBytes(msg, sig.Sigs[sigIndex]) {
return false
}
sigIndex++
}
}
return true
}
// Bytes returns the amino encoded version of the PubKeyMultisigThreshold
func (pk *PubKeyMultisigThreshold) Bytes() []byte {
return cdc.MustMarshalBinaryBare(pk)
}
// Address returns tmhash(PubKeyMultisigThreshold.Bytes())
func (pk *PubKeyMultisigThreshold) Address() crypto.Address {
return crypto.Address(tmhash.Sum(pk.Bytes()))
}
// Equals returns true iff pk and other both have the same number of keys, and
// all constituent keys are the same, and in the same order.
func (pk *PubKeyMultisigThreshold) Equals(other crypto.PubKey) bool {
otherKey, sameType := other.(*PubKeyMultisigThreshold)
if !sameType {
return false
}
if pk.K != otherKey.K || len(pk.PubKeys) != len(otherKey.PubKeys) {
return false
}
for i := 0; i < len(pk.PubKeys); i++ {
if !pk.PubKeys[i].Equals(otherKey.PubKeys[i]) {
return false
}
}
return true
}
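A short usage sketch of the threshold key, assuming a 2-of-3 policy over ed25519 keys; it relies only on the constructors and methods defined in this changeset, and the identifier names are illustrative:

```go
package multisig_test

import (
	"fmt"

	"github.com/tendermint/tendermint/crypto"
	"github.com/tendermint/tendermint/crypto/ed25519"
	"github.com/tendermint/tendermint/crypto/multisig"
)

func Example_thresholdMultisig() {
	msg := []byte("transaction bytes")

	// Three signers, each with their own ed25519 key.
	privs := []crypto.PrivKey{ed25519.GenPrivKey(), ed25519.GenPrivKey(), ed25519.GenPrivKey()}
	pubs := []crypto.PubKey{privs[0].PubKey(), privs[1].PubKey(), privs[2].PubKey()}

	// 2-of-3 threshold public key.
	multisigPub := multisig.NewPubKeyMultisigThreshold(2, pubs)

	// Collect signatures from signers 0 and 2 into a Multisignature.
	sigs := multisig.NewMultisig(len(pubs))
	for _, i := range []int{0, 2} {
		sig, err := privs[i].Sign(msg)
		if err != nil {
			panic(err)
		}
		if err := sigs.AddSignatureFromPubKey(sig, pubs[i], pubs); err != nil {
			panic(err)
		}
	}

	// The amino-encoded Multisignature verifies once k valid signatures are present.
	fmt.Println(multisigPub.VerifyBytes(msg, sigs.Marshal()))
	// Output: true
}
```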

View File

@@ -0,0 +1,112 @@
package multisig
import (
"math/rand"
"testing"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/crypto/ed25519"
"github.com/tendermint/tendermint/crypto/secp256k1"
)
// This tests multisig functionality, but it expects the first k signatures to be valid
// TODO: Adapt it to give more flexibility about first k signatures being valid
func TestThresholdMultisigValidCases(t *testing.T) {
pkSet1, sigSet1 := generatePubKeysAndSignatures(5, []byte{1, 2, 3, 4})
cases := []struct {
msg []byte
k int
pubkeys []crypto.PubKey
signingIndices []int
// signatures should be the same size as signingIndices.
signatures [][]byte
passAfterKSignatures []bool
}{
{
msg: []byte{1, 2, 3, 4},
k: 2,
pubkeys: pkSet1,
signingIndices: []int{0, 3, 1},
signatures: sigSet1,
passAfterKSignatures: []bool{false},
},
}
for tcIndex, tc := range cases {
multisigKey := NewPubKeyMultisigThreshold(tc.k, tc.pubkeys)
multisignature := NewMultisig(len(tc.pubkeys))
for i := 0; i < tc.k-1; i++ {
signingIndex := tc.signingIndices[i]
multisignature.AddSignatureFromPubKey(tc.signatures[signingIndex], tc.pubkeys[signingIndex], tc.pubkeys)
require.False(t, multisigKey.VerifyBytes(tc.msg, multisignature.Marshal()),
"multisig passed when i < k, tc %d, i %d", tcIndex, i)
multisignature.AddSignatureFromPubKey(tc.signatures[signingIndex], tc.pubkeys[signingIndex], tc.pubkeys)
require.Equal(t, i+1, len(multisignature.Sigs),
"adding a signature for the same pubkey twice increased signature count by 2, tc %d", tcIndex)
}
require.False(t, multisigKey.VerifyBytes(tc.msg, multisignature.Marshal()),
"multisig passed with k - 1 sigs, tc %d", tcIndex)
multisignature.AddSignatureFromPubKey(tc.signatures[tc.signingIndices[tc.k]], tc.pubkeys[tc.signingIndices[tc.k]], tc.pubkeys)
require.True(t, multisigKey.VerifyBytes(tc.msg, multisignature.Marshal()),
"multisig failed after k good signatures, tc %d", tcIndex)
for i := tc.k + 1; i < len(tc.signingIndices); i++ {
signingIndex := tc.signingIndices[i]
multisignature.AddSignatureFromPubKey(tc.signatures[signingIndex], tc.pubkeys[signingIndex], tc.pubkeys)
require.Equal(t, tc.passAfterKSignatures[i-tc.k-1],
multisigKey.VerifyBytes(tc.msg, multisignature.Marshal()),
"multisig didn't verify as expected after k sigs, tc %d, i %d", tcIndex, i)
multisignature.AddSignatureFromPubKey(tc.signatures[signingIndex], tc.pubkeys[signingIndex], tc.pubkeys)
require.Equal(t, i+1, len(multisignature.Sigs),
"adding a signature for the same pubkey twice increased signature count by 2, tc %d", tcIndex)
}
}
}
// TODO: Fully replace this test with table driven tests
func TestThresholdMultisigDuplicateSignatures(t *testing.T) {
msg := []byte{1, 2, 3, 4, 5}
pubkeys, sigs := generatePubKeysAndSignatures(5, msg)
multisigKey := NewPubKeyMultisigThreshold(2, pubkeys)
multisignature := NewMultisig(5)
require.False(t, multisigKey.VerifyBytes(msg, multisignature.Marshal()))
multisignature.AddSignatureFromPubKey(sigs[0], pubkeys[0], pubkeys)
// Add second signature manually
multisignature.Sigs = append(multisignature.Sigs, sigs[0])
require.False(t, multisigKey.VerifyBytes(msg, multisignature.Marshal()))
}
// TODO: Fully replace this test with table driven tests
func TestMultiSigPubKeyEquality(t *testing.T) {
msg := []byte{1, 2, 3, 4}
pubkeys, _ := generatePubKeysAndSignatures(5, msg)
multisigKey := NewPubKeyMultisigThreshold(2, pubkeys)
var unmarshalledMultisig *PubKeyMultisigThreshold
cdc.MustUnmarshalBinaryBare(multisigKey.Bytes(), &unmarshalledMultisig)
require.True(t, multisigKey.Equals(unmarshalledMultisig))
// Ensure that reordering pubkeys is treated as a different pubkey
pubkeysCpy := make([]crypto.PubKey, 5)
copy(pubkeysCpy, pubkeys)
pubkeysCpy[4] = pubkeys[3]
pubkeysCpy[3] = pubkeys[4]
multisigKey2 := NewPubKeyMultisigThreshold(2, pubkeysCpy)
require.False(t, multisigKey.Equals(multisigKey2))
}
func generatePubKeysAndSignatures(n int, msg []byte) (pubkeys []crypto.PubKey, signatures [][]byte) {
pubkeys = make([]crypto.PubKey, n)
signatures = make([][]byte, n)
for i := 0; i < n; i++ {
var privkey crypto.PrivKey
if rand.Int63()%2 == 0 {
privkey = ed25519.GenPrivKey()
} else {
privkey = secp256k1.GenPrivKey()
}
pubkeys[i] = privkey.PubKey()
signatures[i], _ = privkey.Sign(msg)
}
return
}

crypto/multisig/wire.go (new file, 26 lines)
View File

@@ -0,0 +1,26 @@
package multisig
import (
amino "github.com/tendermint/go-amino"
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/crypto/ed25519"
"github.com/tendermint/tendermint/crypto/secp256k1"
)
// TODO: Figure out API for others to either add their own pubkey types, or
// to make verify / marshal accept a cdc.
const (
PubKeyMultisigThresholdAminoRoute = "tendermint/PubKeyMultisigThreshold"
)
var cdc = amino.NewCodec()
func init() {
cdc.RegisterInterface((*crypto.PubKey)(nil), nil)
cdc.RegisterConcrete(PubKeyMultisigThreshold{},
PubKeyMultisigThresholdAminoRoute, nil)
cdc.RegisterConcrete(ed25519.PubKeyEd25519{},
ed25519.PubKeyAminoRoute, nil)
cdc.RegisterConcrete(secp256k1.PubKeySecp256k1{},
secp256k1.PubKeyAminoRoute, nil)
}

View File

@@ -1,164 +0,0 @@
package crypto
import (
"crypto/subtle"
secp256k1 "github.com/btcsuite/btcd/btcec"
"github.com/tendermint/ed25519"
"github.com/tendermint/ed25519/extra25519"
)
func PrivKeyFromBytes(privKeyBytes []byte) (privKey PrivKey, err error) {
err = cdc.UnmarshalBinaryBare(privKeyBytes, &privKey)
return
}
//----------------------------------------
type PrivKey interface {
Bytes() []byte
Sign(msg []byte) (Signature, error)
PubKey() PubKey
Equals(PrivKey) bool
}
//-------------------------------------
var _ PrivKey = PrivKeyEd25519{}
// Implements PrivKey
type PrivKeyEd25519 [64]byte
func (privKey PrivKeyEd25519) Bytes() []byte {
return cdc.MustMarshalBinaryBare(privKey)
}
func (privKey PrivKeyEd25519) Sign(msg []byte) (Signature, error) {
privKeyBytes := [64]byte(privKey)
signatureBytes := ed25519.Sign(&privKeyBytes, msg)
return SignatureEd25519(*signatureBytes), nil
}
func (privKey PrivKeyEd25519) PubKey() PubKey {
privKeyBytes := [64]byte(privKey)
pubBytes := *ed25519.MakePublicKey(&privKeyBytes)
return PubKeyEd25519(pubBytes)
}
// Equals - you probably don't need to use this.
// Runs in constant time based on length of the keys.
func (privKey PrivKeyEd25519) Equals(other PrivKey) bool {
if otherEd, ok := other.(PrivKeyEd25519); ok {
return subtle.ConstantTimeCompare(privKey[:], otherEd[:]) == 1
} else {
return false
}
}
func (privKey PrivKeyEd25519) ToCurve25519() *[32]byte {
keyCurve25519 := new([32]byte)
privKeyBytes := [64]byte(privKey)
extra25519.PrivateKeyToCurve25519(keyCurve25519, &privKeyBytes)
return keyCurve25519
}
// Deterministically generates new priv-key bytes from key.
func (privKey PrivKeyEd25519) Generate(index int) PrivKeyEd25519 {
bz, err := cdc.MarshalBinaryBare(struct {
PrivKey [64]byte
Index int
}{privKey, index})
if err != nil {
panic(err)
}
newBytes := Sha256(bz)
newKey := new([64]byte)
copy(newKey[:32], newBytes)
ed25519.MakePublicKey(newKey)
return PrivKeyEd25519(*newKey)
}
func GenPrivKeyEd25519() PrivKeyEd25519 {
privKeyBytes := new([64]byte)
copy(privKeyBytes[:32], CRandBytes(32))
ed25519.MakePublicKey(privKeyBytes)
return PrivKeyEd25519(*privKeyBytes)
}
// NOTE: secret should be the output of a KDF like bcrypt,
// if it's derived from user input.
func GenPrivKeyEd25519FromSecret(secret []byte) PrivKeyEd25519 {
privKey32 := Sha256(secret) // Not Ripemd160 because we want 32 bytes.
privKeyBytes := new([64]byte)
copy(privKeyBytes[:32], privKey32)
ed25519.MakePublicKey(privKeyBytes)
return PrivKeyEd25519(*privKeyBytes)
}
//-------------------------------------
var _ PrivKey = PrivKeySecp256k1{}
// Implements PrivKey
type PrivKeySecp256k1 [32]byte
func (privKey PrivKeySecp256k1) Bytes() []byte {
return cdc.MustMarshalBinaryBare(privKey)
}
func (privKey PrivKeySecp256k1) Sign(msg []byte) (Signature, error) {
priv__, _ := secp256k1.PrivKeyFromBytes(secp256k1.S256(), privKey[:])
sig__, err := priv__.Sign(Sha256(msg))
if err != nil {
return nil, err
}
return SignatureSecp256k1(sig__.Serialize()), nil
}
func (privKey PrivKeySecp256k1) PubKey() PubKey {
_, pub__ := secp256k1.PrivKeyFromBytes(secp256k1.S256(), privKey[:])
var pub PubKeySecp256k1
copy(pub[:], pub__.SerializeCompressed())
return pub
}
// Equals - you probably don't need to use this.
// Runs in constant time based on length of the keys.
func (privKey PrivKeySecp256k1) Equals(other PrivKey) bool {
if otherSecp, ok := other.(PrivKeySecp256k1); ok {
return subtle.ConstantTimeCompare(privKey[:], otherSecp[:]) == 1
} else {
return false
}
}
/*
// Deterministically generates new priv-key bytes from key.
func (key PrivKeySecp256k1) Generate(index int) PrivKeySecp256k1 {
newBytes := cdc.BinarySha256(struct {
PrivKey [64]byte
Index int
}{key, index})
var newKey [64]byte
copy(newKey[:], newBytes)
return PrivKeySecp256k1(newKey)
}
*/
func GenPrivKeySecp256k1() PrivKeySecp256k1 {
privKeyBytes := [32]byte{}
copy(privKeyBytes[:], CRandBytes(32))
priv, _ := secp256k1.PrivKeyFromBytes(secp256k1.S256(), privKeyBytes[:])
copy(privKeyBytes[:], priv.Serialize())
return PrivKeySecp256k1(privKeyBytes)
}
// NOTE: secret should be the output of a KDF like bcrypt,
// if it's derived from user input.
func GenPrivKeySecp256k1FromSecret(secret []byte) PrivKeySecp256k1 {
privKey32 := Sha256(secret) // Not Ripemd160 because we want 32 bytes.
priv, _ := secp256k1.PrivKeyFromBytes(secp256k1.S256(), privKey32)
privKeyBytes := [32]byte{}
copy(privKeyBytes[:], priv.Serialize())
return PrivKeySecp256k1(privKeyBytes)
}

View File

@@ -1,60 +0,0 @@
package crypto_test
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/tendermint/tendermint/crypto"
)
func TestGeneratePrivKey(t *testing.T) {
testPriv := crypto.GenPrivKeyEd25519()
testGenerate := testPriv.Generate(1)
signBytes := []byte("something to sign")
pub := testGenerate.PubKey()
sig, err := testGenerate.Sign(signBytes)
assert.NoError(t, err)
assert.True(t, pub.VerifyBytes(signBytes, sig))
}
/*
type BadKey struct {
PrivKeyEd25519
}
func TestReadPrivKey(t *testing.T) {
assert, require := assert.New(t), require.New(t)
// garbage in, garbage out
garbage := []byte("hjgewugfbiewgofwgewr")
XXX This test wants to register BadKey globally to crypto,
but we don't want to support that.
_, err := PrivKeyFromBytes(garbage)
require.Error(err)
edKey := GenPrivKeyEd25519()
badKey := BadKey{edKey}
cases := []struct {
key PrivKey
valid bool
}{
{edKey, true},
{badKey, false},
}
for i, tc := range cases {
data := tc.key.Bytes()
fmt.Println(">>>", data)
key, err := PrivKeyFromBytes(data)
fmt.Printf("!!! %#v\n", key, err)
if tc.valid {
assert.NoError(err, "%d", i)
assert.Equal(tc.key, key, "%d", i)
} else {
assert.Error(err, "%d: %#v", i, key)
}
}
}
*/

Some files were not shown because too many files have changed in this diff.