Compare commits

...

127 Commits

Author SHA1 Message Date
Zarko Milosevic
4accdb5f59 Example of client send task and monitor task 2018-04-19 12:34:42 +02:00
Anton Kaliaev
ece3f678da [docs/spec] update msg type and Tendermint behavior (#1468)
Refs #1422
2018-04-17 19:38:10 +02:00
Ethan Buchman
45a05b4726 Merge pull request #1461 from tendermint/update-changelog
update changelog
2018-04-17 09:56:53 +02:00
Anton Kaliaev
1706ce6f7f update changelog for 0.19.0 release 2018-04-13 10:50:34 +02:00
Jae Kwon
d0beaba7e8 Bump version to 0.19.0 2018-04-13 01:32:47 -07:00
Anton Kaliaev
c28784de5e Merge pull request #1453 from tendermint/fix-localnet
Fix permissions and folder structure for localnet
2018-04-12 17:50:08 +02:00
Anton Kaliaev
d06390638d [localnet] use routable IPs 2018-04-12 16:02:31 +02:00
Anton Kaliaev
3a0edc561d log error from AddrBook#AddAddress in DialPeersAsync
Refs #1434
2018-04-12 15:51:17 +02:00
Anton Kaliaev
f8ed578325 [localnet] execute cmd from root
not secure, but we don't care because it's local tooling
2018-04-12 15:51:32 +02:00
Anton Kaliaev
5babaf9a88 [localnet] fix folder permissions errors 2018-04-12 15:51:17 +02:00
Greg Szabo
c0610b2c32 Greg/localnet (#1450)
* Added new Makefile targets for local testnet running using docker

* Added localnode docker image description, some documentation and refactored to use the tendermint testnet command

* Fixes for the new tendermint testnet command

* More fixes on tendermint testnet and docker-compose

* Changed logging

* Added missing targets to phony
2018-04-12 13:15:16 +02:00
Anton Kaliaev
1db2224241 do not use mask in testnet cmd (#1451)
fix node ids
2018-04-11 20:53:33 +02:00
Anton Kaliaev
0323b03daf improve testnet cmd (#1449)
* improve testnet cmd

* allow non-validators
* configurable prefix
* populating of persistent peers

* relax permissions

* cleanup output dir every time

* do not remove dir

* remove panic comments
2018-04-11 19:40:53 +02:00
Anton Kaliaev
379f9f875b update docs for latest develop (#1448)
* update docs for latest develop

* latest_app_hash, not app_hash
2018-04-11 17:36:14 +02:00
Thomas Corbière
ab00bf7c8b standardize PRNG access (#1411)
* replace math/rand with tmlibs equivalent.

* update tmlibs dependency
2018-04-11 11:38:30 +02:00
Bric3d
64879c1e6a 1417 status response format (#1424)
* Reformatted the ResultStatus

* fix misuse of ResultStatus.

* updated changelog

* Fixed tests

* fixed rpc helper tests

* fixed rpc_tests

* fixed mock/status_test

* fixed typo

* fixed omitempty on validatorstatus and the changelog

* fixed extra line in changelog

* Updated usage of the /status json response in tests after breaking changes

* Updated remaining tests with changes after searching the codebase for usage

* Reformatted the ResultStatus

* fix misuse of ResultStatus.

* updated changelog

* Fixed tests

* fixed rpc helper tests

* fixed rpc_tests

* fixed mock/status_test

* fixed typo

* fixed omitempty on validatorstatus and the changelog

* Updated usage of the /status json response in tests after breaking changes

* Updated remaining tests with changes after searching the codebase for usage

* rebased against develop
2018-04-11 10:38:34 +02:00
Vladislav Dmitriyev
7c22e47629 Replaced NodeInfo's pubkey to ID (#1443)
* Replaced NodeInfo PubKey to NodeID

* Fixed tests and replaced NodeID with ID

* Removed unnecessary method ID()

* Fixed codec_test.go

* Fixed codec_test.go

* Removed unnecessary bracket

* Fixed all tests

* Fixed peer_set_test.go

* Fixed peer_test.go

* Fixed common_test.go

* Fixed common_test.go

* Renamed node_id to id

* Removed peer.ID() from RPC net.go

* Replaced NodeInfo pubKey to ID

* Fixed codec_test.go

* Fixed peer_set_test.go

* Fix pex_reactor_test.go

* Refactored code for privateKey initiali

* Fixed peer_set_test.go

* Fixed test.proto and removed orphan string in codec_test.go

* Fixed pointer to a string

* generate node_key when running tendermint init

* [docs] prefix IPs with node IDs

Refs #1429

* gen_node_key cmd

* [docs/specification/secure-p2p] add a note about config

* fix data race

Closes #1442

```
WARNING: DATA RACE
Write at 0x00c4209de7c8 by goroutine 23:
  github.com/tendermint/tendermint/types.(*Block).fillHeader()
      /home/vagrant/go/src/github.com/tendermint/tendermint/types/block.go:88 +0x157
  github.com/tendermint/tendermint/types.(*Block).Hash()
      /home/vagrant/go/src/github.com/tendermint/tendermint/types/block.go:104 +0x121
  github.com/tendermint/tendermint/types.(*Block).HashesTo()
      /home/vagrant/go/src/github.com/tendermint/tendermint/types/block.go:135 +0x4f
  github.com/tendermint/tendermint/consensus.(*ConsensusState).enterPrecommit()
      /home/vagrant/go/src/github.com/tendermint/tendermint/consensus/state.go:1037 +0x182d
  github.com/tendermint/tendermint/consensus.(*ConsensusState).addVote()
      /home/vagrant/go/src/github.com/tendermint/tendermint/consensus/state.go:1425 +0x1a6c
  github.com/tendermint/tendermint/consensus.(*ConsensusState).tryAddVote()
      /home/vagrant/go/src/github.com/tendermint/tendermint/consensus/state.go:1318 +0x77
  github.com/tendermint/tendermint/consensus.(*ConsensusState).handleMsg()
      /home/vagrant/go/src/github.com/tendermint/tendermint/consensus/state.go:581 +0x7a9
  github.com/tendermint/tendermint/consensus.(*ConsensusState).receiveRoutine()
      /home/vagrant/go/src/github.com/tendermint/tendermint/consensus/state.go:539 +0x6c3

Previous read at 0x00c4209de7c8 by goroutine 47:
  github.com/tendermint/tendermint/vendor/github.com/tendermint/tmlibs/common.(*HexBytes).MarshalJSON()
      <autogenerated>:1 +0x52
  github.com/tendermint/tendermint/vendor/github.com/tendermint/go-amino.invokeMarshalJSON()
      /home/vagrant/go/src/github.com/tendermint/tendermint/vendor/github.com/tendermint/go-amino/json-encode.go:433 +0x88
  github.com/tendermint/tendermint/vendor/github.com/tendermint/go-amino.(*Codec)._encodeReflectJSON()
      /home/vagrant/go/src/github.com/tendermint/tendermint/vendor/github.com/tendermint/go-amino/json-encode.go:82 +0x8d2
  github.com/tendermint/tendermint/vendor/github.com/tendermint/go-amino.(*Codec).encodeReflectJSON()
      /home/vagrant/go/src/github.com/tendermint/tendermint/vendor/github.com/tendermint/go-amino/json-encode.go:50 +0x10e
  github.com/tendermint/tendermint/vendor/github.com/tendermint/go-amino.(*Codec).encodeReflectJSONStruct()
      /home/vagrant/go/src/github.com/tendermint/tendermint/vendor/github.com/tendermint/go-amino/json-encode.go:348 +0x539
  github.com/tendermint/tendermint/vendor/github.com/tendermint/go-amino.(*Codec)._encodeReflectJSON()
      /home/vagrant/go/src/github.com/tendermint/tendermint/vendor/github.com/tendermint/go-amino/json-encode.go:119 +0x83f
  github.com/tendermint/tendermint/vendor/github.com/tendermint/go-amino.(*Codec).encodeReflectJSON()
      /home/vagrant/go/src/github.com/tendermint/tendermint/vendor/github.com/tendermint/go-amino/json-encode.go:50 +0x10e
  github.com/tendermint/tendermint/vendor/github.com/tendermint/go-amino.(*Codec).encodeReflectJSONStruct()
      /home/vagrant/go/src/github.com/tendermint/tendermint/vendor/github.com/tendermint/go-amino/json-encode.go:348 +0x539
  github.com/tendermint/tendermint/vendor/github.com/tendermint/go-amino.(*Codec)._encodeReflectJSON()
      /home/vagrant/go/src/github.com/tendermint/tendermint/vendor/github.com/tendermint/go-amino/json-encode.go:119 +0x83f
  github.com/tendermint/tendermint/vendor/github.com/tendermint/go-amino.(*Codec).encodeReflectJSON()
      /home/vagrant/go/src/github.com/tendermint/tendermint/vendor/github.com/tendermint/go-amino/json-encode.go:50 +0x10e
  github.com/tendermint/tendermint/vendor/github.com/tendermint/go-amino.(*Codec).encodeReflectJSONStruct()
      /home/vagrant/go/src/github.com/tendermint/tendermint/vendor/github.com/tendermint/go-amino/json-encode.go:348 +0x539
  github.com/tendermint/tendermint/vendor/github.com/tendermint/go-amino.(*Codec)._encodeReflectJSON()
      /home/vagrant/go/src/github.com/tendermint/tendermint/vendor/github.com/tendermint/go-amino/json-encode.go:119 +0x83f
  github.com/tendermint/tendermint/vendor/github.com/tendermint/go-amino.(*Codec).encodeReflectJSON()
      /home/vagrant/go/src/github.com/tendermint/tendermint/vendor/github.com/tendermint/go-amino/json-encode.go:50 +0x10e
  github.com/tendermint/tendermint/vendor/github.com/tendermint/go-amino.(*Codec).encodeReflectJSONStruct()
      /home/vagrant/go/src/github.com/tendermint/tendermint/vendor/github.com/tendermint/go-amino/json-encode.go:348 +0x539
  github.com/tendermint/tendermint/vendor/github.com/tendermint/go-amino.(*Codec)._encodeReflectJSON()
      /home/vagrant/go/src/github.com/tendermint/tendermint/vendor/github.com/tendermint/go-amino/json-encode.go:119 +0x83f
  github.com/tendermint/tendermint/vendor/github.com/tendermint/go-amino.(*Codec).encodeReflectJSON()
      /home/vagrant/go/src/github.com/tendermint/tendermint/vendor/github.com/tendermint/go-amino/json-encode.go:50 +0x10e
  github.com/tendermint/tendermint/vendor/github.com/tendermint/go-amino.(*Codec).MarshalJSON()
      /home/vagrant/go/src/github.com/tendermint/tendermint/vendor/github.com/tendermint/go-amino/amino.go:296 +0x182
  github.com/tendermint/tendermint/rpc/lib/types.NewRPCSuccessResponse()
      /home/vagrant/go/src/github.com/tendermint/tendermint/rpc/lib/types/types.go:100 +0x12c
  github.com/tendermint/tendermint/rpc/lib/server.makeJSONRPCHandler.func1()
      /home/vagrant/go/src/github.com/tendermint/tendermint/rpc/lib/server/handlers.go:152 +0xab7
  net/http.HandlerFunc.ServeHTTP()
      /usr/lib/go-1.9/src/net/http/server.go:1918 +0x51
  net/http.(*ServeMux).ServeHTTP()
      /usr/lib/go-1.9/src/net/http/server.go:2254 +0xa2
  github.com/tendermint/tendermint/rpc/lib/server.RecoverAndLogHandler.func1()
      /home/vagrant/go/src/github.com/tendermint/tendermint/rpc/lib/server/http_server.go:138 +0x4fa
  net/http.HandlerFunc.ServeHTTP()
      /usr/lib/go-1.9/src/net/http/server.go:1918 +0x51
  net/http.serverHandler.ServeHTTP()
      /usr/lib/go-1.9/src/net/http/server.go:2619 +0xbc
  net/http.(*conn).serve()
      /usr/lib/go-1.9/src/net/http/server.go:1801 +0x83b

Goroutine 23 (running) created at:
  github.com/tendermint/tendermint/consensus.(*ConsensusState).OnStart()
      /home/vagrant/go/src/github.com/tendermint/tendermint/consensus/state.go:250 +0x35b
  github.com/tendermint/tendermint/vendor/github.com/tendermint/tmlibs/common.(*BaseService).Start()
      /home/vagrant/go/src/github.com/tendermint/tendermint/vendor/github.com/tendermint/tmlibs/common/service.go:130 +0x5fc
  github.com/tendermint/tendermint/consensus.(*ConsensusReactor).OnStart()
      /home/vagrant/go/src/github.com/tendermint/tendermint/consensus/reactor.go:69 +0x1b4
  github.com/tendermint/tendermint/vendor/github.com/tendermint/tmlibs/common.(*BaseService).Start()
      /home/vagrant/go/src/github.com/tendermint/tendermint/vendor/github.com/tendermint/tmlibs/common/service.go:130 +0x5fc
  github.com/tendermint/tendermint/consensus.(*ConsensusReactor).Start()
      <autogenerated>:1 +0x43
  github.com/tendermint/tendermint/p2p.(*Switch).OnStart()
      /home/vagrant/go/src/github.com/tendermint/tendermint/p2p/switch.go:177 +0x124
  github.com/tendermint/tendermint/vendor/github.com/tendermint/tmlibs/common.(*BaseService).Start()
      /home/vagrant/go/src/github.com/tendermint/tendermint/vendor/github.com/tendermint/tmlibs/common/service.go:130 +0x5fc
  github.com/tendermint/tendermint/node.(*Node).OnStart()
      /home/vagrant/go/src/github.com/tendermint/tendermint/node/node.go:416 +0xa1b
  github.com/tendermint/tendermint/vendor/github.com/tendermint/tmlibs/common.(*BaseService).Start()
      /home/vagrant/go/src/github.com/tendermint/tendermint/vendor/github.com/tendermint/tmlibs/common/service.go:130 +0x5fc
  github.com/tendermint/tendermint/rpc/test.StartTendermint()
      /home/vagrant/go/src/github.com/tendermint/tendermint/rpc/test/helpers.go:100 +0x5b
  github.com/tendermint/tendermint/rpc/client_test.TestMain()
      /home/vagrant/go/src/github.com/tendermint/tendermint/rpc/client/main_test.go:17 +0x4c
  main.main()
      github.com/tendermint/tendermint/rpc/client/_test/_testmain.go:76 +0x1cd

Goroutine 47 (running) created at:
  net/http.(*Server).Serve()
      /usr/lib/go-1.9/src/net/http/server.go:2720 +0x37c
  net/http.Serve()
      /usr/lib/go-1.9/src/net/http/server.go:2323 +0xe2
  github.com/tendermint/tendermint/rpc/lib/server.StartHTTPServer.func1()
      /home/vagrant/go/src/github.com/tendermint/tendermint/rpc/lib/server/http_server.go:35 +0xb3
```

* removed excessive comment

Refs https://github.com/tendermint/tendermint/pull/1446#discussion_r180353446

* use the tag interface for pubsub. (#1438)

* use the tag interface for pubsub.

* update tmlibs.

* Fix unresolved conflict.

* improve `show_node_id` (#1433)

* fix show_node_id

* make LoadNodeKey public

* make LoadNodeKey public

* remove if

* remove if
2018-04-11 10:11:11 +02:00
suyuhuang
384b3ea065 improve show_node_id (#1433)
* fix show_node_id

* make LoadNodeKey public

* make LoadNodeKey public

* remove if

* remove if
2018-04-10 16:03:51 +02:00
Thomas Corbière
6a48bd0c88 use the tag interface for pubsub. (#1438)
* use the tag interface for pubsub.

* update tmlibs.

* Fix unresolved conflict.
2018-04-10 16:03:03 +02:00
Ethan Buchman
d93e177a69 Merge pull request #1446 from tendermint/1442-data-race-fix-attempt
fix data race
2018-04-10 16:49:36 +03:00
Anton Kaliaev
cef053386b Merge pull request #1439 from tendermint/1429-add-docs-for-node-ids
docs: update docs to include IDs or set auth_enc to false
2018-04-10 11:46:48 +02:00
Anton Kaliaev
cca1dd8e3e removed excessive comment
Refs https://github.com/tendermint/tendermint/pull/1446#discussion_r180353446
2018-04-10 11:36:31 +02:00
Anton Kaliaev
26c38e770e fix data race
Closes #1442

```
WARNING: DATA RACE
Write at 0x00c4209de7c8 by goroutine 23:
  github.com/tendermint/tendermint/types.(*Block).fillHeader()
      /home/vagrant/go/src/github.com/tendermint/tendermint/types/block.go:88 +0x157
  github.com/tendermint/tendermint/types.(*Block).Hash()
      /home/vagrant/go/src/github.com/tendermint/tendermint/types/block.go:104 +0x121
  github.com/tendermint/tendermint/types.(*Block).HashesTo()
      /home/vagrant/go/src/github.com/tendermint/tendermint/types/block.go:135 +0x4f
  github.com/tendermint/tendermint/consensus.(*ConsensusState).enterPrecommit()
      /home/vagrant/go/src/github.com/tendermint/tendermint/consensus/state.go:1037 +0x182d
  github.com/tendermint/tendermint/consensus.(*ConsensusState).addVote()
      /home/vagrant/go/src/github.com/tendermint/tendermint/consensus/state.go:1425 +0x1a6c
  github.com/tendermint/tendermint/consensus.(*ConsensusState).tryAddVote()
      /home/vagrant/go/src/github.com/tendermint/tendermint/consensus/state.go:1318 +0x77
  github.com/tendermint/tendermint/consensus.(*ConsensusState).handleMsg()
      /home/vagrant/go/src/github.com/tendermint/tendermint/consensus/state.go:581 +0x7a9
  github.com/tendermint/tendermint/consensus.(*ConsensusState).receiveRoutine()
      /home/vagrant/go/src/github.com/tendermint/tendermint/consensus/state.go:539 +0x6c3

Previous read at 0x00c4209de7c8 by goroutine 47:
  github.com/tendermint/tendermint/vendor/github.com/tendermint/tmlibs/common.(*HexBytes).MarshalJSON()
      <autogenerated>:1 +0x52
  github.com/tendermint/tendermint/vendor/github.com/tendermint/go-amino.invokeMarshalJSON()
      /home/vagrant/go/src/github.com/tendermint/tendermint/vendor/github.com/tendermint/go-amino/json-encode.go:433 +0x88
  github.com/tendermint/tendermint/vendor/github.com/tendermint/go-amino.(*Codec)._encodeReflectJSON()
      /home/vagrant/go/src/github.com/tendermint/tendermint/vendor/github.com/tendermint/go-amino/json-encode.go:82 +0x8d2
  github.com/tendermint/tendermint/vendor/github.com/tendermint/go-amino.(*Codec).encodeReflectJSON()
      /home/vagrant/go/src/github.com/tendermint/tendermint/vendor/github.com/tendermint/go-amino/json-encode.go:50 +0x10e
  github.com/tendermint/tendermint/vendor/github.com/tendermint/go-amino.(*Codec).encodeReflectJSONStruct()
      /home/vagrant/go/src/github.com/tendermint/tendermint/vendor/github.com/tendermint/go-amino/json-encode.go:348 +0x539
  github.com/tendermint/tendermint/vendor/github.com/tendermint/go-amino.(*Codec)._encodeReflectJSON()
      /home/vagrant/go/src/github.com/tendermint/tendermint/vendor/github.com/tendermint/go-amino/json-encode.go:119 +0x83f
  github.com/tendermint/tendermint/vendor/github.com/tendermint/go-amino.(*Codec).encodeReflectJSON()
      /home/vagrant/go/src/github.com/tendermint/tendermint/vendor/github.com/tendermint/go-amino/json-encode.go:50 +0x10e
  github.com/tendermint/tendermint/vendor/github.com/tendermint/go-amino.(*Codec).encodeReflectJSONStruct()
      /home/vagrant/go/src/github.com/tendermint/tendermint/vendor/github.com/tendermint/go-amino/json-encode.go:348 +0x539
  github.com/tendermint/tendermint/vendor/github.com/tendermint/go-amino.(*Codec)._encodeReflectJSON()
      /home/vagrant/go/src/github.com/tendermint/tendermint/vendor/github.com/tendermint/go-amino/json-encode.go:119 +0x83f
  github.com/tendermint/tendermint/vendor/github.com/tendermint/go-amino.(*Codec).encodeReflectJSON()
      /home/vagrant/go/src/github.com/tendermint/tendermint/vendor/github.com/tendermint/go-amino/json-encode.go:50 +0x10e
  github.com/tendermint/tendermint/vendor/github.com/tendermint/go-amino.(*Codec).encodeReflectJSONStruct()
      /home/vagrant/go/src/github.com/tendermint/tendermint/vendor/github.com/tendermint/go-amino/json-encode.go:348 +0x539
  github.com/tendermint/tendermint/vendor/github.com/tendermint/go-amino.(*Codec)._encodeReflectJSON()
      /home/vagrant/go/src/github.com/tendermint/tendermint/vendor/github.com/tendermint/go-amino/json-encode.go:119 +0x83f
  github.com/tendermint/tendermint/vendor/github.com/tendermint/go-amino.(*Codec).encodeReflectJSON()
      /home/vagrant/go/src/github.com/tendermint/tendermint/vendor/github.com/tendermint/go-amino/json-encode.go:50 +0x10e
  github.com/tendermint/tendermint/vendor/github.com/tendermint/go-amino.(*Codec).encodeReflectJSONStruct()
      /home/vagrant/go/src/github.com/tendermint/tendermint/vendor/github.com/tendermint/go-amino/json-encode.go:348 +0x539
  github.com/tendermint/tendermint/vendor/github.com/tendermint/go-amino.(*Codec)._encodeReflectJSON()
      /home/vagrant/go/src/github.com/tendermint/tendermint/vendor/github.com/tendermint/go-amino/json-encode.go:119 +0x83f
  github.com/tendermint/tendermint/vendor/github.com/tendermint/go-amino.(*Codec).encodeReflectJSON()
      /home/vagrant/go/src/github.com/tendermint/tendermint/vendor/github.com/tendermint/go-amino/json-encode.go:50 +0x10e
  github.com/tendermint/tendermint/vendor/github.com/tendermint/go-amino.(*Codec).MarshalJSON()
      /home/vagrant/go/src/github.com/tendermint/tendermint/vendor/github.com/tendermint/go-amino/amino.go:296 +0x182
  github.com/tendermint/tendermint/rpc/lib/types.NewRPCSuccessResponse()
      /home/vagrant/go/src/github.com/tendermint/tendermint/rpc/lib/types/types.go:100 +0x12c
  github.com/tendermint/tendermint/rpc/lib/server.makeJSONRPCHandler.func1()
      /home/vagrant/go/src/github.com/tendermint/tendermint/rpc/lib/server/handlers.go:152 +0xab7
  net/http.HandlerFunc.ServeHTTP()
      /usr/lib/go-1.9/src/net/http/server.go:1918 +0x51
  net/http.(*ServeMux).ServeHTTP()
      /usr/lib/go-1.9/src/net/http/server.go:2254 +0xa2
  github.com/tendermint/tendermint/rpc/lib/server.RecoverAndLogHandler.func1()
      /home/vagrant/go/src/github.com/tendermint/tendermint/rpc/lib/server/http_server.go:138 +0x4fa
  net/http.HandlerFunc.ServeHTTP()
      /usr/lib/go-1.9/src/net/http/server.go:1918 +0x51
  net/http.serverHandler.ServeHTTP()
      /usr/lib/go-1.9/src/net/http/server.go:2619 +0xbc
  net/http.(*conn).serve()
      /usr/lib/go-1.9/src/net/http/server.go:1801 +0x83b

Goroutine 23 (running) created at:
  github.com/tendermint/tendermint/consensus.(*ConsensusState).OnStart()
      /home/vagrant/go/src/github.com/tendermint/tendermint/consensus/state.go:250 +0x35b
  github.com/tendermint/tendermint/vendor/github.com/tendermint/tmlibs/common.(*BaseService).Start()
      /home/vagrant/go/src/github.com/tendermint/tendermint/vendor/github.com/tendermint/tmlibs/common/service.go:130 +0x5fc
  github.com/tendermint/tendermint/consensus.(*ConsensusReactor).OnStart()
      /home/vagrant/go/src/github.com/tendermint/tendermint/consensus/reactor.go:69 +0x1b4
  github.com/tendermint/tendermint/vendor/github.com/tendermint/tmlibs/common.(*BaseService).Start()
      /home/vagrant/go/src/github.com/tendermint/tendermint/vendor/github.com/tendermint/tmlibs/common/service.go:130 +0x5fc
  github.com/tendermint/tendermint/consensus.(*ConsensusReactor).Start()
      <autogenerated>:1 +0x43
  github.com/tendermint/tendermint/p2p.(*Switch).OnStart()
      /home/vagrant/go/src/github.com/tendermint/tendermint/p2p/switch.go:177 +0x124
  github.com/tendermint/tendermint/vendor/github.com/tendermint/tmlibs/common.(*BaseService).Start()
      /home/vagrant/go/src/github.com/tendermint/tendermint/vendor/github.com/tendermint/tmlibs/common/service.go:130 +0x5fc
  github.com/tendermint/tendermint/node.(*Node).OnStart()
      /home/vagrant/go/src/github.com/tendermint/tendermint/node/node.go:416 +0xa1b
  github.com/tendermint/tendermint/vendor/github.com/tendermint/tmlibs/common.(*BaseService).Start()
      /home/vagrant/go/src/github.com/tendermint/tendermint/vendor/github.com/tendermint/tmlibs/common/service.go:130 +0x5fc
  github.com/tendermint/tendermint/rpc/test.StartTendermint()
      /home/vagrant/go/src/github.com/tendermint/tendermint/rpc/test/helpers.go:100 +0x5b
  github.com/tendermint/tendermint/rpc/client_test.TestMain()
      /home/vagrant/go/src/github.com/tendermint/tendermint/rpc/client/main_test.go:17 +0x4c
  main.main()
      github.com/tendermint/tendermint/rpc/client/_test/_testmain.go:76 +0x1cd

Goroutine 47 (running) created at:
  net/http.(*Server).Serve()
      /usr/lib/go-1.9/src/net/http/server.go:2720 +0x37c
  net/http.Serve()
      /usr/lib/go-1.9/src/net/http/server.go:2323 +0xe2
  github.com/tendermint/tendermint/rpc/lib/server.StartHTTPServer.func1()
      /home/vagrant/go/src/github.com/tendermint/tendermint/rpc/lib/server/http_server.go:35 +0xb3
```
2018-04-10 11:15:16 +02:00
Anton Kaliaev
609452958c [docs/specification/secure-p2p] add a note about config 2018-04-09 17:02:48 +02:00
Anton Kaliaev
c954fca376 gen_node_key cmd 2018-04-09 17:02:47 +02:00
Anton Kaliaev
9be16d56ba [docs] prefix IPs with node IDs
Refs #1429
2018-04-09 17:02:47 +02:00
Anton Kaliaev
2b732bc11a generate node_key when running tendermint init 2018-04-09 17:02:47 +02:00
Ethan Buchman
dcd00b0e68 update deps and changelog 2018-04-09 16:36:42 +03:00
Ethan Buchman
ff3f35c5f4 Merge pull request #1347 from tendermint/jae/aminoify
Convert Tendermint to use GoAmino
2018-04-09 16:24:38 +03:00
Ethan Buchman
93c4312cdd Merge pull request #1432 from tendermint/bucky/aminoify
Bucky/aminoify
2018-04-09 15:20:36 +03:00
Ethan Buchman
1a1e4e767b check max msg size in DecodeMessage 2018-04-09 15:18:47 +03:00
Ethan Buchman
bb1b249e8a types: lock block on MakePartSet 2018-04-09 15:04:59 +03:00
Ethan Buchman
c778d7f5d1 fix addresses 2018-04-07 23:13:41 +03:00
Ethan Buchman
bb9b12d67a add scripts/wire2amino.go 2018-04-07 22:04:28 +03:00
Ethan Buchman
767521ac52 update test/p2p/data for amino 2018-04-07 22:03:48 +03:00
Ethan Buchman
df9bf60b05 forgot Gopkg.lock 2018-04-07 20:59:13 +03:00
Ethan Buchman
466c3ab1c7 forgot node/wire.go 2018-04-07 19:53:29 +03:00
Ethan Buchman
c68d406195 fix tests 2018-04-07 19:47:19 +03:00
Ethan Buchman
02c0835e9b fixes post merge 2018-04-07 16:25:10 +03:00
Ethan Buchman
c170800fbd Merge branch 'develop' into jae/aminoify 2018-04-07 16:16:53 +03:00
Jae Kwon
7afe74a963 Update go-crypto to 0.6.1 and change config/toml.go privval address 2018-04-07 02:01:45 -07:00
Jae Kwon
02531ca5a3 Fix race testing (cont;) Bump version to 0.19.0 2018-04-06 17:06:46 -07:00
Jae Kwon
d24e4cb821 Fix race testing 2018-04-06 17:02:29 -07:00
Jae Kwon
fb64314d1c Review from Anton 2018-04-06 13:46:40 -07:00
Ethan Buchman
4930b61a38 Merge pull request #1431 from tendermint/release/v0.18.0
Release/v0.18.0
2018-04-06 23:19:09 +03:00
Ethan Buchman
9cc2cf362f changelog and version 2018-04-06 23:03:27 +03:00
Ethan Buchman
ed93fb34ab Merge pull request #1350 from tendermint/1275-p2p-loopbacks
p2p: loopbacks should be detected and ignored instead of dialling self infinitely
2018-04-06 18:59:05 +03:00
Anton Kaliaev
3d32474da8 make linter happy 2018-04-06 13:26:05 +02:00
Anton Kaliaev
3233c318ea only log errors, dial correct addresses
"this means if there are lookup errors or typos in the persistent_peers,
tendermint will fail to start ? didn't some one ask for us not to do
this previously ?"
2018-04-06 12:35:48 +02:00
Jae Kwon
32e1d195a0 Fix cmd and lite 2018-04-05 22:05:30 -07:00
Jae Kwon
3ca5292dc9 Fix rpc tests 2018-04-05 21:19:14 -07:00
Jae Kwon
c541d58d2f WIP: fix rpc/core 2018-04-05 16:07:29 -07:00
Jae Kwon
3037b5b7ca Fix rpc/lib/... 2018-04-05 15:45:11 -07:00
Ethan Buchman
c9a263c589 Merge pull request #1389 from tendermint/1380-trim-whitespaces
trim whitespace from elements of lists (like `persistent_peers`)
2018-04-05 18:22:46 +03:00
Jae Kwon
e4492afbad Merge 2018-04-05 08:17:10 -07:00
Ethan Buchman
799beebd36 fix consensus tests 2018-04-05 17:54:26 +03:00
Jae Kwon
45ec5fd170 WIP consensus 2018-04-05 07:05:45 -07:00
Anton Kaliaev
6e39ec6e26 do not even try to dial ourselves
also, remove address from the book (plus mark it as our address)
and return an error if we fail to parse peers list
2018-04-05 15:45:52 +02:00
Anton Kaliaev
d38a6cc7ea trim whitespace from elements of lists (like persistent_peers)
Refs #1380
2018-04-05 16:42:26 +03:00
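In practice the fix amounts to splitting the comma-separated config value and trimming each element. A minimal sketch of that normalization is below; the `splitAndTrimEmpty` helper is illustrative only, not the actual Tendermint/tmlibs function:
```
package main

import (
	"fmt"
	"strings"
)

// splitAndTrimEmpty is an illustrative helper: it splits a comma-separated
// config value, trims the given cutset from each element, and drops empty
// entries, so stray spaces around peer addresses no longer break parsing.
func splitAndTrimEmpty(s, sep, cutset string) []string {
	if s == "" {
		return []string{}
	}
	var out []string
	for _, e := range strings.Split(s, sep) {
		e = strings.Trim(e, cutset)
		if e != "" {
			out = append(out, e)
		}
	}
	return out
}

func main() {
	peers := " id1@10.0.0.1:46656 , id2@10.0.0.2:46656 "
	fmt.Println(splitAndTrimEmpty(peers, ",", " "))
}
```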
Anton Kaliaev
7f6ee7a46b add a comment for NewSwitch 2018-04-05 15:27:47 +02:00
Anton Kaliaev
34b77fcad4 log error when we fail to add new address 2018-04-05 15:27:47 +02:00
Anton Kaliaev
3b3f45d49b use addrbook#AddOurAddress to store our address 2018-04-05 15:27:47 +02:00
Anton Kaliaev
3284a13fee add test
Refs #1275
2018-04-05 15:27:47 +02:00
Anton Kaliaev
fc9ffee2e3 remove unused tracking because it leads to memory leaks in tests
see https://blog.cosmos.network/debugging-the-memory-leak-in-tendermint-210186711420
2018-04-05 15:27:47 +02:00
Anton Kaliaev
3a672cb2a9 update changelog [ci skip] 2018-04-05 15:27:46 +02:00
Anton Kaliaev
4b8e342309 fix panic: lookup testing on 10.0.2.3:53: no such host 2018-04-05 15:27:46 +02:00
Anton Kaliaev
5a2fa71b03 use combination of IP and port, not just IP 2018-04-05 15:27:46 +02:00
Anton Kaliaev
9a57ef9cbf do not dial ourselves (ok, maybe just once)
Refs #1275
2018-04-05 15:27:46 +02:00
Ethan Buchman
59ca9bf480 update to tmlibs v0.8.1 2018-04-05 16:16:36 +03:00
Ethan Buchman
7cce07bc99 Merge pull request #1352 from tendermint/1228-require-id
p2p: require all addresses come with an ID no matter what
2018-04-05 15:55:41 +03:00
Ethan Buchman
0ae66f75ce Merge pull request #1420 from tendermint/1414-data-race
protect Record* peerStateStats functions by mutex
2018-04-05 15:53:15 +03:00
Jae Kwon
5d1c758730 Fix evidence 2018-04-05 05:43:23 -07:00
Jae Kwon
1b9323f105 Fix blockchain tests 2018-04-05 05:17:43 -07:00
Anton Kaliaev
cee7b5cb54 GetSelectionWithBias
Refs #1130
2018-04-05 12:00:16 +02:00
Anton Kaliaev
1585152341 https://github.com/tendermint/tendermint/pull/1128#discussion_r162799294
Refs #1130
2018-04-05 12:00:16 +02:00
Anton Kaliaev
8e699c2bfd defaultSeedDisconnectWaitPeriod should be at least as long as we expect
it to take for a peer to become MarkGood

Refs #1130
2018-04-05 12:00:16 +02:00
Anton Kaliaev
904a3115a6 require addresses to have an ID by default
Refs #1228
2018-04-05 11:55:29 +02:00
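For context, peer addresses in this scheme take the form `<ID>@<host>:<port>`. The sketch below shows the kind of check this implies, using a hand-rolled parser that is illustrative only and not the real p2p address parsing code:
```
package main

import (
	"errors"
	"fmt"
	"strings"
)

// parseAddressWithID is an illustrative parser: it insists that a peer
// address carries a node ID prefix ("id@host:port") and rejects bare
// "host:port" strings.
func parseAddressWithID(addr string) (id, hostPort string, err error) {
	parts := strings.SplitN(addr, "@", 2)
	if len(parts) != 2 || parts[0] == "" {
		return "", "", errors.New("address must be of the form <ID>@<host>:<port>")
	}
	return parts[0], parts[1], nil
}

func main() {
	if _, _, err := parseAddressWithID("10.0.0.1:46656"); err != nil {
		fmt.Println("rejected:", err)
	}
	id, hp, _ := parseAddressWithID("deadbeefdeadbeef@10.0.0.1:46656")
	fmt.Println("accepted:", id, hp)
}
```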
Anton Kaliaev
a506cf47ad protect Record* peerStateStats functions by mutex
Fixes #1414

DATA RACE:
```
Read at 0x00c4214ee940 by goroutine 146:
  github.com/tendermint/tendermint/consensus.(*peerStateStats).String()
      <autogenerated>:1 +0x57
  fmt.(*pp).handleMethods()
      /usr/local/go/src/fmt/print.go:596 +0x3f4
  fmt.(*pp).printArg()
      /usr/local/go/src/fmt/print.go:679 +0x11f
  fmt.(*pp).doPrintf()
      /usr/local/go/src/fmt/print.go:996 +0x319
  fmt.Sprintf()
      /usr/local/go/src/fmt/print.go:196 +0x73
  github.com/tendermint/tendermint/consensus.(*PeerState).StringIndented()
      github.com/tendermint/tendermint/consensus/_test/_obj_test/reactor.go:1426 +0x573
  github.com/tendermint/tendermint/consensus.(*PeerState).String()
      github.com/tendermint/tendermint/consensus/_test/_obj_test/reactor.go:1419 +0x66
  github.com/go-logfmt/logfmt.safeString()
      /home/ubuntu/go/src/github.com/go-logfmt/logfmt/encode.go:299 +0x9d
  github.com/go-logfmt/logfmt.writeValue()
      /home/ubuntu/go/src/github.com/go-logfmt/logfmt/encode.go:217 +0x5a0
  github.com/go-logfmt/logfmt.(*Encoder).EncodeKeyval()
      /home/ubuntu/go/src/github.com/go-logfmt/logfmt/encode.go:61 +0x1dd
  github.com/tendermint/tmlibs/log.tmfmtLogger.Log()
      /home/ubuntu/go/src/github.com/tendermint/tmlibs/log/tmfmt_logger.go:107 +0x1001
  github.com/tendermint/tmlibs/log.(*tmfmtLogger).Log()
      <autogenerated>:1 +0x93
  github.com/go-kit/kit/log.(*context).Log()
      /home/ubuntu/go/src/github.com/go-kit/kit/log/log.go:124 +0x248
  github.com/tendermint/tmlibs/log.(*tmLogger).Debug()
      /home/ubuntu/go/src/github.com/tendermint/tmlibs/log/tm_logger.go:64 +0x1d0
  github.com/tendermint/tendermint/consensus.(*PeerState).PickSendVote()
      github.com/tendermint/tendermint/consensus/_test/_obj_test/reactor.go:1059 +0x242
  github.com/tendermint/tendermint/consensus.(*ConsensusReactor).gossipVotesForHeight()
      github.com/tendermint/tendermint/consensus/_test/_obj_test/reactor.go:789 +0x6ef
  github.com/tendermint/tendermint/consensus.(*ConsensusReactor).gossipVotesRoutine()
      github.com/tendermint/tendermint/consensus/_test/_obj_test/reactor.go:723 +0x1039

Previous write at 0x00c4214ee940 by goroutine 21:
  github.com/tendermint/tendermint/consensus.(*PeerState).RecordVote()
      github.com/tendermint/tendermint/consensus/_test/_obj_test/reactor.go:1242 +0x15a
  github.com/tendermint/tendermint/consensus.(*ConsensusReactor).Receive()
      github.com/tendermint/tendermint/consensus/_test/_obj_test/reactor.go:309 +0x32e6
  github.com/tendermint/tendermint/p2p.createMConnection.func1()
      /home/ubuntu/go/src/github.com/tendermint/tendermint/p2p/peer.go:365 +0xea
  github.com/tendermint/tendermint/p2p/conn.(*MConnection).recvRoutine()
      /home/ubuntu/go/src/github.com/tendermint/tendermint/p2p/conn/connection.go:531 +0x779
```
2018-04-05 11:42:45 +02:00
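The fix is the standard Go pattern: every goroutine that reads or writes the peer's counters must hold the same mutex, including the String/logging path that triggered the race above. A simplified sketch follows; the `peerStats` type and its fields are stand-ins, not the actual consensus types:
```
package main

import (
	"fmt"
	"sync"
)

// peerStats is a simplified stand-in for the per-peer counters: both the
// Record* writers and anything that formats the counters take the mutex,
// so the race detector no longer flags concurrent access.
type peerStats struct {
	mtx   sync.Mutex
	votes int
}

func (ps *peerStats) RecordVote() {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()
	ps.votes++
}

func (ps *peerStats) String() string {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()
	return fmt.Sprintf("votes=%d", ps.votes)
}

func main() {
	ps := &peerStats{}
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() { defer wg.Done(); ps.RecordVote() }()
	}
	wg.Wait()
	fmt.Println(ps)
}
```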
Anton Kaliaev
7689c15413 Merge pull request #1378 from tendermint/bucky/disable-test-libs
comment out test_libs because of gcc dep in tmlibs
2018-04-05 11:06:30 +02:00
Zaki Manian
f907113c19 Net_info should print the ID of peers (#1312) 2018-04-05 11:02:23 +02:00
Anton Kaliaev
140f962201 Merge pull request #1406 from tendermint/docker
Update dockerfile and readme
2018-04-05 10:57:19 +02:00
Ethan Buchman
c23d907f12 Merge pull request #1391 from tendermint/581-include-validator-power
Include validator power in /status
2018-04-05 11:18:45 +03:00
Anton Kaliaev
ed782e7508 include validator's voting power in /status
Refs #581
2018-04-04 11:34:59 +02:00
Anton Kaliaev
0732526465 use more relaxing < and >= ops instead of !=
an example of Search from godocs:

```
package main

import (
	"fmt"
	"sort"
)

func main() {
	a := []int{1, 3, 6, 10, 15, 21, 28, 36, 45, 55}
	x := 6

	i := sort.Search(len(a), func(i int) bool { return a[i] >= x })
	if i < len(a) && a[i] == x {
		fmt.Printf("found %d at index %d in %v\n", x, i, a)
	} else {
		fmt.Printf("%d not found in %v\n", x, a)
	}
}
```
2018-04-04 10:42:35 +02:00
Anton Kaliaev
39a4963782 document funcs in validator_set.go 2018-04-04 10:42:35 +02:00
Anton Kaliaev
37ce6b195a ValidatorSet#GetByAddress: return -1 if no validator was found 2018-04-04 10:42:34 +02:00
Ethan Buchman
7aa6d36258 Merge pull request #1412 from tendermint/bucky/exit-conR-subscribe-routine
consensus: check for closed subscription channels and exit routine
2018-04-03 23:53:48 +03:00
Ethan Buchman
991017fc41 Merge pull request #1336 from tendermint/zarko/1308-add-light-client-spec
Add light client spec
2018-04-03 23:02:32 +03:00
Ethan Buchman
5f548c7679 consensus: close pubsub channels. fixes #1372 2018-04-03 22:57:32 +03:00
Ethan Buchman
d14aacf03e Merge pull request #1300 from tendermint/lite-proxy-hardening-and-tests
lite/proxy: Validation* tests and hardening for nil dereferences
2018-04-03 22:43:38 +03:00
Ethan Buchman
39ff4d22e9 minor cleanup 2018-04-03 22:34:18 +03:00
Jae Kwon
196f8410ba WIP commit; Fix types/results_test 2018-04-03 07:03:08 -07:00
Anton Kaliaev
8462493cbf [rpc] fix subscribing using an abci.ResponseDeliverTx tag
Refs #1369
2018-04-03 15:53:13 +02:00
Anton Kaliaev
47b8bd1728 wrote a test for EventBus#PublishEventTx
Refs #1369
2018-04-03 15:53:13 +02:00
Jae Kwon
89cdde7f1e Fix state tests 2018-04-03 06:50:53 -07:00
Ethan Buchman
657fd671ea Merge pull request #1409 from tendermint/zach/docs/tm-monitor
docs: build updates
2018-04-03 15:24:55 +03:00
Zach Ramsay
315c475b79 docs: build updates
ref: https://github.com/tendermint/tools/pull/79
2018-04-03 04:48:40 -07:00
Anton Kaliaev
b800b4ec1d update docker readme 2018-04-02 16:57:25 +02:00
Anton Kaliaev
208ac32fa2 update Dockerfile to point to 0.17.1 release 2018-04-02 16:56:07 +02:00
Anton Kaliaev
641476d40f update docker to use alpine 3.7 2018-04-02 16:55:43 +02:00
Anton Kaliaev
491c8ab4c1 [rpc/lib] log cert and key files in StartHTTPAndTLSServer 2018-04-02 15:21:05 +02:00
Anton Kaliaev
5ef8a6e887 deprecate not fully formed addresses 2018-04-02 15:21:05 +02:00
Anton Kaliaev
d694d47d22 [rpc/lib] rename vars according to Go conventions 2018-04-02 15:21:05 +02:00
Zaki Manian
ecdc1b9bb0 Add a method for creating an https server (#1403) 2018-04-02 11:36:09 +02:00
Anton Kaliaev
9c757108ca [test] remove test_libs
Reasons:
1) all deps we're using should be passing tests (including external)
2) deps can require complicated setup for testing
3) the person responsible for releasing Tendermint should be cautious
when updating a dep
2018-04-02 11:29:03 +02:00
Anton Kaliaev
5243e54641 [codecov] ignore docs, scripts and DOCKER dirs 2018-04-02 11:23:56 +02:00
Ethan Buchman
70e7454c21 comment out test_libs because of gcc dep in tmlibs 2018-04-02 11:23:56 +02:00
Thomas Corbière
2644a529f0 Fix lint errors (#1390)
* use increment and decrement operators.

* remove unnecessary else branches.

* fix package comment with leading space.

* fix receiver names.

* fix error strings.

* remove omittable code.

* remove redundant return statement.

* Revert changes (code is generated.)

* use cfg as receiver name for all config-related types.

* use lsi as the receiver name for the LastSignedInfo type.
2018-04-02 10:21:17 +02:00
Greg Szabo
eaee98ee1f CGO_ENABLED=0 added for static linking (#1396) 2018-04-01 19:54:48 +02:00
Jae Kwon
35a1d747b0 Fix mempool 2018-03-31 11:51:32 +02:00
Jae Kwon
34974e3932 Make types use Amino; Refactor PrivValidator* to FilePV/SocketPV 2018-03-31 00:18:43 +02:00
Alex Hernandez
575a46d9d4 fix typo on block header (#1387) 2018-03-29 11:28:29 +02:00
Ethan Buchman
bcadbd1b10 Merge pull request #1376 from tendermint/1368-unsubscribe-does-not-work
[rpc] unsubscribe does not work
2018-03-28 15:13:40 -04:00
Tomoya Ishizaki
ead9daf1ba Fix code style (#1362)
* cfg: Uniform style for method args and var names
2018-03-28 13:40:47 -04:00
Anton Kaliaev
22949e6dfd new tmlibs Parallel implementation 2018-03-28 19:13:08 +02:00
Anton Kaliaev
49986b05bc update tmlibs
Refs #1376
2018-03-28 19:12:52 +02:00
Vladislav Dmitriyev
2fa7af4614 [lite] fixed listen address (#1384) 2018-03-28 15:59:09 +02:00
Anton Kaliaev
2d857c4b1b add hash field to ResultTx (/tx and /tx_search endpoints) (#1374)
Refs #1367
2018-03-28 15:44:58 +02:00
Anton Kaliaev
2b63f57b4c fix tx_indexer's matchRange
before we're using IteratePrefix, which is wrong because we want full
range, not just "account.number=1".
2018-03-28 15:02:54 +02:00
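A toy illustration of the difference, over plain sorted string keys rather than the real indexer's store: a prefix scan for `account.number=1` returns only keys starting with that literal string, while the intended range query should return every entry for the tag.
```
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Sorted keys, loosely mimicking how a tag index might lay out entries
	// (purely illustrative; not the real tx indexer key format).
	keys := []string{
		"account.number=1/abc",
		"account.number=2/def",
		"account.number=3/ghi",
	}

	// Prefix iteration over "account.number=1" only sees the first key...
	var byPrefix []string
	for _, k := range keys {
		if strings.HasPrefix(k, "account.number=1") {
			byPrefix = append(byPrefix, k)
		}
	}

	// ...while a range query "account.number >= 1" should see all of them.
	var byRange []string
	for _, k := range keys {
		if strings.HasPrefix(k, "account.number=") && k >= "account.number=1" {
			byRange = append(byRange, k)
		}
	}

	fmt.Println("prefix:", byPrefix)
	fmt.Println("range: ", byRange)
}
```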
Anton Kaliaev
4085c72496 sort /tx_search results by height by default
Refs #1366
2018-03-28 15:02:54 +02:00
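The ordering itself is a one-liner with the standard library; here is a sketch over a stripped-down result type, where `resultTx` is a stand-in for the real RPC type, reduced to the fields needed for sorting:
```
package main

import (
	"fmt"
	"sort"
)

// resultTx is a pared-down stand-in for the RPC result type; only the
// Height field matters for the ordering described above.
type resultTx struct {
	Hash   string
	Height int64
}

func main() {
	results := []resultTx{
		{Hash: "c0ffee", Height: 42},
		{Hash: "deadbf", Height: 7},
		{Hash: "abcdef", Height: 19},
	}
	// Sort ascending by height so the search output is deterministic.
	sort.Slice(results, func(i, j int) bool { return results[i].Height < results[j].Height })
	fmt.Println(results)
}
```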
Jae Kwon
901b456151 P2P now works with Amino 2018-03-26 06:40:02 +02:00
Emmanuel T Odeke
8813684040 lite/proxy: consolidate some common test headers into a variable
Addressing some feedback from @ebuchman in regards to
consolidating some common test headers into a variable.

I've added that for simple cases, trying to meet in the middle
instead of creating helpers that obscure readability and easy
comparison of test cases.
2018-03-25 00:27:42 -06:00
Emmanuel T Odeke
58f36bb321 Review feedback from @melekes
* Fix typo on naming s/deabBeef/deadBeef/g
* Use `assert.*(t,` instead of `assert.New(t);...;assert.*(`
2018-03-24 23:54:01 -06:00
Emmanuel T Odeke
4c2f56626a lite/proxy: Validation* tests and hardening for nil dereferences
Updates https://github.com/tendermint/tendermint/issues/1017

Ensure that the Validate* functions in proxy are tests
and cover the case of sneakish bugs that have been encountered
a few times from nil dereferences. The lite package should
theoretically never panic with a nil dereference. It is meant
to contain the certifiers hence it should never panic with such.

Requires the following bugs to be fixed first;
* https://github.com/tendermint/tendermint/issues/1298
* https://github.com/tendermint/tendermint/issues/1299
2018-03-24 23:54:01 -06:00
Zarko Milosevic
416f03c05b Add light client spec 2018-03-21 10:00:18 +01:00
Jae Kwon
ced74251e9 maxPacketMsg -> packetMsgMax... 2018-03-21 02:47:38 +01:00
Jae Kwon
6c345f9fa2 First stab: p2p/conn 2018-03-21 02:27:10 +01:00
229 changed files with 5575 additions and 4948 deletions


@@ -130,19 +130,6 @@ jobs:
 paths:
 - "profiles/*"
-test_libs:
-<<: *defaults
-steps:
-- attach_workspace:
-at: /tmp/workspace
-- restore_cache:
-key: v1-pkg-cache
-- restore_cache:
-key: v1-tree-{{ .Environment.CIRCLE_SHA1 }}
-- run:
-name: Run tests
-command: bash test/test_libs.sh
 test_persistence:
 <<: *defaults
 steps:
@@ -205,14 +192,6 @@ workflows:
 - test_cover:
 requires:
 - setup_dependencies
-- test_libs:
-filters:
-branches:
-only:
-- develop
-- master
-requires:
-- setup_dependencies
 - test_persistence:
 requires:
 - setup_abci

.gitignore vendored

@@ -17,6 +17,7 @@ test/logs
 coverage.txt
 docs/_build
 docs/tools
+docs/abci-spec.rst
 *.log
 scripts/wal2json/wal2json


@@ -7,7 +7,6 @@ BREAKING CHANGES:
 - Upgrade consensus for more real-time use of evidence
 FEATURES:
-- Peer reputation management
 - Use the chain as its own CA for nodes and validators
 - Tooling to run multiple blockchains/apps, possibly in a single process
 - State syncing (without transaction replay)
@@ -25,10 +24,54 @@ BUG FIXES:
 - Graceful handling/recovery for apps that have non-determinism or fail to halt
 - Graceful handling/recovery for violations of safety, or liveness
+## 0.19.0 (April 13th, 2018)
+BREAKING:
+- [cmd] improved `testnet` command; now it can fill in `persistent_peers` for you in the config file and much more (see `tendermint testnet --help` for details)
+- [cmd] `show_node_id` now returns an error if there is no node key
+- [rpc]: changed the output format for the `/status` endpoint (see https://godoc.org/github.com/tendermint/tendermint/rpc/core#Status)
+Upgrade from go-wire to go-amino. This is a sweeping change that breaks everything that is
+serialized to disk or over the network.
+See github.com/tendermint/go-amino for details on the new format.
+See `scripts/wire2amino.go` for a tool to upgrade
+genesis/priv_validator/node_key JSON files.
+FEATURES:
+- [cmd] added `gen_node_key` command
+## 0.18.0 (April 6th, 2018)
+BREAKING:
+- [types] Merkle tree uses different encoding for varints (see tmlibs v0.8.0)
+- [types] ValidtorSet.GetByAddress returns -1 if no validator found
+- [p2p] require all addresses come with an ID no matter what
+- [rpc] Listening address must contain tcp:// or unix:// prefix
+FEATURES:
+- [rpc] StartHTTPAndTLSServer (not used yet)
+- [rpc] Include validator's voting power in `/status`
+- [rpc] `/tx` and `/tx_search` responses now include the transaction hash
+- [rpc] Include peer NodeIDs in `/net_info`
+IMPROVEMENTS:
+- [config] trim whitespace from elements of lists (like `persistent_peers`)
+- [rpc] `/tx_search` results are sorted by height
+- [p2p] do not try to connect to ourselves (ok, maybe only once)
+- [p2p] seeds respond with a bias towards good peers
+BUG FIXES:
+- [rpc] fix subscribing using an abci.ResponseDeliverTx tag
+- [rpc] fix tx_indexers matchRange
+- [rpc] fix unsubscribing (see tmlibs v0.8.0)
 ## 0.17.1 (March 27th, 2018)
 BUG FIXES:
 - [types] Actually support `app_state` in genesis as `AppStateJSON`
 ## 0.17.0 (March 27th, 2018)


@@ -1,8 +1,8 @@
-FROM alpine:3.6
+FROM alpine:3.7
 # This is the release of tendermint to pull in.
-ENV TM_VERSION 0.15.0
-ENV TM_SHA256SUM 71cc271c67eca506ca492c8b90b090132f104bf5dbfe0af2702a50886e88de17
+ENV TM_VERSION 0.17.1
+ENV TM_SHA256SUM d57008c63d2d9176861137e38ed203da486febf20ae7d388fb810a75afff8f24
 # Tendermint will be looking for genesis file in /tendermint (unless you change
 # `genesis_file` in config.toml). You can put your config.toml and private
@@ -26,7 +26,7 @@ RUN mkdir -p $DATA_ROOT && \
 RUN apk add --no-cache bash curl jq
 RUN apk add --no-cache openssl && \
-wget https://s3-us-west-2.amazonaws.com/tendermint/binaries/tendermint/v${TM_VERSION}/tendermint_${TM_VERSION}_linux_amd64.zip && \
+wget https://github.com/tendermint/tendermint/releases/download/v${TM_VERSION}/tendermint_${TM_VERSION}_linux_amd64.zip && \
 echo "${TM_SHA256SUM} tendermint_${TM_VERSION}_linux_amd64.zip" | sha256sum -c && \
 unzip -d /bin tendermint_${TM_VERSION}_linux_amd64.zip && \
 apk del openssl && \


@@ -1,4 +1,4 @@
-FROM alpine:3.6
+FROM alpine:3.7
 ENV DATA_ROOT /tendermint
 ENV TMHOME $DATA_ROOT


@@ -1,6 +1,7 @@
 # Supported tags and respective `Dockerfile` links
-- `0.15.0`, `latest` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/170777300ea92dc21a8aec1abc16cb51812513a4/DOCKER/Dockerfile)
+- `0.17.1`, `latest` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/208ac32fa266657bd6c304e84ec828aa252bb0b8/DOCKER/Dockerfile)
+- `0.15.0` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/170777300ea92dc21a8aec1abc16cb51812513a4/DOCKER/Dockerfile)
 - `0.13.0` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/a28b3fff49dce2fb31f90abb2fc693834e0029c2/DOCKER/Dockerfile)
 - `0.12.1` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/457c688346b565e90735431619ca3ca597ef9007/DOCKER/Dockerfile)
 - `0.12.0` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/70d8afa6e952e24c573ece345560a5971bf2cc0e/DOCKER/Dockerfile)

Gopkg.lock generated

@@ -105,7 +105,7 @@
"json/scanner", "json/scanner",
"json/token" "json/token"
] ]
revision = "f40e974e75af4e271d97ce0fc917af5898ae7bda" revision = "ef8a98b0bbce4a65b5aa4c368430a80ddc533168"
[[projects]] [[projects]]
name = "github.com/inconshreveable/mousetrap" name = "github.com/inconshreveable/mousetrap"
@@ -159,7 +159,7 @@
branch = "master" branch = "master"
name = "github.com/rcrowley/go-metrics" name = "github.com/rcrowley/go-metrics"
packages = ["."] packages = ["."]
revision = "8732c616f52954686704c8645fe1a9d59e9df7c1" revision = "d932a24a8ccb8fcadc993e5c6c58f93dac168294"
[[projects]] [[projects]]
name = "github.com/spf13/afero" name = "github.com/spf13/afero"
@@ -167,8 +167,8 @@
".", ".",
"mem" "mem"
] ]
revision = "bb8f1927f2a9d3ab41c9340aa034f6b803f4359c" revision = "63644898a8da0bc22138abf860edaf5277b6102e"
version = "v1.0.2" version = "v1.1.0"
[[projects]] [[projects]]
name = "github.com/spf13/cast" name = "github.com/spf13/cast"
@@ -179,8 +179,8 @@
[[projects]] [[projects]]
name = "github.com/spf13/cobra" name = "github.com/spf13/cobra"
packages = ["."] packages = ["."]
revision = "7b2c5ac9fc04fc5efafb60700713d4fa609b777b" revision = "a1f051bc3eba734da4772d60e2d677f47cf93ef4"
version = "v0.0.1" version = "v0.0.2"
[[projects]] [[projects]]
branch = "master" branch = "master"
@@ -191,8 +191,8 @@
[[projects]] [[projects]]
name = "github.com/spf13/pflag" name = "github.com/spf13/pflag"
packages = ["."] packages = ["."]
revision = "e57e3eeb33f795204c1ca35f56c44f83227c6e66" revision = "583c0c0531f06d5278b7d917446061adc344b5cd"
version = "v1.0.0" version = "v1.0.1"
[[projects]] [[projects]]
name = "github.com/spf13/viper" name = "github.com/spf13/viper"
@@ -226,7 +226,7 @@
"leveldb/table", "leveldb/table",
"leveldb/util" "leveldb/util"
] ]
revision = "169b1b37be738edb2813dab48c97a549bcf99bb5" revision = "714f901b98fdb3aa954b4193d8cbd64a28d80cad"
[[projects]] [[projects]]
name = "github.com/tendermint/abci" name = "github.com/tendermint/abci"
@@ -238,8 +238,8 @@
"server", "server",
"types" "types"
] ]
revision = "46686763ba8ea595ede16530ed4a40fb38f49f94" revision = "78a8905690ef54f9d57e3b2b0ee7ad3a04ef3f1f"
version = "v0.10.2" version = "v0.10.3"
[[projects]] [[projects]]
branch = "master" branch = "master"
@@ -251,20 +251,22 @@
] ]
revision = "d8387025d2b9d158cf4efb07e7ebf814bcce2057" revision = "d8387025d2b9d158cf4efb07e7ebf814bcce2057"
[[projects]]
name = "github.com/tendermint/go-amino"
packages = ["."]
revision = "42246108ff925a457fb709475070a03dfd3e2b5c"
version = "0.9.6"
[[projects]] [[projects]]
name = "github.com/tendermint/go-crypto" name = "github.com/tendermint/go-crypto"
packages = ["."] packages = ["."]
revision = "c3e19f3ea26f5c3357e0bcbb799b0761ef923755" revision = "915416979bf70efa4bcbf1c6cd5d64c5fff9fc19"
version = "v0.5.0" version = "v0.6.2"
[[projects]] [[projects]]
name = "github.com/tendermint/go-wire" name = "github.com/tendermint/go-wire"
packages = [ packages = ["."]
".",
"data"
]
revision = "fa721242b042ecd4c6ed1a934ee740db4f74e45c" revision = "fa721242b042ecd4c6ed1a934ee740db4f74e45c"
source = "github.com/tendermint/go-amino"
version = "v0.7.3" version = "v0.7.3"
[[projects]] [[projects]]
@@ -283,8 +285,8 @@
"pubsub/query", "pubsub/query",
"test" "test"
] ]
revision = "24da7009c3d8c019b40ba4287495749e3160caca" revision = "97e1f1ad3f510048929a51475811a18686c894df"
version = "v0.7.1" version = "0.8.2-rc0"
[[projects]] [[projects]]
branch = "master" branch = "master"
@@ -299,7 +301,7 @@
"ripemd160", "ripemd160",
"salsa20/salsa" "salsa20/salsa"
] ]
revision = "88942b9c40a4c9d203b82b3731787b672d6e809b" revision = "d6449816ce06963d9d136eee5a56fca5b0616e7e"
[[projects]] [[projects]]
branch = "master" branch = "master"
@@ -313,13 +315,13 @@
"lex/httplex", "lex/httplex",
"trace" "trace"
] ]
revision = "6078986fec03a1dcc236c34816c71b0e05018fda" revision = "61147c48b25b599e5b561d2e9c4f3e1ef489ca41"
[[projects]] [[projects]]
branch = "master" branch = "master"
name = "golang.org/x/sys" name = "golang.org/x/sys"
packages = ["unix"] packages = ["unix"]
revision = "91ee8cde435411ca3f1cd365e8f20131aed4d0a1" revision = "3b87a42e500a6dc65dae1a55d0b641295971163e"
[[projects]] [[projects]]
name = "golang.org/x/text" name = "golang.org/x/text"
@@ -346,7 +348,7 @@
branch = "master" branch = "master"
name = "google.golang.org/genproto" name = "google.golang.org/genproto"
packages = ["googleapis/rpc/status"] packages = ["googleapis/rpc/status"]
revision = "f8c8703595236ae70fdf8789ecb656ea0bcdcf46" revision = "51d0944304c3cbce4afe9e5247e21100037bff78"
[[projects]] [[projects]]
name = "google.golang.org/grpc" name = "google.golang.org/grpc"
@@ -375,12 +377,12 @@
[[projects]] [[projects]]
name = "gopkg.in/yaml.v2" name = "gopkg.in/yaml.v2"
packages = ["."] packages = ["."]
revision = "7f97868eec74b32b0982dd158a51a446d1da7eb5" revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183"
version = "v2.1.1" version = "v2.2.1"
[solve-meta] [solve-meta]
analyzer-name = "dep" analyzer-name = "dep"
analyzer-version = 1 analyzer-version = 1
inputs-digest = "4dca5dbd2d280d093d7c8fc423606ab86d6ad1b241b076a7716c2093b5a09231" inputs-digest = "e70f8692c825e80ae8510546e297840b9560d00e11b2272749a55cc2ffd147f0"
solver-name = "gps-cdcl" solver-name = "gps-cdcl"
solver-version = 1 solver-version = 1


@@ -26,12 +26,12 @@
 [[constraint]]
-branch = "master"
 name = "github.com/ebuchman/fail-test"
+branch = "master"
 [[constraint]]
-branch = "master"
 name = "github.com/fortytw2/leaktest"
+branch = "master"
 [[constraint]]
 name = "github.com/go-kit/kit"
@@ -54,8 +54,8 @@
 version = "~0.8.0"
 [[constraint]]
-branch = "master"
 name = "github.com/rcrowley/go-metrics"
+branch = "master"
 [[constraint]]
 name = "github.com/spf13/cobra"
@@ -71,20 +71,19 @@
 [[constraint]]
 name = "github.com/tendermint/abci"
-version = "~0.10.2"
+version = "~0.10.3"
 [[constraint]]
 name = "github.com/tendermint/go-crypto"
-version = "~0.5.0"
+version = "~0.6.2"
 [[constraint]]
-name = "github.com/tendermint/go-wire"
-source = "github.com/tendermint/go-amino"
-version = "~0.7.3"
+name = "github.com/tendermint/go-amino"
+version = "~0.9.6"
 [[constraint]]
 name = "github.com/tendermint/tmlibs"
-version = "~0.7.1"
+version = "~0.8.2-rc0"
 [[constraint]]
 name = "google.golang.org/grpc"


@@ -14,13 +14,13 @@ check: check_tools ensure_deps
 ### Build
 build:
-go build $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' -o build/tendermint ./cmd/tendermint/
+CGO_ENABLED=0 go build $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' -o build/tendermint ./cmd/tendermint/
 build_race:
-go build -race $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' -o build/tendermint ./cmd/tendermint
+CGO_ENABLED=0 go build -race $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' -o build/tendermint ./cmd/tendermint
 install:
-go install $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' ./cmd/tendermint
+CGO_ENABLED=0 go install $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' ./cmd/tendermint
 ########################################
 ### Distribution
@@ -119,11 +119,6 @@ test_integrations:
 make test_persistence
 make test_p2p
-test_libs:
-# checkout every github.com/tendermint dir and run its tests
-# NOTE: on release-* or master branches only (set by Jenkins)
-docker run --name run_libs -t tester bash test/test_libs.sh
 test_release:
 @go test -tags release $(PACKAGES)
@@ -183,7 +178,25 @@ metalinter_all:
 @echo "--> Running linter (all)"
 gometalinter.v2 --vendor --deadline=600s --enable-all --disable=lll ./...
+###########################################################
+### Local testnet using docker
+# Build linux binary on other platforms
+build-linux:
+GOOS=linux GOARCH=amd64 $(MAKE) build
+# Run a 4-node testnet locally
+docker-start:
+@echo "Wait until 'Attaching to node0, node1, node2, node3' message appears"
+@if ! [ -f build/node0/config/genesis.json ]; then docker run --rm -v `pwd`/build:/tendermint:Z tendermint/localnode testnet --v 4 --o . --populate-persistent-peers --starting-ip-address 192.167.10.2 ; fi
+docker-compose up
+# Stop testnet
+docker-stop:
+docker-compose down
 # To avoid unintended conflicts with file names, always add to .PHONY
 # unless there is a reason not to.
 # https://www.gnu.org/software/make/manual/html_node/Phony-Targets.html
-.PHONY: check build build_race dist install check_tools get_tools update_tools get_vendor_deps draw_deps test_cover test_apps test_persistence test_p2p test test_race test_libs test_integrations test_release test100 vagrant_test fmt
+.PHONY: check build build_race dist install check_tools get_tools update_tools get_vendor_deps draw_deps test_cover test_apps test_persistence test_p2p test test_race test_integrations test_release test100 vagrant_test fmt build-linux docker-start docker-stop


@@ -4,8 +4,8 @@ import (
"testing" "testing"
"time" "time"
amino "github.com/tendermint/go-amino"
"github.com/tendermint/go-crypto" "github.com/tendermint/go-crypto"
"github.com/tendermint/go-wire"
proto "github.com/tendermint/tendermint/benchmarks/proto" proto "github.com/tendermint/tendermint/benchmarks/proto"
"github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/p2p"
@@ -14,26 +14,35 @@ import (
func BenchmarkEncodeStatusWire(b *testing.B) { func BenchmarkEncodeStatusWire(b *testing.B) {
b.StopTimer() b.StopTimer()
pubKey := crypto.GenPrivKeyEd25519().PubKey() cdc := amino.NewCodec()
ctypes.RegisterAmino(cdc)
nodeKey := p2p.NodeKey{PrivKey: crypto.GenPrivKeyEd25519()}
status := &ctypes.ResultStatus{ status := &ctypes.ResultStatus{
NodeInfo: p2p.NodeInfo{ NodeInfo: p2p.NodeInfo{
PubKey: pubKey, ID: nodeKey.ID(),
Moniker: "SOMENAME", Moniker: "SOMENAME",
Network: "SOMENAME", Network: "SOMENAME",
ListenAddr: "SOMEADDR", ListenAddr: "SOMEADDR",
Version: "SOMEVER", Version: "SOMEVER",
Other: []string{"SOMESTRING", "OTHERSTRING"}, Other: []string{"SOMESTRING", "OTHERSTRING"},
}, },
PubKey: pubKey, SyncInfo: ctypes.SyncInfo{
LatestBlockHash: []byte("SOMEBYTES"), LatestBlockHash: []byte("SOMEBYTES"),
LatestBlockHeight: 123, LatestBlockHeight: 123,
LatestBlockTime: time.Unix(0, 1234), LatestBlockTime: time.Unix(0, 1234),
},
ValidatorInfo: ctypes.ValidatorInfo{
PubKey: nodeKey.PubKey(),
},
} }
b.StartTimer() b.StartTimer()
counter := 0 counter := 0
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
jsonBytes := wire.JSONBytes(status) jsonBytes, err := cdc.MarshalJSON(status)
if err != nil {
panic(err)
}
counter += len(jsonBytes) counter += len(jsonBytes)
} }
@@ -41,9 +50,11 @@ func BenchmarkEncodeStatusWire(b *testing.B) {
func BenchmarkEncodeNodeInfoWire(b *testing.B) { func BenchmarkEncodeNodeInfoWire(b *testing.B) {
b.StopTimer() b.StopTimer()
pubKey := crypto.GenPrivKeyEd25519().PubKey() cdc := amino.NewCodec()
ctypes.RegisterAmino(cdc)
nodeKey := p2p.NodeKey{PrivKey: crypto.GenPrivKeyEd25519()}
nodeInfo := p2p.NodeInfo{ nodeInfo := p2p.NodeInfo{
PubKey: pubKey, ID: nodeKey.ID(),
Moniker: "SOMENAME", Moniker: "SOMENAME",
Network: "SOMENAME", Network: "SOMENAME",
ListenAddr: "SOMEADDR", ListenAddr: "SOMEADDR",
@@ -54,16 +65,21 @@ func BenchmarkEncodeNodeInfoWire(b *testing.B) {
counter := 0 counter := 0
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
jsonBytes := wire.JSONBytes(nodeInfo) jsonBytes, err := cdc.MarshalJSON(nodeInfo)
if err != nil {
panic(err)
}
counter += len(jsonBytes) counter += len(jsonBytes)
} }
} }
func BenchmarkEncodeNodeInfoBinary(b *testing.B) { func BenchmarkEncodeNodeInfoBinary(b *testing.B) {
b.StopTimer() b.StopTimer()
pubKey := crypto.GenPrivKeyEd25519().PubKey() cdc := amino.NewCodec()
ctypes.RegisterAmino(cdc)
nodeKey := p2p.NodeKey{PrivKey: crypto.GenPrivKeyEd25519()}
nodeInfo := p2p.NodeInfo{ nodeInfo := p2p.NodeInfo{
PubKey: pubKey, ID: nodeKey.ID(),
Moniker: "SOMENAME", Moniker: "SOMENAME",
Network: "SOMENAME", Network: "SOMENAME",
ListenAddr: "SOMEADDR", ListenAddr: "SOMEADDR",
@@ -74,7 +90,7 @@ func BenchmarkEncodeNodeInfoBinary(b *testing.B) {
counter := 0 counter := 0
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
jsonBytes := wire.BinaryBytes(nodeInfo) jsonBytes := cdc.MustMarshalBinaryBare(nodeInfo)
counter += len(jsonBytes) counter += len(jsonBytes)
} }
@@ -82,15 +98,20 @@ func BenchmarkEncodeNodeInfoBinary(b *testing.B) {
func BenchmarkEncodeNodeInfoProto(b *testing.B) { func BenchmarkEncodeNodeInfoProto(b *testing.B) {
b.StopTimer() b.StopTimer()
pubKey := crypto.GenPrivKeyEd25519().PubKey().Unwrap().(crypto.PubKeyEd25519) nodeKey := p2p.NodeKey{PrivKey: crypto.GenPrivKeyEd25519()}
pubKey2 := &proto.PubKey{Ed25519: &proto.PubKeyEd25519{Bytes: pubKey[:]}} nodeID := string(nodeKey.ID())
someName := "SOMENAME"
someAddr := "SOMEADDR"
someVer := "SOMEVER"
someString := "SOMESTRING"
otherString := "OTHERSTRING"
nodeInfo := proto.NodeInfo{ nodeInfo := proto.NodeInfo{
PubKey: pubKey2, Id: &proto.ID{Id: &nodeID},
Moniker: "SOMENAME", Moniker: &someName,
Network: "SOMENAME", Network: &someName,
ListenAddr: "SOMEADDR", ListenAddr: &someAddr,
Version: "SOMEVER", Version: &someVer,
Other: []string{"SOMESTRING", "OTHERSTRING"}, Other: []string{someString, otherString},
} }
b.StartTimer() b.StartTimer()
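Note: the benchmark changes above replace go-wire's JSONBytes/BinaryBytes helpers with a go-amino codec that has the rpc/core types registered on it. A minimal sketch of the new encoding pattern, using a hypothetical Message struct rather than ResultStatus:

package main

import (
	"fmt"

	amino "github.com/tendermint/go-amino"
)

// Message stands in for any struct previously serialized with go-wire.
type Message struct {
	Height int64  `json:"height"`
	Hash   []byte `json:"hash"`
}

func main() {
	cdc := amino.NewCodec()
	// cdc.MarshalJSON replaces wire.JSONBytes; MustMarshalBinaryBare replaces wire.BinaryBytes.
	bz, err := cdc.MarshalJSON(Message{Height: 123, Hash: []byte("abc")})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(bz))
	fmt.Println(len(cdc.MustMarshalBinaryBare(Message{Height: 123})))
}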

View File

@@ -0,0 +1,26 @@
DIST_DIRS := find * -type d -exec
VERSION := $(shell perl -ne '/^var version.*"([^"]+)".*$$/ && print "v$$1\n"' main.go)
GOTOOLS = \
github.com/mitchellh/gox
tools:
go get $(GOTOOLS)
get_vendor_deps:
@hash glide 2>/dev/null || go get github.com/Masterminds/glide
glide install
build:
go build
install:
go install
test:
go test -race
clean:
rm -f ./experiments
rm -rf ./dist
.PHONY: tools get_vendor_deps build install test clean

View File

@@ -0,0 +1,12 @@
package: github.com/tendermint/tendermint/benchmarks/experiments
import:
- package: github.com/tendermint/tendermint
version: v0.16.0
subpackages:
- rpc/client
- rpc/lib/types
- types
- package: github.com/tendermint/tmlibs
version: v0.7.0
subpackages:
- log

View File

@@ -0,0 +1,126 @@
package main
import (
"encoding/binary"
"fmt"
"math/rand"
"os"
"sync"
"time"
"context"
"github.com/tendermint/tendermint/rpc/client"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tmlibs/log"
)
var logger = log.NewNopLogger()
var finishedTasks = 0
var mutex = &sync.Mutex{}
func main() {
var endpoint = "tcp://0.0.0.0:46657"
var httpClient = getHTTPClient(endpoint)
var res, err = httpClient.Status()
if err != nil {
logger.Info("something wrong happens", err)
}
logger.Info("received status", res)
go monitorTask(endpoint)
txCount := 10
var clientNumber = 10
for i := 0; i < clientNumber; i++ {
go clientTask(i, txCount, endpoint)
}
for finishedTasks < clientNumber+1 {
}
fmt.Printf("Done: %d\n", finishedTasks)
}
func clientTask(id, txCount int, endpoint string) {
var httpClient = getHTTPClient(endpoint)
for i := 0; i < txCount; i++ {
var _, err = httpClient.BroadcastTxSync(generateTx(id, rand.Int()))
if err != nil {
fmt.Printf("Something wrong happened: %s\n", err)
}
}
fmt.Printf("Finished client task: %d\n", id)
mutex.Lock()
finishedTasks++
mutex.Unlock()
}
func getHTTPClient(rpcAddr string) *client.HTTP {
return client.NewHTTP(rpcAddr, "/websocket")
}
func generateTx(i, valI int) []byte {
// a tx encodes the validator index, the tx number, and some random junk
tx := make([]byte, 250)
binary.PutUvarint(tx[:32], uint64(valI))
binary.PutUvarint(tx[32:64], uint64(i))
if _, err := rand.Read(tx[65:]); err != nil {
fmt.Println("err reading from crypto/rand", err)
os.Exit(1)
}
return tx
}
func monitorTask(endpoint string) {
fmt.Println("Monitor task started...")
var duration = 5 * time.Second
const subscriber = "monitor"
ctx, cancel := context.WithTimeout(context.Background(), duration)
defer cancel()
evts := make(chan interface{})
var httpClient = getHTTPClient(endpoint)
httpClient.Start()
evtTyp := types.EventNewBlockHeader
// register for the next event of this type
query := types.QueryForEvent(evtTyp)
err := httpClient.Subscribe(ctx, subscriber, query, evts)
if err != nil {
fmt.Println("error when subscribing", err)
}
// make sure to unregister after the test is over
defer httpClient.UnsubscribeAll(ctx, subscriber)
totalNumOfCommittedTxs := int64(0)
for {
fmt.Println("Starting main loop", err)
select {
case evt := <-evts:
event := evt.(types.TMEventData)
header, ok := event.Unwrap().(types.EventDataNewBlockHeader)
if ok {
fmt.Println("received header\n", header.Header.StringIndented(""))
} else {
fmt.Println("not able to unwrap header")
}
// Do some metric computation with header
totalNumOfCommittedTxs += header.Header.NumTxs
case <-ctx.Done():
fmt.Printf("Finished monitor task. Received %d transactions \n", totalNumOfCommittedTxs)
mutex.Lock()
finishedTasks++
mutex.Unlock()
return
}
}
}
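Note: the example above waits for the workers by spinning on finishedTasks, which is read without taking the mutex that guards its updates. A common alternative (a sketch, not part of the commit) is to let a sync.WaitGroup do the waiting:

package main

import "sync"

// runClients starts clientNumber workers and blocks until all of them finish.
func runClients(clientNumber int, task func(id int)) {
	var wg sync.WaitGroup
	wg.Add(clientNumber)
	for i := 0; i < clientNumber; i++ {
		go func(id int) {
			defer wg.Done()
			task(id)
		}(i)
	}
	wg.Wait()
}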

File diff suppressed because it is too large

View File

@@ -7,7 +7,7 @@ message ResultStatus {
} }
message NodeInfo { message NodeInfo {
required PubKey pubKey = 1; required ID id = 1;
required string moniker = 2; required string moniker = 2;
required string network = 3; required string network = 3;
required string remoteAddr = 4; required string remoteAddr = 4;
@@ -16,6 +16,10 @@ message NodeInfo {
repeated string other = 7; repeated string other = 7;
} }
message ID {
required string id = 1;
}
message PubKey { message PubKey {
optional PubKeyEd25519 ed25519 = 1; optional PubKeyEd25519 ed25519 = 1;
} }

View File

@@ -1,21 +1,16 @@
package blockchain package blockchain
import ( import (
"bytes"
"errors"
"fmt" "fmt"
"reflect" "reflect"
"sync"
"time" "time"
wire "github.com/tendermint/go-wire" amino "github.com/tendermint/go-amino"
cmn "github.com/tendermint/tmlibs/common"
"github.com/tendermint/tmlibs/log"
"github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/p2p"
sm "github.com/tendermint/tendermint/state" sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/types"
cmn "github.com/tendermint/tmlibs/common"
"github.com/tendermint/tmlibs/log"
) )
const ( const (
@@ -31,6 +26,13 @@ const (
statusUpdateIntervalSeconds = 10 statusUpdateIntervalSeconds = 10
// check if we should switch to consensus reactor // check if we should switch to consensus reactor
switchToConsensusIntervalSeconds = 1 switchToConsensusIntervalSeconds = 1
// NOTE: keep up to date with bcBlockResponseMessage
bcBlockResponseMessagePrefixSize = 4
bcBlockResponseMessageFieldKeySize = 1
maxMsgSize = types.MaxBlockSizeBytes +
bcBlockResponseMessagePrefixSize +
bcBlockResponseMessageFieldKeySize
) )
type consensusReactor interface { type consensusReactor interface {
@@ -52,9 +54,6 @@ func (e peerError) Error() string {
type BlockchainReactor struct { type BlockchainReactor struct {
p2p.BaseReactor p2p.BaseReactor
mtx sync.Mutex
params types.ConsensusParams
// immutable // immutable
initialState sm.State initialState sm.State
@@ -87,7 +86,6 @@ func NewBlockchainReactor(state sm.State, blockExec *sm.BlockExecutor, store *Bl
) )
bcR := &BlockchainReactor{ bcR := &BlockchainReactor{
params: state.ConsensusParams,
initialState: state, initialState: state,
blockExec: blockExec, blockExec: blockExec,
store: store, store: store,
@@ -134,14 +132,16 @@ func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor {
ID: BlockchainChannel, ID: BlockchainChannel,
Priority: 10, Priority: 10,
SendQueueCapacity: 1000, SendQueueCapacity: 1000,
RecvBufferCapacity: 50 * 4096,
RecvMessageCapacity: maxMsgSize,
}, },
} }
} }
// AddPeer implements Reactor by sending our state to peer. // AddPeer implements Reactor by sending our state to peer.
func (bcR *BlockchainReactor) AddPeer(peer p2p.Peer) { func (bcR *BlockchainReactor) AddPeer(peer p2p.Peer) {
if !peer.Send(BlockchainChannel, msgBytes := cdc.MustMarshalBinaryBare(&bcStatusResponseMessage{bcR.store.Height()})
struct{ BlockchainMessage }{&bcStatusResponseMessage{bcR.store.Height()}}) { if !peer.Send(BlockchainChannel, msgBytes) {
// doing nothing, will try later in `poolRoutine` // doing nothing, will try later in `poolRoutine`
} }
// peer is added to the pool once we receive the first // peer is added to the pool once we receive the first
@@ -162,20 +162,19 @@ func (bcR *BlockchainReactor) respondToPeer(msg *bcBlockRequestMessage,
block := bcR.store.LoadBlock(msg.Height) block := bcR.store.LoadBlock(msg.Height)
if block != nil { if block != nil {
msg := &bcBlockResponseMessage{Block: block} msgBytes := cdc.MustMarshalBinaryBare(&bcBlockResponseMessage{Block: block})
return src.TrySend(BlockchainChannel, struct{ BlockchainMessage }{msg}) return src.TrySend(BlockchainChannel, msgBytes)
} }
bcR.Logger.Info("Peer asking for a block we don't have", "src", src, "height", msg.Height) bcR.Logger.Info("Peer asking for a block we don't have", "src", src, "height", msg.Height)
return src.TrySend(BlockchainChannel, struct{ BlockchainMessage }{ msgBytes := cdc.MustMarshalBinaryBare(&bcNoBlockResponseMessage{Height: msg.Height})
&bcNoBlockResponseMessage{Height: msg.Height}, return src.TrySend(BlockchainChannel, msgBytes)
})
} }
// Receive implements Reactor by handling 4 types of messages (look below). // Receive implements Reactor by handling 4 types of messages (look below).
func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
_, msg, err := DecodeMessage(msgBytes, bcR.maxMsgSize()) msg, err := DecodeMessage(msgBytes)
if err != nil { if err != nil {
bcR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes) bcR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes)
bcR.Switch.StopPeerForError(src, err) bcR.Switch.StopPeerForError(src, err)
@@ -194,8 +193,8 @@ func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
bcR.pool.AddBlock(src.ID(), msg.Block, len(msgBytes)) bcR.pool.AddBlock(src.ID(), msg.Block, len(msgBytes))
case *bcStatusRequestMessage: case *bcStatusRequestMessage:
// Send peer our state. // Send peer our state.
queued := src.TrySend(BlockchainChannel, msgBytes := cdc.MustMarshalBinaryBare(&bcStatusResponseMessage{bcR.store.Height()})
struct{ BlockchainMessage }{&bcStatusResponseMessage{bcR.store.Height()}}) queued := src.TrySend(BlockchainChannel, msgBytes)
if !queued { if !queued {
// sorry // sorry
} }
@@ -207,21 +206,6 @@ func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
} }
} }
// maxMsgSize returns the maximum allowable size of a
// message on the blockchain reactor.
func (bcR *BlockchainReactor) maxMsgSize() int {
bcR.mtx.Lock()
defer bcR.mtx.Unlock()
return bcR.params.BlockSize.MaxBytes + 2
}
// updateConsensusParams updates the internal consensus params
func (bcR *BlockchainReactor) updateConsensusParams(params types.ConsensusParams) {
bcR.mtx.Lock()
defer bcR.mtx.Unlock()
bcR.params = params
}
// Handle messages from the poolReactor telling the reactor what to do. // Handle messages from the poolReactor telling the reactor what to do.
// NOTE: Don't sleep in the FOR_LOOP or otherwise slow it down! // NOTE: Don't sleep in the FOR_LOOP or otherwise slow it down!
// (Except for the SYNC_LOOP, which is the primary purpose and must be synchronous.) // (Except for the SYNC_LOOP, which is the primary purpose and must be synchronous.)
@@ -247,8 +231,8 @@ FOR_LOOP:
if peer == nil { if peer == nil {
continue FOR_LOOP // Peer has since been disconnected. continue FOR_LOOP // Peer has since been disconnected.
} }
msg := &bcBlockRequestMessage{request.Height} msgBytes := cdc.MustMarshalBinaryBare(&bcBlockRequestMessage{request.Height})
queued := peer.TrySend(BlockchainChannel, struct{ BlockchainMessage }{msg}) queued := peer.TrySend(BlockchainChannel, msgBytes)
if !queued { if !queued {
// We couldn't make the request, send-queue full. // We couldn't make the request, send-queue full.
// The pool handles timeouts, just let it go. // The pool handles timeouts, just let it go.
@@ -321,9 +305,6 @@ FOR_LOOP:
} }
blocksSynced++ blocksSynced++
// update the consensus params
bcR.updateConsensusParams(state.ConsensusParams)
if blocksSynced%100 == 0 { if blocksSynced%100 == 0 {
lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds()) lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds())
bcR.Logger.Info("Fast Sync Rate", "height", bcR.pool.height, bcR.Logger.Info("Fast Sync Rate", "height", bcR.pool.height,
@@ -341,43 +322,36 @@ FOR_LOOP:
// BroadcastStatusRequest broadcasts `BlockStore` height. // BroadcastStatusRequest broadcasts `BlockStore` height.
func (bcR *BlockchainReactor) BroadcastStatusRequest() error { func (bcR *BlockchainReactor) BroadcastStatusRequest() error {
bcR.Switch.Broadcast(BlockchainChannel, msgBytes := cdc.MustMarshalBinaryBare(&bcStatusRequestMessage{bcR.store.Height()})
struct{ BlockchainMessage }{&bcStatusRequestMessage{bcR.store.Height()}}) bcR.Switch.Broadcast(BlockchainChannel, msgBytes)
return nil return nil
} }
//----------------------------------------------------------------------------- //-----------------------------------------------------------------------------
// Messages // Messages
const (
msgTypeBlockRequest = byte(0x10)
msgTypeBlockResponse = byte(0x11)
msgTypeNoBlockResponse = byte(0x12)
msgTypeStatusResponse = byte(0x20)
msgTypeStatusRequest = byte(0x21)
)
// BlockchainMessage is a generic message for this reactor. // BlockchainMessage is a generic message for this reactor.
type BlockchainMessage interface{} type BlockchainMessage interface{}
var _ = wire.RegisterInterface( func RegisterBlockchainMessages(cdc *amino.Codec) {
struct{ BlockchainMessage }{}, cdc.RegisterInterface((*BlockchainMessage)(nil), nil)
wire.ConcreteType{&bcBlockRequestMessage{}, msgTypeBlockRequest}, cdc.RegisterConcrete(&bcBlockRequestMessage{}, "tendermint/mempool/BlockRequest", nil)
wire.ConcreteType{&bcBlockResponseMessage{}, msgTypeBlockResponse}, cdc.RegisterConcrete(&bcBlockResponseMessage{}, "tendermint/mempool/BlockResponse", nil)
wire.ConcreteType{&bcNoBlockResponseMessage{}, msgTypeNoBlockResponse}, cdc.RegisterConcrete(&bcNoBlockResponseMessage{}, "tendermint/mempool/NoBlockResponse", nil)
wire.ConcreteType{&bcStatusResponseMessage{}, msgTypeStatusResponse}, cdc.RegisterConcrete(&bcStatusResponseMessage{}, "tendermint/mempool/StatusResponse", nil)
wire.ConcreteType{&bcStatusRequestMessage{}, msgTypeStatusRequest}, cdc.RegisterConcrete(&bcStatusRequestMessage{}, "tendermint/mempool/StatusRequest", nil)
) }
// DecodeMessage decodes BlockchainMessage. // DecodeMessage decodes BlockchainMessage.
// TODO: ensure that bz is completely read. // TODO: ensure that bz is completely read.
func DecodeMessage(bz []byte, maxSize int) (msgType byte, msg BlockchainMessage, err error) { func DecodeMessage(bz []byte) (msg BlockchainMessage, err error) {
msgType = bz[0] if len(bz) > maxMsgSize {
n := int(0) return msg, fmt.Errorf("Msg exceeds max size (%d > %d)",
r := bytes.NewReader(bz) len(bz), maxMsgSize)
msg = wire.ReadBinary(struct{ BlockchainMessage }{}, r, maxSize, &n, &err).(struct{ BlockchainMessage }).BlockchainMessage }
if err != nil && n != len(bz) { err = cdc.UnmarshalBinaryBare(bz, &msg)
err = errors.New("DecodeMessage() had bytes left over") if err != nil {
err = cmn.ErrorWrap(err, "DecodeMessage() had bytes left over")
} }
return return
} }
@@ -402,7 +376,6 @@ func (brm *bcNoBlockResponseMessage) String() string {
//------------------------------------- //-------------------------------------
// NOTE: keep up-to-date with maxBlockchainResponseSize
type bcBlockResponseMessage struct { type bcBlockResponseMessage struct {
Block *types.Block Block *types.Block
} }
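Note: with the codec set up in blockchain/wire.go (added below), sending and receiving now goes through raw amino bytes instead of the wire struct wrappers. Inside the blockchain package, a message round trip looks roughly like this (a sketch using the message types from this diff):

// Encode a status request the way BroadcastStatusRequest does ...
msgBytes := cdc.MustMarshalBinaryBare(&bcStatusRequestMessage{10})
// ... and decode it the way Receive does.
msg, err := DecodeMessage(msgBytes)
if err != nil {
	panic(err)
}
if _, ok := msg.(*bcStatusRequestMessage); ok {
	// handle the status request
}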

View File

@@ -3,8 +3,6 @@ package blockchain
import ( import (
"testing" "testing"
wire "github.com/tendermint/go-wire"
cmn "github.com/tendermint/tmlibs/common" cmn "github.com/tendermint/tmlibs/common"
dbm "github.com/tendermint/tmlibs/db" dbm "github.com/tendermint/tmlibs/db"
"github.com/tendermint/tmlibs/log" "github.com/tendermint/tmlibs/log"
@@ -18,8 +16,15 @@ import (
func makeStateAndBlockStore(logger log.Logger) (sm.State, *BlockStore) { func makeStateAndBlockStore(logger log.Logger) (sm.State, *BlockStore) {
config := cfg.ResetTestRoot("blockchain_reactor_test") config := cfg.ResetTestRoot("blockchain_reactor_test")
blockStore := NewBlockStore(dbm.NewMemDB()) // blockDB := dbm.NewDebugDB("blockDB", dbm.NewMemDB())
state, _ := sm.LoadStateFromDBOrGenesisFile(dbm.NewMemDB(), config.GenesisFile()) // stateDB := dbm.NewDebugDB("stateDB", dbm.NewMemDB())
blockDB := dbm.NewMemDB()
stateDB := dbm.NewMemDB()
blockStore := NewBlockStore(blockDB)
state, err := sm.LoadStateFromDBOrGenesisFile(stateDB, config.GenesisFile())
if err != nil {
panic(cmn.ErrorWrap(err, "error constructing state from genesis file"))
}
return state, blockStore return state, blockStore
} }
@@ -76,10 +81,9 @@ func TestNoBlockResponse(t *testing.T) {
// wait for our response to be received on the peer // wait for our response to be received on the peer
for _, tt := range tests { for _, tt := range tests {
reqBlockMsg := &bcBlockRequestMessage{tt.height} reqBlockMsg := &bcBlockRequestMessage{tt.height}
reqBlockBytes := wire.BinaryBytes(struct{ BlockchainMessage }{reqBlockMsg}) reqBlockBytes := cdc.MustMarshalBinaryBare(reqBlockMsg)
bcr.Receive(chID, peer, reqBlockBytes) bcr.Receive(chID, peer, reqBlockBytes)
value := peer.lastValue() msg := peer.lastBlockchainMessage()
msg := value.(struct{ BlockchainMessage }).BlockchainMessage
if tt.existent { if tt.existent {
if blockMsg, ok := msg.(*bcBlockResponseMessage); !ok { if blockMsg, ok := msg.(*bcBlockResponseMessage); !ok {
@@ -173,22 +177,26 @@ func newbcrTestPeer(id p2p.ID) *bcrTestPeer {
return bcr return bcr
} }
func (tp *bcrTestPeer) lastValue() interface{} { return <-tp.ch } func (tp *bcrTestPeer) lastBlockchainMessage() interface{} { return <-tp.ch }
func (tp *bcrTestPeer) TrySend(chID byte, value interface{}) bool { func (tp *bcrTestPeer) TrySend(chID byte, msgBytes []byte) bool {
if _, ok := value.(struct{ BlockchainMessage }). var msg BlockchainMessage
BlockchainMessage.(*bcStatusResponseMessage); ok { err := cdc.UnmarshalBinaryBare(msgBytes, &msg)
if err != nil {
panic(cmn.ErrorWrap(err, "Error while trying to parse a BlockchainMessage"))
}
if _, ok := msg.(*bcStatusResponseMessage); ok {
// Discard status response messages since they skew our results // Discard status response messages since they skew our results
// We only want to deal with: // We only want to deal with:
// + bcBlockResponseMessage // + bcBlockResponseMessage
// + bcNoBlockResponseMessage // + bcNoBlockResponseMessage
} else { } else {
tp.ch <- value tp.ch <- msg
} }
return true return true
} }
func (tp *bcrTestPeer) Send(chID byte, data interface{}) bool { return tp.TrySend(chID, data) } func (tp *bcrTestPeer) Send(chID byte, msgBytes []byte) bool { return tp.TrySend(chID, msgBytes) }
func (tp *bcrTestPeer) NodeInfo() p2p.NodeInfo { return p2p.NodeInfo{} } func (tp *bcrTestPeer) NodeInfo() p2p.NodeInfo { return p2p.NodeInfo{} }
func (tp *bcrTestPeer) Status() p2p.ConnectionStatus { return p2p.ConnectionStatus{} } func (tp *bcrTestPeer) Status() p2p.ConnectionStatus { return p2p.ConnectionStatus{} }
func (tp *bcrTestPeer) ID() p2p.ID { return tp.id } func (tp *bcrTestPeer) ID() p2p.ID { return tp.id }

View File

@@ -1,14 +1,9 @@
package blockchain package blockchain
import ( import (
"bytes"
"encoding/json"
"fmt" "fmt"
"io"
"sync" "sync"
wire "github.com/tendermint/go-wire"
cmn "github.com/tendermint/tmlibs/common" cmn "github.com/tendermint/tmlibs/common"
dbm "github.com/tendermint/tmlibs/db" dbm "github.com/tendermint/tmlibs/db"
@@ -54,38 +49,25 @@ func (bs *BlockStore) Height() int64 {
return bs.height return bs.height
} }
// GetReader returns the value associated with the given key wrapped in an io.Reader.
// If no value is found, it returns nil.
// It's mainly for use with wire.ReadBinary.
func (bs *BlockStore) GetReader(key []byte) io.Reader {
bytez := bs.db.Get(key)
if bytez == nil {
return nil
}
return bytes.NewReader(bytez)
}
// LoadBlock returns the block with the given height. // LoadBlock returns the block with the given height.
// If no block is found for that height, it returns nil. // If no block is found for that height, it returns nil.
func (bs *BlockStore) LoadBlock(height int64) *types.Block { func (bs *BlockStore) LoadBlock(height int64) *types.Block {
var n int var blockMeta = bs.LoadBlockMeta(height)
var err error if blockMeta == nil {
r := bs.GetReader(calcBlockMetaKey(height))
if r == nil {
return nil return nil
} }
blockMeta := wire.ReadBinary(&types.BlockMeta{}, r, 0, &n, &err).(*types.BlockMeta)
if err != nil { var block = new(types.Block)
panic(fmt.Sprintf("Error reading block meta: %v", err)) buf := []byte{}
}
bytez := []byte{}
for i := 0; i < blockMeta.BlockID.PartsHeader.Total; i++ { for i := 0; i < blockMeta.BlockID.PartsHeader.Total; i++ {
part := bs.LoadBlockPart(height, i) part := bs.LoadBlockPart(height, i)
bytez = append(bytez, part.Bytes...) buf = append(buf, part.Bytes...)
} }
block := wire.ReadBinary(&types.Block{}, bytes.NewReader(bytez), 0, &n, &err).(*types.Block) err := cdc.UnmarshalBinary(buf, block)
if err != nil { if err != nil {
panic(fmt.Sprintf("Error reading block: %v", err)) // NOTE: The existence of meta should imply the existence of the
// block. So, make sure meta is only saved after blocks are saved.
panic(cmn.ErrorWrap(err, "Error reading block"))
} }
return block return block
} }
@@ -94,15 +76,14 @@ func (bs *BlockStore) LoadBlock(height int64) *types.Block {
// from the block at the given height. // from the block at the given height.
// If no part is found for the given height and index, it returns nil. // If no part is found for the given height and index, it returns nil.
func (bs *BlockStore) LoadBlockPart(height int64, index int) *types.Part { func (bs *BlockStore) LoadBlockPart(height int64, index int) *types.Part {
var n int var part = new(types.Part)
var err error bz := bs.db.Get(calcBlockPartKey(height, index))
r := bs.GetReader(calcBlockPartKey(height, index)) if len(bz) == 0 {
if r == nil {
return nil return nil
} }
part := wire.ReadBinary(&types.Part{}, r, 0, &n, &err).(*types.Part) err := cdc.UnmarshalBinaryBare(bz, part)
if err != nil { if err != nil {
panic(fmt.Sprintf("Error reading block part: %v", err)) panic(cmn.ErrorWrap(err, "Error reading block part"))
} }
return part return part
} }
@@ -110,15 +91,14 @@ func (bs *BlockStore) LoadBlockPart(height int64, index int) *types.Part {
// LoadBlockMeta returns the BlockMeta for the given height. // LoadBlockMeta returns the BlockMeta for the given height.
// If no block is found for the given height, it returns nil. // If no block is found for the given height, it returns nil.
func (bs *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta { func (bs *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta {
var n int var blockMeta = new(types.BlockMeta)
var err error bz := bs.db.Get(calcBlockMetaKey(height))
r := bs.GetReader(calcBlockMetaKey(height)) if len(bz) == 0 {
if r == nil {
return nil return nil
} }
blockMeta := wire.ReadBinary(&types.BlockMeta{}, r, 0, &n, &err).(*types.BlockMeta) err := cdc.UnmarshalBinaryBare(bz, blockMeta)
if err != nil { if err != nil {
panic(fmt.Sprintf("Error reading block meta: %v", err)) panic(cmn.ErrorWrap(err, "Error reading block meta"))
} }
return blockMeta return blockMeta
} }
@@ -128,15 +108,14 @@ func (bs *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta {
// and it comes from the block.LastCommit for `height+1`. // and it comes from the block.LastCommit for `height+1`.
// If no commit is found for the given height, it returns nil. // If no commit is found for the given height, it returns nil.
func (bs *BlockStore) LoadBlockCommit(height int64) *types.Commit { func (bs *BlockStore) LoadBlockCommit(height int64) *types.Commit {
var n int var commit = new(types.Commit)
var err error bz := bs.db.Get(calcBlockCommitKey(height))
r := bs.GetReader(calcBlockCommitKey(height)) if len(bz) == 0 {
if r == nil {
return nil return nil
} }
commit := wire.ReadBinary(&types.Commit{}, r, 0, &n, &err).(*types.Commit) err := cdc.UnmarshalBinaryBare(bz, commit)
if err != nil { if err != nil {
panic(fmt.Sprintf("Error reading commit: %v", err)) panic(cmn.ErrorWrap(err, "Error reading block commit"))
} }
return commit return commit
} }
@@ -145,15 +124,14 @@ func (bs *BlockStore) LoadBlockCommit(height int64) *types.Commit {
// This is useful when we've seen a commit, but there has not yet been // This is useful when we've seen a commit, but there has not yet been
// a new block at `height + 1` that includes this commit in its block.LastCommit. // a new block at `height + 1` that includes this commit in its block.LastCommit.
func (bs *BlockStore) LoadSeenCommit(height int64) *types.Commit { func (bs *BlockStore) LoadSeenCommit(height int64) *types.Commit {
var n int var commit = new(types.Commit)
var err error bz := bs.db.Get(calcSeenCommitKey(height))
r := bs.GetReader(calcSeenCommitKey(height)) if len(bz) == 0 {
if r == nil {
return nil return nil
} }
commit := wire.ReadBinary(&types.Commit{}, r, 0, &n, &err).(*types.Commit) err := cdc.UnmarshalBinaryBare(bz, commit)
if err != nil { if err != nil {
panic(fmt.Sprintf("Error reading commit: %v", err)) panic(cmn.ErrorWrap(err, "Error reading block seen commit"))
} }
return commit return commit
} }
@@ -178,21 +156,22 @@ func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, s
// Save block meta // Save block meta
blockMeta := types.NewBlockMeta(block, blockParts) blockMeta := types.NewBlockMeta(block, blockParts)
metaBytes := wire.BinaryBytes(blockMeta) metaBytes := cdc.MustMarshalBinaryBare(blockMeta)
bs.db.Set(calcBlockMetaKey(height), metaBytes) bs.db.Set(calcBlockMetaKey(height), metaBytes)
// Save block parts // Save block parts
for i := 0; i < blockParts.Total(); i++ { for i := 0; i < blockParts.Total(); i++ {
bs.saveBlockPart(height, i, blockParts.GetPart(i)) part := blockParts.GetPart(i)
bs.saveBlockPart(height, i, part)
} }
// Save block commit (duplicate and separate from the Block) // Save block commit (duplicate and separate from the Block)
blockCommitBytes := wire.BinaryBytes(block.LastCommit) blockCommitBytes := cdc.MustMarshalBinaryBare(block.LastCommit)
bs.db.Set(calcBlockCommitKey(height-1), blockCommitBytes) bs.db.Set(calcBlockCommitKey(height-1), blockCommitBytes)
// Save seen commit (seen +2/3 precommits for block) // Save seen commit (seen +2/3 precommits for block)
// NOTE: we can delete this at a later height // NOTE: we can delete this at a later height
seenCommitBytes := wire.BinaryBytes(seenCommit) seenCommitBytes := cdc.MustMarshalBinaryBare(seenCommit)
bs.db.Set(calcSeenCommitKey(height), seenCommitBytes) bs.db.Set(calcSeenCommitKey(height), seenCommitBytes)
// Save new BlockStoreStateJSON descriptor // Save new BlockStoreStateJSON descriptor
@@ -211,7 +190,7 @@ func (bs *BlockStore) saveBlockPart(height int64, index int, part *types.Part) {
if height != bs.Height()+1 { if height != bs.Height()+1 {
cmn.PanicSanity(cmn.Fmt("BlockStore can only save contiguous blocks. Wanted %v, got %v", bs.Height()+1, height)) cmn.PanicSanity(cmn.Fmt("BlockStore can only save contiguous blocks. Wanted %v, got %v", bs.Height()+1, height))
} }
partBytes := wire.BinaryBytes(part) partBytes := cdc.MustMarshalBinaryBare(part)
bs.db.Set(calcBlockPartKey(height, index), partBytes) bs.db.Set(calcBlockPartKey(height, index), partBytes)
} }
@@ -238,12 +217,12 @@ func calcSeenCommitKey(height int64) []byte {
var blockStoreKey = []byte("blockStore") var blockStoreKey = []byte("blockStore")
type BlockStoreStateJSON struct { type BlockStoreStateJSON struct {
Height int64 Height int64 `json:"height"`
} }
// Save persists the blockStore state to the database as JSON. // Save persists the blockStore state to the database as JSON.
func (bsj BlockStoreStateJSON) Save(db dbm.DB) { func (bsj BlockStoreStateJSON) Save(db dbm.DB) {
bytes, err := json.Marshal(bsj) bytes, err := cdc.MarshalJSON(bsj)
if err != nil { if err != nil {
cmn.PanicSanity(cmn.Fmt("Could not marshal state bytes: %v", err)) cmn.PanicSanity(cmn.Fmt("Could not marshal state bytes: %v", err))
} }
@@ -260,7 +239,7 @@ func LoadBlockStoreStateJSON(db dbm.DB) BlockStoreStateJSON {
} }
} }
bsj := BlockStoreStateJSON{} bsj := BlockStoreStateJSON{}
err := json.Unmarshal(bytes, &bsj) err := cdc.UnmarshalJSON(bytes, &bsj)
if err != nil { if err != nil {
panic(fmt.Sprintf("Could not unmarshal bytes: %X", bytes)) panic(fmt.Sprintf("Could not unmarshal bytes: %X", bytes))
} }
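Note: the store methods above all follow the same amino pattern: MustMarshalBinaryBare on save, UnmarshalBinaryBare on load, a nil return for missing keys, and a wrapped panic for corrupt data. A sketch of that round trip for a single block part, inside the blockchain package:

// Save one part under its key, then load it back as LoadBlockPart does.
db := dbm.NewMemDB()
part := &types.Part{Bytes: []byte("some part bytes")}
db.Set(calcBlockPartKey(10, 0), cdc.MustMarshalBinaryBare(part))

bz := db.Get(calcBlockPartKey(10, 0))
loaded := new(types.Part)
if err := cdc.UnmarshalBinaryBare(bz, loaded); err != nil {
	panic(cmn.ErrorWrap(err, "Error reading block part"))
}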

View File

@@ -3,7 +3,6 @@ package blockchain
import ( import (
"bytes" "bytes"
"fmt" "fmt"
"io/ioutil"
"runtime/debug" "runtime/debug"
"strings" "strings"
"testing" "testing"
@@ -11,9 +10,6 @@ import (
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
wire "github.com/tendermint/go-wire"
"github.com/tendermint/tmlibs/db" "github.com/tendermint/tmlibs/db"
"github.com/tendermint/tmlibs/log" "github.com/tendermint/tmlibs/log"
@@ -35,7 +31,7 @@ func TestNewBlockStore(t *testing.T) {
db := db.NewMemDB() db := db.NewMemDB()
db.Set(blockStoreKey, []byte(`{"height": 10000}`)) db.Set(blockStoreKey, []byte(`{"height": 10000}`))
bs := NewBlockStore(db) bs := NewBlockStore(db)
assert.Equal(t, bs.Height(), int64(10000), "failed to properly parse blockstore") require.Equal(t, int64(10000), bs.Height(), "failed to properly parse blockstore")
panicCausers := []struct { panicCausers := []struct {
data []byte data []byte
@@ -61,38 +57,6 @@ func TestNewBlockStore(t *testing.T) {
assert.Equal(t, bs.Height(), int64(0), "expecting nil bytes to be unmarshaled alright") assert.Equal(t, bs.Height(), int64(0), "expecting nil bytes to be unmarshaled alright")
} }
func TestBlockStoreGetReader(t *testing.T) {
db := db.NewMemDB()
// Initial setup
db.Set([]byte("Foo"), []byte("Bar"))
db.Set([]byte("Foo1"), nil)
bs := NewBlockStore(db)
tests := [...]struct {
key []byte
want []byte
}{
0: {key: []byte("Foo"), want: []byte("Bar")},
1: {key: []byte("KnoxNonExistent"), want: nil},
2: {key: []byte("Foo1"), want: []byte{}},
}
for i, tt := range tests {
r := bs.GetReader(tt.key)
if r == nil {
assert.Nil(t, tt.want, "#%d: expected a non-nil reader", i)
continue
}
slurp, err := ioutil.ReadAll(r)
if err != nil {
t.Errorf("#%d: unexpected Read err: %v", i, err)
} else {
assert.Equal(t, slurp, tt.want, "#%d: mismatch", i)
}
}
}
func freshBlockStore() (*BlockStore, db.DB) { func freshBlockStore() (*BlockStore, db.DB) {
db := db.NewMemDB() db := db.NewMemDB()
return NewBlockStore(db), db return NewBlockStore(db), db
@@ -189,14 +153,14 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
parts: validPartSet, parts: validPartSet,
seenCommit: seenCommit1, seenCommit: seenCommit1,
corruptCommitInDB: true, // Corrupt the DB's commit entry corruptCommitInDB: true, // Corrupt the DB's commit entry
wantPanic: "rror reading commit", wantPanic: "Error reading block commit",
}, },
{ {
block: newBlock(&header1, commitAtH10), block: newBlock(&header1, commitAtH10),
parts: validPartSet, parts: validPartSet,
seenCommit: seenCommit1, seenCommit: seenCommit1,
wantPanic: "rror reading block", wantPanic: "Error reading block",
corruptBlockInDB: true, // Corrupt the DB's block entry corruptBlockInDB: true, // Corrupt the DB's block entry
}, },
@@ -215,7 +179,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
seenCommit: seenCommit1, seenCommit: seenCommit1,
corruptSeenCommitInDB: true, corruptSeenCommitInDB: true,
wantPanic: "rror reading commit", wantPanic: "Error reading block seen commit",
}, },
{ {
@@ -305,14 +269,6 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
} }
} }
func binarySerializeIt(v interface{}) []byte {
var n int
var err error
buf := new(bytes.Buffer)
wire.WriteBinary(v, buf, &n, &err)
return buf.Bytes()
}
func TestLoadBlockPart(t *testing.T) { func TestLoadBlockPart(t *testing.T) {
bs, db := freshBlockStore() bs, db := freshBlockStore()
height, index := int64(10), 1 height, index := int64(10), 1
@@ -334,7 +290,7 @@ func TestLoadBlockPart(t *testing.T) {
require.Contains(t, panicErr.Error(), "Error reading block part") require.Contains(t, panicErr.Error(), "Error reading block part")
// 3. A good block serialized and saved to the DB should be retrievable // 3. A good block serialized and saved to the DB should be retrievable
db.Set(calcBlockPartKey(height, index), binarySerializeIt(part1)) db.Set(calcBlockPartKey(height, index), cdc.MustMarshalBinaryBare(part1))
gotPart, _, panicErr := doFn(loadPart) gotPart, _, panicErr := doFn(loadPart)
require.Nil(t, panicErr, "an existent and proper block should not panic") require.Nil(t, panicErr, "an existent and proper block should not panic")
require.Nil(t, res, "a properly saved block should return a proper block") require.Nil(t, res, "a properly saved block should return a proper block")
@@ -364,11 +320,11 @@ func TestLoadBlockMeta(t *testing.T) {
// 3. A good blockMeta serialized and saved to the DB should be retrievable // 3. A good blockMeta serialized and saved to the DB should be retrievable
meta := &types.BlockMeta{} meta := &types.BlockMeta{}
db.Set(calcBlockMetaKey(height), binarySerializeIt(meta)) db.Set(calcBlockMetaKey(height), cdc.MustMarshalBinaryBare(meta))
gotMeta, _, panicErr := doFn(loadMeta) gotMeta, _, panicErr := doFn(loadMeta)
require.Nil(t, panicErr, "an existent and proper block should not panic") require.Nil(t, panicErr, "an existent and proper block should not panic")
require.Nil(t, res, "a properly saved blockMeta should return a proper blocMeta ") require.Nil(t, res, "a properly saved blockMeta should return a proper blocMeta ")
require.Equal(t, binarySerializeIt(meta), binarySerializeIt(gotMeta), require.Equal(t, cdc.MustMarshalBinaryBare(meta), cdc.MustMarshalBinaryBare(gotMeta),
"expecting successful retrieval of previously saved blockMeta") "expecting successful retrieval of previously saved blockMeta")
} }
@@ -385,6 +341,9 @@ func TestBlockFetchAtHeight(t *testing.T) {
require.Equal(t, bs.Height(), block.Header.Height, "expecting the new height to be changed") require.Equal(t, bs.Height(), block.Header.Height, "expecting the new height to be changed")
blockAtHeight := bs.LoadBlock(bs.Height()) blockAtHeight := bs.LoadBlock(bs.Height())
bz1 := cdc.MustMarshalBinaryBare(block)
bz2 := cdc.MustMarshalBinaryBare(blockAtHeight)
require.Equal(t, bz1, bz2)
require.Equal(t, block.Hash(), blockAtHeight.Hash(), require.Equal(t, block.Hash(), blockAtHeight.Hash(),
"expecting a successful load of the last saved block") "expecting a successful load of the last saved block")

blockchain/wire.go Normal file
View File

@@ -0,0 +1,13 @@
package blockchain
import (
amino "github.com/tendermint/go-amino"
"github.com/tendermint/go-crypto"
)
var cdc = amino.NewCodec()
func init() {
RegisterBlockchainMessages(cdc)
crypto.RegisterAmino(cdc)
}

View File

@@ -30,7 +30,7 @@ func main() {
"privPath", *privValPath, "privPath", *privValPath,
) )
privVal := priv_val.LoadPrivValidatorJSON(*privValPath) privVal := priv_val.LoadFilePV(*privValPath)
rs := priv_val.NewRemoteSigner( rs := priv_val.NewRemoteSigner(
logger, logger,

View File

@@ -0,0 +1,32 @@
package commands
import (
"fmt"
"github.com/spf13/cobra"
"github.com/tendermint/tendermint/p2p"
cmn "github.com/tendermint/tmlibs/common"
)
// GenNodeKeyCmd allows the generation of a node key. It prints the node's ID
// to standard output.
var GenNodeKeyCmd = &cobra.Command{
Use: "gen_node_key",
Short: "Generate a node key for this node and print its ID",
RunE: genNodeKey,
}
func genNodeKey(cmd *cobra.Command, args []string) error {
nodeKeyFile := config.NodeKeyFile()
if cmn.FileExists(nodeKeyFile) {
return fmt.Errorf("node key at %s already exists", nodeKeyFile)
}
nodeKey, err := p2p.LoadOrGenNodeKey(nodeKeyFile)
if err != nil {
return err
}
fmt.Println(nodeKey.ID())
return nil
}
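Note: the printed ID is what other nodes use to address this one: combined with a host and port it forms the persistent-peer entries that the testnet command below writes into config.toml. A sketch of that combination (a fragment assuming the commands package's config, with p2p.IDAddressString as used further down in testnet.go):

nodeKey, err := p2p.LoadOrGenNodeKey(config.NodeKeyFile())
if err != nil {
	return err
}
// e.g. "<id>@192.168.0.1:46656"
fmt.Println(p2p.IDAddressString(nodeKey.ID(), "192.168.0.1:46656"))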

View File

@@ -1,12 +1,11 @@
package commands package commands
import ( import (
"encoding/json"
"fmt" "fmt"
"github.com/spf13/cobra" "github.com/spf13/cobra"
"github.com/tendermint/tendermint/types" pvm "github.com/tendermint/tendermint/types/priv_validator"
) )
// GenValidatorCmd allows the generation of a keypair for a // GenValidatorCmd allows the generation of a keypair for a
@@ -18,11 +17,11 @@ var GenValidatorCmd = &cobra.Command{
} }
func genValidator(cmd *cobra.Command, args []string) { func genValidator(cmd *cobra.Command, args []string) {
privValidator := types.GenPrivValidatorFS("") pv := pvm.GenFilePV("")
privValidatorJSONBytes, err := json.MarshalIndent(privValidator, "", "\t") jsbz, err := cdc.MarshalJSON(pv)
if err != nil { if err != nil {
panic(err) panic(err)
} }
fmt.Printf(`%v fmt.Printf(`%v
`, string(privValidatorJSONBytes)) `, string(jsbz))
} }

View File

@@ -3,7 +3,10 @@ package commands
import ( import (
"github.com/spf13/cobra" "github.com/spf13/cobra"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/types"
pvm "github.com/tendermint/tendermint/types/priv_validator"
cmn "github.com/tendermint/tmlibs/common" cmn "github.com/tendermint/tmlibs/common"
) )
@@ -11,22 +14,36 @@ import (
var InitFilesCmd = &cobra.Command{ var InitFilesCmd = &cobra.Command{
Use: "init", Use: "init",
Short: "Initialize Tendermint", Short: "Initialize Tendermint",
Run: initFiles, RunE: initFiles,
} }
func initFiles(cmd *cobra.Command, args []string) { func initFiles(cmd *cobra.Command, args []string) error {
return initFilesWithConfig(config)
}
func initFilesWithConfig(config *cfg.Config) error {
// private validator // private validator
privValFile := config.PrivValidatorFile() privValFile := config.PrivValidatorFile()
var privValidator *types.PrivValidatorFS var pv *pvm.FilePV
if cmn.FileExists(privValFile) { if cmn.FileExists(privValFile) {
privValidator = types.LoadPrivValidatorFS(privValFile) pv = pvm.LoadFilePV(privValFile)
logger.Info("Found private validator", "path", privValFile) logger.Info("Found private validator", "path", privValFile)
} else { } else {
privValidator = types.GenPrivValidatorFS(privValFile) pv = pvm.GenFilePV(privValFile)
privValidator.Save() pv.Save()
logger.Info("Generated private validator", "path", privValFile) logger.Info("Generated private validator", "path", privValFile)
} }
nodeKeyFile := config.NodeKeyFile()
if cmn.FileExists(nodeKeyFile) {
logger.Info("Found node key", "path", nodeKeyFile)
} else {
if _, err := p2p.LoadOrGenNodeKey(nodeKeyFile); err != nil {
return err
}
logger.Info("Generated node key", "path", nodeKeyFile)
}
// genesis file // genesis file
genFile := config.GenesisFile() genFile := config.GenesisFile()
if cmn.FileExists(genFile) { if cmn.FileExists(genFile) {
@@ -36,13 +53,15 @@ func initFiles(cmd *cobra.Command, args []string) {
ChainID: cmn.Fmt("test-chain-%v", cmn.RandStr(6)), ChainID: cmn.Fmt("test-chain-%v", cmn.RandStr(6)),
} }
genDoc.Validators = []types.GenesisValidator{{ genDoc.Validators = []types.GenesisValidator{{
PubKey: privValidator.GetPubKey(), PubKey: pv.GetPubKey(),
Power: 10, Power: 10,
}} }}
if err := genDoc.SaveAs(genFile); err != nil { if err := genDoc.SaveAs(genFile); err != nil {
panic(err) return err
} }
logger.Info("Generated genesis file", "path", genFile) logger.Info("Generated genesis file", "path", genFile)
} }
return nil
} }

View File

@@ -34,14 +34,14 @@ var (
) )
func init() { func init() {
LiteCmd.Flags().StringVar(&listenAddr, "laddr", ":8888", "Serve the proxy on the given port") LiteCmd.Flags().StringVar(&listenAddr, "laddr", "tcp://localhost:8888", "Serve the proxy on the given address")
LiteCmd.Flags().StringVar(&nodeAddr, "node", "tcp://localhost:46657", "Connect to a Tendermint node at this address") LiteCmd.Flags().StringVar(&nodeAddr, "node", "tcp://localhost:46657", "Connect to a Tendermint node at this address")
LiteCmd.Flags().StringVar(&chainID, "chain-id", "tendermint", "Specify the Tendermint chain ID") LiteCmd.Flags().StringVar(&chainID, "chain-id", "tendermint", "Specify the Tendermint chain ID")
LiteCmd.Flags().StringVar(&home, "home-dir", ".tendermint-lite", "Specify the home directory") LiteCmd.Flags().StringVar(&home, "home-dir", ".tendermint-lite", "Specify the home directory")
} }
func ensureAddrHasSchemeOrDefaultToTCP(addr string) (string, error) { func ensureAddrHasSchemeOrDefaultToTCP(addr string) (string, error) {
u, err := url.Parse(nodeAddr) u, err := url.Parse(addr)
if err != nil { if err != nil {
return "", err return "", err
} }

View File

@@ -1,7 +1,6 @@
package commands package commands
import ( import (
"encoding/json"
"fmt" "fmt"
"github.com/spf13/cobra" "github.com/spf13/cobra"
@@ -22,7 +21,7 @@ func probeUpnp(cmd *cobra.Command, args []string) error {
fmt.Println("Probe failed: ", err) fmt.Println("Probe failed: ", err)
} else { } else {
fmt.Println("Probe success!") fmt.Println("Probe success!")
jsonBytes, err := json.Marshal(capabilities) jsonBytes, err := cdc.MarshalJSON(capabilities)
if err != nil { if err != nil {
return err return err
} }

View File

@@ -5,7 +5,7 @@ import (
"github.com/spf13/cobra" "github.com/spf13/cobra"
"github.com/tendermint/tendermint/types" pvm "github.com/tendermint/tendermint/types/priv_validator"
"github.com/tendermint/tmlibs/log" "github.com/tendermint/tmlibs/log"
) )
@@ -27,7 +27,7 @@ var ResetPrivValidatorCmd = &cobra.Command{
// ResetAll removes the privValidator files. // ResetAll removes the privValidator files.
// Exported so other CLI tools can use it. // Exported so other CLI tools can use it.
func ResetAll(dbDir, privValFile string, logger log.Logger) { func ResetAll(dbDir, privValFile string, logger log.Logger) {
resetPrivValidatorFS(privValFile, logger) resetFilePV(privValFile, logger)
if err := os.RemoveAll(dbDir); err != nil { if err := os.RemoveAll(dbDir); err != nil {
logger.Error("Error removing directory", "err", err) logger.Error("Error removing directory", "err", err)
return return
@@ -44,18 +44,18 @@ func resetAll(cmd *cobra.Command, args []string) {
// XXX: this is totally unsafe. // XXX: this is totally unsafe.
// it's only suitable for testnets. // it's only suitable for testnets.
func resetPrivValidator(cmd *cobra.Command, args []string) { func resetPrivValidator(cmd *cobra.Command, args []string) {
resetPrivValidatorFS(config.PrivValidatorFile(), logger) resetFilePV(config.PrivValidatorFile(), logger)
} }
func resetPrivValidatorFS(privValFile string, logger log.Logger) { func resetFilePV(privValFile string, logger log.Logger) {
// Get PrivValidator // Get PrivValidator
if _, err := os.Stat(privValFile); err == nil { if _, err := os.Stat(privValFile); err == nil {
privValidator := types.LoadPrivValidatorFS(privValFile) pv := pvm.LoadFilePV(privValFile)
privValidator.Reset() pv.Reset()
logger.Info("Reset PrivValidator", "file", privValFile) logger.Info("Reset PrivValidator", "file", privValFile)
} else { } else {
privValidator := types.GenPrivValidatorFS(privValFile) pv := pvm.GenFilePV(privValFile)
privValidator.Save() pv.Save()
logger.Info("Generated PrivValidator", "file", privValFile) logger.Info("Generated PrivValidator", "file", privValFile)
} }
} }

View File

@@ -57,9 +57,8 @@ func NewRunNodeCmd(nodeProvider nm.NodeProvider) *cobra.Command {
if err := n.Start(); err != nil { if err := n.Start(); err != nil {
return fmt.Errorf("Failed to start node: %v", err) return fmt.Errorf("Failed to start node: %v", err)
} else {
logger.Info("Started node", "nodeInfo", n.Switch().NodeInfo())
} }
logger.Info("Started node", "nodeInfo", n.Switch().NodeInfo())
// Trap signal, run forever. // Trap signal, run forever.
n.RunForever() n.RunForever()

View File

@@ -16,10 +16,12 @@ var ShowNodeIDCmd = &cobra.Command{
} }
func showNodeID(cmd *cobra.Command, args []string) error { func showNodeID(cmd *cobra.Command, args []string) error {
nodeKey, err := p2p.LoadOrGenNodeKey(config.NodeKeyFile())
nodeKey, err := p2p.LoadNodeKey(config.NodeKeyFile())
if err != nil { if err != nil {
return err return err
} }
fmt.Println(nodeKey.ID()) fmt.Println(nodeKey.ID())
return nil return nil
} }

View File

@@ -5,8 +5,7 @@ import (
"github.com/spf13/cobra" "github.com/spf13/cobra"
"github.com/tendermint/go-wire/data" privval "github.com/tendermint/tendermint/types/priv_validator"
"github.com/tendermint/tendermint/types"
) )
// ShowValidatorCmd adds capabilities for showing the validator info. // ShowValidatorCmd adds capabilities for showing the validator info.
@@ -17,7 +16,7 @@ var ShowValidatorCmd = &cobra.Command{
} }
func showValidator(cmd *cobra.Command, args []string) { func showValidator(cmd *cobra.Command, args []string) {
privValidator := types.LoadOrGenPrivValidatorFS(config.PrivValidatorFile()) privValidator := privval.LoadOrGenFilePV(config.PrivValidatorFile())
pubKeyJSONBytes, _ := data.ToJSON(privValidator.PubKey) pubKeyJSONBytes, _ := cdc.MarshalJSON(privValidator.GetPubKey())
fmt.Println(string(pubKeyJSONBytes)) fmt.Println(string(pubKeyJSONBytes))
} }

View File

@@ -2,59 +2,103 @@ package commands
import ( import (
"fmt" "fmt"
"net"
"os"
"path/filepath" "path/filepath"
"strings"
"time" "time"
"github.com/spf13/cobra" "github.com/spf13/cobra"
cfg "github.com/tendermint/tendermint/config" cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/types"
pvm "github.com/tendermint/tendermint/types/priv_validator"
cmn "github.com/tendermint/tmlibs/common" cmn "github.com/tendermint/tmlibs/common"
) )
//flags
var ( var (
nValidators int nValidators int
dataDir string nNonValidators int
outputDir string
nodeDirPrefix string
populatePersistentPeers bool
hostnamePrefix string
startingIPAddress string
p2pPort int
)
const (
nodeDirPerm = 0755
) )
func init() { func init() {
TestnetFilesCmd.Flags().IntVar(&nValidators, "n", 4, TestnetFilesCmd.Flags().IntVar(&nValidators, "v", 4,
"Number of validators to initialize the testnet with") "Number of validators to initialize the testnet with")
TestnetFilesCmd.Flags().StringVar(&dataDir, "dir", "mytestnet", TestnetFilesCmd.Flags().IntVar(&nNonValidators, "n", 0,
"Number of non-validators to initialize the testnet with")
TestnetFilesCmd.Flags().StringVar(&outputDir, "o", "./mytestnet",
"Directory to store initialization data for the testnet") "Directory to store initialization data for the testnet")
TestnetFilesCmd.Flags().StringVar(&nodeDirPrefix, "node-dir-prefix", "node",
"Prefix the directory name for each node with (node results in node0, node1, ...)")
TestnetFilesCmd.Flags().BoolVar(&populatePersistentPeers, "populate-persistent-peers", true,
"Update config of each node with the list of persistent peers build using either hostname-prefix or starting-ip-address")
TestnetFilesCmd.Flags().StringVar(&hostnamePrefix, "hostname-prefix", "node",
"Hostname prefix (node results in persistent peers list ID0@node0:46656, ID1@node1:46656, ...)")
TestnetFilesCmd.Flags().StringVar(&startingIPAddress, "starting-ip-address", "",
"Starting IP address (192.168.0.1 results in persistent peers list ID0@192.168.0.1:46656, ID1@192.168.0.2:46656, ...)")
TestnetFilesCmd.Flags().IntVar(&p2pPort, "p2p-port", 46656,
"P2P Port")
} }
// TestnetFilesCmd allows initialisation of files for a // TestnetFilesCmd allows initialisation of files for a Tendermint testnet.
// Tendermint testnet.
var TestnetFilesCmd = &cobra.Command{ var TestnetFilesCmd = &cobra.Command{
Use: "testnet", Use: "testnet",
Short: "Initialize files for a Tendermint testnet", Short: "Initialize files for a Tendermint testnet",
Run: testnetFiles, RunE: testnetFiles,
} }
func testnetFiles(cmd *cobra.Command, args []string) { func testnetFiles(cmd *cobra.Command, args []string) error {
config := cfg.DefaultConfig()
genVals := make([]types.GenesisValidator, nValidators) genVals := make([]types.GenesisValidator, nValidators)
defaultConfig := cfg.DefaultBaseConfig()
// Initialize core dir and priv_validator.json's
for i := 0; i < nValidators; i++ { for i := 0; i < nValidators; i++ {
mach := cmn.Fmt("mach%d", i) nodeDirName := cmn.Fmt("%s%d", nodeDirPrefix, i)
err := initMachCoreDirectory(dataDir, mach) nodeDir := filepath.Join(outputDir, nodeDirName)
config.SetRoot(nodeDir)
err := os.MkdirAll(filepath.Join(nodeDir, "config"), nodeDirPerm)
if err != nil { if err != nil {
cmn.Exit(err.Error()) _ = os.RemoveAll(outputDir)
return err
} }
// Read priv_validator.json to populate vals
privValFile := filepath.Join(dataDir, mach, defaultConfig.PrivValidator) initFilesWithConfig(config)
privVal := types.LoadPrivValidatorFS(privValFile)
pvFile := filepath.Join(nodeDir, config.BaseConfig.PrivValidator)
pv := pvm.LoadFilePV(pvFile)
genVals[i] = types.GenesisValidator{ genVals[i] = types.GenesisValidator{
PubKey: privVal.GetPubKey(), PubKey: pv.GetPubKey(),
Power: 1, Power: 1,
Name: mach, Name: nodeDirName,
} }
} }
for i := 0; i < nNonValidators; i++ {
nodeDir := filepath.Join(outputDir, cmn.Fmt("%s%d", nodeDirPrefix, i+nValidators))
config.SetRoot(nodeDir)
err := os.MkdirAll(filepath.Join(nodeDir, "config"), nodeDirPerm)
if err != nil {
_ = os.RemoveAll(outputDir)
return err
}
initFilesWithConfig(config)
}
// Generate genesis doc from generated validators // Generate genesis doc from generated validators
genDoc := &types.GenesisDoc{ genDoc := &types.GenesisDoc{
GenesisTime: time.Now(), GenesisTime: time.Now(),
@@ -63,36 +107,65 @@ func testnetFiles(cmd *cobra.Command, args []string) {
} }
// Write genesis file. // Write genesis file.
for i := 0; i < nValidators; i++ { for i := 0; i < nValidators+nNonValidators; i++ {
mach := cmn.Fmt("mach%d", i) nodeDir := filepath.Join(outputDir, cmn.Fmt("%s%d", nodeDirPrefix, i))
if err := genDoc.SaveAs(filepath.Join(dataDir, mach, defaultConfig.Genesis)); err != nil { if err := genDoc.SaveAs(filepath.Join(nodeDir, config.BaseConfig.Genesis)); err != nil {
panic(err) _ = os.RemoveAll(outputDir)
return err
} }
} }
fmt.Println(cmn.Fmt("Successfully initialized %v node directories", nValidators)) if populatePersistentPeers {
err := populatePersistentPeersInConfigAndWriteIt(config)
if err != nil {
_ = os.RemoveAll(outputDir)
return err
}
}
fmt.Printf("Successfully initialized %v node directories\n", nValidators+nNonValidators)
return nil
} }
// Initialize per-machine core directory func hostnameOrIP(i int) string {
func initMachCoreDirectory(base, mach string) error { if startingIPAddress != "" {
// Create priv_validator.json file if not present ip := net.ParseIP(startingIPAddress)
defaultConfig := cfg.DefaultBaseConfig() ip = ip.To4()
dir := filepath.Join(base, mach) if ip == nil {
privValPath := filepath.Join(dir, defaultConfig.PrivValidator) fmt.Printf("%v: non ipv4 address\n", startingIPAddress)
dir = filepath.Dir(privValPath) os.Exit(1)
err := cmn.EnsureDir(dir, 0700) }
for j := 0; j < i; j++ {
ip[3]++
}
return ip.String()
}
return fmt.Sprintf("%s%d", hostnamePrefix, i)
}
func populatePersistentPeersInConfigAndWriteIt(config *cfg.Config) error {
persistentPeers := make([]string, nValidators+nNonValidators)
for i := 0; i < nValidators+nNonValidators; i++ {
nodeDir := filepath.Join(outputDir, cmn.Fmt("%s%d", nodeDirPrefix, i))
config.SetRoot(nodeDir)
nodeKey, err := p2p.LoadNodeKey(config.NodeKeyFile())
if err != nil { if err != nil {
return err return err
} }
ensurePrivValidator(privValPath) persistentPeers[i] = p2p.IDAddressString(nodeKey.ID(), fmt.Sprintf("%s:%d", hostnameOrIP(i), p2pPort))
return nil
}
func ensurePrivValidator(file string) {
if cmn.FileExists(file) {
return
} }
privValidator := types.GenPrivValidatorFS(file) persistentPeersList := strings.Join(persistentPeers, ",")
privValidator.Save()
for i := 0; i < nValidators+nNonValidators; i++ {
nodeDir := filepath.Join(outputDir, cmn.Fmt("%s%d", nodeDirPrefix, i))
config.SetRoot(nodeDir)
config.P2P.PersistentPeers = persistentPeersList
// overwrite default config
cfg.WriteConfigFile(filepath.Join(nodeDir, "config", "config.toml"), config)
}
return nil
} }
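Note: with --populate-persistent-peers (the default), each node's config.toml ends up listing every node as ID@host:port, where the host comes either from --hostname-prefix or from --starting-ip-address with the last octet incremented once per node. A small sketch of that address progression:

package main

import (
	"fmt"
	"net"
)

func main() {
	ip := net.ParseIP("192.168.0.1").To4()
	for i := 0; i < 4; i++ {
		// hostnameOrIP(i) yields the starting IP bumped i times in its last octet.
		fmt.Printf("node%d -> %s:46656\n", i, ip)
		ip[3]++
	}
}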

View File

@@ -0,0 +1,12 @@
package commands
import (
amino "github.com/tendermint/go-amino"
"github.com/tendermint/go-crypto"
)
var cdc = amino.NewCodec()
func init() {
crypto.RegisterAmino(cdc)
}

View File

@@ -25,6 +25,7 @@ func main() {
cmd.ShowValidatorCmd, cmd.ShowValidatorCmd,
cmd.TestnetFilesCmd, cmd.TestnetFilesCmd,
cmd.ShowNodeIDCmd, cmd.ShowNodeIDCmd,
cmd.GenNodeKeyCmd,
cmd.VersionCmd) cmd.VersionCmd)
// NOTE: // NOTE:

View File

@@ -16,3 +16,8 @@ comment:
require_changes: no require_changes: no
require_base: no require_base: no
require_head: yes require_head: yes
ignore:
- "docs"
- "DOCKER"
- "scripts"

View File

@@ -137,10 +137,6 @@ type BaseConfig struct {
DBPath string `mapstructure:"db_dir"` DBPath string `mapstructure:"db_dir"`
} }
func (c BaseConfig) ChainID() string {
return c.chainID
}
// DefaultBaseConfig returns a default base configuration for a Tendermint node // DefaultBaseConfig returns a default base configuration for a Tendermint node
func DefaultBaseConfig() BaseConfig { func DefaultBaseConfig() BaseConfig {
return BaseConfig{ return BaseConfig{
@@ -161,32 +157,36 @@ func DefaultBaseConfig() BaseConfig {
// TestBaseConfig returns a base configuration for testing a Tendermint node // TestBaseConfig returns a base configuration for testing a Tendermint node
func TestBaseConfig() BaseConfig { func TestBaseConfig() BaseConfig {
conf := DefaultBaseConfig() cfg := DefaultBaseConfig()
conf.chainID = "tendermint_test" cfg.chainID = "tendermint_test"
conf.ProxyApp = "kvstore" cfg.ProxyApp = "kvstore"
conf.FastSync = false cfg.FastSync = false
conf.DBBackend = "memdb" cfg.DBBackend = "memdb"
return conf return cfg
}
func (cfg BaseConfig) ChainID() string {
return cfg.chainID
} }
// GenesisFile returns the full path to the genesis.json file // GenesisFile returns the full path to the genesis.json file
func (b BaseConfig) GenesisFile() string { func (cfg BaseConfig) GenesisFile() string {
return rootify(b.Genesis, b.RootDir) return rootify(cfg.Genesis, cfg.RootDir)
} }
// PrivValidatorFile returns the full path to the priv_validator.json file // PrivValidatorFile returns the full path to the priv_validator.json file
func (b BaseConfig) PrivValidatorFile() string { func (cfg BaseConfig) PrivValidatorFile() string {
return rootify(b.PrivValidator, b.RootDir) return rootify(cfg.PrivValidator, cfg.RootDir)
} }
// NodeKeyFile returns the full path to the node_key.json file // NodeKeyFile returns the full path to the node_key.json file
func (b BaseConfig) NodeKeyFile() string { func (cfg BaseConfig) NodeKeyFile() string {
return rootify(b.NodeKey, b.RootDir) return rootify(cfg.NodeKey, cfg.RootDir)
} }
// DBDir returns the full path to the database directory // DBDir returns the full path to the database directory
func (b BaseConfig) DBDir() string { func (cfg BaseConfig) DBDir() string {
return rootify(b.DBPath, b.RootDir) return rootify(cfg.DBPath, cfg.RootDir)
} }
// DefaultLogLevel returns a default log level of "error" // DefaultLogLevel returns a default log level of "error"
@@ -229,11 +229,11 @@ func DefaultRPCConfig() *RPCConfig {
// TestRPCConfig returns a configuration for testing the RPC server // TestRPCConfig returns a configuration for testing the RPC server
func TestRPCConfig() *RPCConfig { func TestRPCConfig() *RPCConfig {
conf := DefaultRPCConfig() cfg := DefaultRPCConfig()
conf.ListenAddress = "tcp://0.0.0.0:36657" cfg.ListenAddress = "tcp://0.0.0.0:36657"
conf.GRPCListenAddress = "tcp://0.0.0.0:36658" cfg.GRPCListenAddress = "tcp://0.0.0.0:36658"
conf.Unsafe = true cfg.Unsafe = true
return conf return cfg
} }
//----------------------------------------------------------------------------- //-----------------------------------------------------------------------------
@@ -270,7 +270,7 @@ type P2PConfig struct {
FlushThrottleTimeout int `mapstructure:"flush_throttle_timeout"` FlushThrottleTimeout int `mapstructure:"flush_throttle_timeout"`
// Maximum size of a message packet payload, in bytes // Maximum size of a message packet payload, in bytes
MaxMsgPacketPayloadSize int `mapstructure:"max_msg_packet_payload_size"` MaxPacketMsgPayloadSize int `mapstructure:"max_packet_msg_payload_size"`
// Rate at which packets can be sent, in bytes/second // Rate at which packets can be sent, in bytes/second
SendRate int64 `mapstructure:"send_rate"` SendRate int64 `mapstructure:"send_rate"`
@@ -302,7 +302,7 @@ func DefaultP2PConfig() *P2PConfig {
AddrBookStrict: true, AddrBookStrict: true,
MaxNumPeers: 50, MaxNumPeers: 50,
FlushThrottleTimeout: 100, FlushThrottleTimeout: 100,
MaxMsgPacketPayloadSize: 1024, // 1 kB MaxPacketMsgPayloadSize: 1024, // 1 kB
SendRate: 512000, // 500 kB/s SendRate: 512000, // 500 kB/s
RecvRate: 512000, // 500 kB/s RecvRate: 512000, // 500 kB/s
PexReactor: true, PexReactor: true,
@@ -313,16 +313,16 @@ func DefaultP2PConfig() *P2PConfig {
// TestP2PConfig returns a configuration for testing the peer-to-peer layer // TestP2PConfig returns a configuration for testing the peer-to-peer layer
func TestP2PConfig() *P2PConfig { func TestP2PConfig() *P2PConfig {
conf := DefaultP2PConfig() cfg := DefaultP2PConfig()
conf.ListenAddress = "tcp://0.0.0.0:36656" cfg.ListenAddress = "tcp://0.0.0.0:36656"
conf.SkipUPNP = true cfg.SkipUPNP = true
conf.FlushThrottleTimeout = 10 cfg.FlushThrottleTimeout = 10
return conf return cfg
} }
// AddrBookFile returns the full path to the address book // AddrBookFile returns the full path to the address book
func (p *P2PConfig) AddrBookFile() string { func (cfg *P2PConfig) AddrBookFile() string {
return rootify(p.AddrBook, p.RootDir) return rootify(cfg.AddrBook, cfg.RootDir)
} }
//----------------------------------------------------------------------------- //-----------------------------------------------------------------------------
@@ -351,14 +351,14 @@ func DefaultMempoolConfig() *MempoolConfig {
// TestMempoolConfig returns a configuration for testing the Tendermint mempool // TestMempoolConfig returns a configuration for testing the Tendermint mempool
func TestMempoolConfig() *MempoolConfig { func TestMempoolConfig() *MempoolConfig {
config := DefaultMempoolConfig() cfg := DefaultMempoolConfig()
config.CacheSize = 1000 cfg.CacheSize = 1000
return config return cfg
} }
// WalDir returns the full path to the mempool's write-ahead log // WalDir returns the full path to the mempool's write-ahead log
func (m *MempoolConfig) WalDir() string { func (cfg *MempoolConfig) WalDir() string {
return rootify(m.WalPath, m.RootDir) return rootify(cfg.WalPath, cfg.RootDir)
} }
//----------------------------------------------------------------------------- //-----------------------------------------------------------------------------
@@ -397,6 +397,44 @@ type ConsensusConfig struct {
PeerQueryMaj23SleepDuration int `mapstructure:"peer_query_maj23_sleep_duration"` PeerQueryMaj23SleepDuration int `mapstructure:"peer_query_maj23_sleep_duration"`
} }
// DefaultConsensusConfig returns a default configuration for the consensus service
func DefaultConsensusConfig() *ConsensusConfig {
return &ConsensusConfig{
WalPath: filepath.Join(defaultDataDir, "cs.wal", "wal"),
WalLight: false,
TimeoutPropose: 3000,
TimeoutProposeDelta: 500,
TimeoutPrevote: 1000,
TimeoutPrevoteDelta: 500,
TimeoutPrecommit: 1000,
TimeoutPrecommitDelta: 500,
TimeoutCommit: 1000,
SkipTimeoutCommit: false,
MaxBlockSizeTxs: 10000,
MaxBlockSizeBytes: 1, // TODO
CreateEmptyBlocks: true,
CreateEmptyBlocksInterval: 0,
PeerGossipSleepDuration: 100,
PeerQueryMaj23SleepDuration: 2000,
}
}
// TestConsensusConfig returns a configuration for testing the consensus service
func TestConsensusConfig() *ConsensusConfig {
cfg := DefaultConsensusConfig()
cfg.TimeoutPropose = 100
cfg.TimeoutProposeDelta = 1
cfg.TimeoutPrevote = 10
cfg.TimeoutPrevoteDelta = 1
cfg.TimeoutPrecommit = 10
cfg.TimeoutPrecommitDelta = 1
cfg.TimeoutCommit = 10
cfg.SkipTimeoutCommit = true
cfg.PeerGossipSleepDuration = 5
cfg.PeerQueryMaj23SleepDuration = 250
return cfg
}
// WaitForTxs returns true if the consensus should wait for transactions before entering the propose step // WaitForTxs returns true if the consensus should wait for transactions before entering the propose step
func (cfg *ConsensusConfig) WaitForTxs() bool { func (cfg *ConsensusConfig) WaitForTxs() bool {
return !cfg.CreateEmptyBlocks || cfg.CreateEmptyBlocksInterval > 0 return !cfg.CreateEmptyBlocks || cfg.CreateEmptyBlocksInterval > 0
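As a rough illustration of the rule above, the propose-step gate is a pure function of the two CreateEmptyBlocks fields. A minimal standalone sketch; the wrapper type and main are hypothetical, only the boolean expression is taken from the change:

package main

import "fmt"

// consensusCfg is a stand-in for the two ConsensusConfig fields the rule reads.
type consensusCfg struct {
	CreateEmptyBlocks         bool
	CreateEmptyBlocksInterval int
}

// waitForTxs mirrors ConsensusConfig.WaitForTxs: wait for transactions when
// empty blocks are disabled, or when they are only produced on an interval.
func waitForTxs(c consensusCfg) bool {
	return !c.CreateEmptyBlocks || c.CreateEmptyBlocksInterval > 0
}

func main() {
	fmt.Println(waitForTxs(consensusCfg{CreateEmptyBlocks: true}))                                // false: propose immediately
	fmt.Println(waitForTxs(consensusCfg{CreateEmptyBlocks: false}))                               // true: wait for transactions
	fmt.Println(waitForTxs(consensusCfg{CreateEmptyBlocks: true, CreateEmptyBlocksInterval: 30})) // true: empty blocks only on the interval
}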
@@ -437,55 +475,17 @@ func (cfg *ConsensusConfig) PeerQueryMaj23Sleep() time.Duration {
return time.Duration(cfg.PeerQueryMaj23SleepDuration) * time.Millisecond return time.Duration(cfg.PeerQueryMaj23SleepDuration) * time.Millisecond
} }
// DefaultConsensusConfig returns a default configuration for the consensus service
func DefaultConsensusConfig() *ConsensusConfig {
return &ConsensusConfig{
WalPath: filepath.Join(defaultDataDir, "cs.wal", "wal"),
WalLight: false,
TimeoutPropose: 3000,
TimeoutProposeDelta: 500,
TimeoutPrevote: 1000,
TimeoutPrevoteDelta: 500,
TimeoutPrecommit: 1000,
TimeoutPrecommitDelta: 500,
TimeoutCommit: 1000,
SkipTimeoutCommit: false,
MaxBlockSizeTxs: 10000,
MaxBlockSizeBytes: 1, // TODO
CreateEmptyBlocks: true,
CreateEmptyBlocksInterval: 0,
PeerGossipSleepDuration: 100,
PeerQueryMaj23SleepDuration: 2000,
}
}
// TestConsensusConfig returns a configuration for testing the consensus service
func TestConsensusConfig() *ConsensusConfig {
config := DefaultConsensusConfig()
config.TimeoutPropose = 100
config.TimeoutProposeDelta = 1
config.TimeoutPrevote = 10
config.TimeoutPrevoteDelta = 1
config.TimeoutPrecommit = 10
config.TimeoutPrecommitDelta = 1
config.TimeoutCommit = 10
config.SkipTimeoutCommit = true
config.PeerGossipSleepDuration = 5
config.PeerQueryMaj23SleepDuration = 250
return config
}
// WalFile returns the full path to the write-ahead log file // WalFile returns the full path to the write-ahead log file
func (c *ConsensusConfig) WalFile() string { func (cfg *ConsensusConfig) WalFile() string {
if c.walFile != "" { if cfg.walFile != "" {
return c.walFile return cfg.walFile
} }
return rootify(c.WalPath, c.RootDir) return rootify(cfg.WalPath, cfg.RootDir)
} }
// SetWalFile sets the path to the write-ahead log file // SetWalFile sets the path to the write-ahead log file
func (c *ConsensusConfig) SetWalFile(walFile string) { func (cfg *ConsensusConfig) SetWalFile(walFile string) {
c.walFile = walFile cfg.walFile = walFile
} }
//----------------------------------------------------------------------------- //-----------------------------------------------------------------------------
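A hedged sketch of how the renamed helpers above fit together: DefaultBaseConfig, DefaultP2PConfig, and the path helpers are taken from the diff, while the import path and the concrete values are assumptions.

package main

import (
	"fmt"

	cfg "github.com/tendermint/tendermint/config"
)

func main() {
	base := cfg.DefaultBaseConfig()
	base.RootDir = "/tmp/tm-demo" // illustrative root; the path helpers rootify relative paths against it
	fmt.Println(base.GenesisFile(), base.PrivValidatorFile(), base.DBDir())

	p2p := cfg.DefaultP2PConfig()
	p2p.MaxPacketMsgPayloadSize = 2048 // renamed in this changeset (was MaxMsgPacketPayloadSize)
	fmt.Println("max packet msg payload:", p2p.MaxPacketMsgPayloadSize)
}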

View File

@@ -37,16 +37,21 @@ func EnsureRoot(rootDir string) {
// Write default config file if missing. // Write default config file if missing.
if !cmn.FileExists(configFilePath) { if !cmn.FileExists(configFilePath) {
writeConfigFile(configFilePath) writeDefaultCondigFile(configFilePath)
} }
} }
// XXX: this func should probably be called by cmd/tendermint/commands/init.go // XXX: this func should probably be called by cmd/tendermint/commands/init.go
// alongside the writing of the genesis.json and priv_validator.json // alongside the writing of the genesis.json and priv_validator.json
func writeConfigFile(configFilePath string) { func writeDefaultCondigFile(configFilePath string) {
WriteConfigFile(configFilePath, DefaultConfig())
}
// WriteConfigFile renders config using the template and writes it to configFilePath.
func WriteConfigFile(configFilePath string, config *Config) {
var buffer bytes.Buffer var buffer bytes.Buffer
if err := configTemplate.Execute(&buffer, DefaultConfig()); err != nil { if err := configTemplate.Execute(&buffer, config); err != nil {
panic(err) panic(err)
} }
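The newly exported WriteConfigFile makes it possible to render a non-default config through the same template. A small sketch, assuming the package is imported as cfg; the output path and seed entry are placeholders:

package main

import cfg "github.com/tendermint/tendermint/config"

func main() {
	c := cfg.DefaultConfig()
	c.P2P.Seeds = "nodeid@203.0.113.1:46656" // hypothetical seed; rendered via {{ .P2P.Seeds }} below
	cfg.WriteConfigFile("/tmp/tm-demo/config/config.toml", c)
}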
@@ -124,11 +129,11 @@ unsafe = {{ .RPC.Unsafe }}
laddr = "{{ .P2P.ListenAddress }}" laddr = "{{ .P2P.ListenAddress }}"
# Comma separated list of seed nodes to connect to # Comma separated list of seed nodes to connect to
seeds = "" seeds = "{{ .P2P.Seeds }}"
# Comma separated list of nodes to keep persistent connections to # Comma separated list of nodes to keep persistent connections to
# Do not add private peers to this list if you don't want them advertised # Do not add private peers to this list if you don't want them advertised
persistent_peers = "" persistent_peers = "{{ .P2P.PersistentPeers }}"
# Path to address book # Path to address book
addr_book_file = "{{ .P2P.AddrBook }}" addr_book_file = "{{ .P2P.AddrBook }}"
@@ -143,7 +148,7 @@ flush_throttle_timeout = {{ .P2P.FlushThrottleTimeout }}
max_num_peers = {{ .P2P.MaxNumPeers }} max_num_peers = {{ .P2P.MaxNumPeers }}
# Maximum size of a message packet payload, in bytes # Maximum size of a message packet payload, in bytes
max_msg_packet_payload_size = {{ .P2P.MaxMsgPacketPayloadSize }} max_packet_msg_payload_size = {{ .P2P.MaxPacketMsgPayloadSize }}
# Rate at which packets can be sent, in bytes/second # Rate at which packets can be sent, in bytes/second
send_rate = {{ .P2P.SendRate }} send_rate = {{ .P2P.SendRate }}
@@ -262,7 +267,7 @@ func ResetTestRoot(testName string) *Config {
// Write default config file if missing. // Write default config file if missing.
if !cmn.FileExists(configFilePath) { if !cmn.FileExists(configFilePath) {
writeConfigFile(configFilePath) writeDefaultCondigFile(configFilePath)
} }
if !cmn.FileExists(genesisFilePath) { if !cmn.FileExists(genesisFilePath) {
cmn.MustWriteFile(genesisFilePath, []byte(testGenesis), 0644) cmn.MustWriteFile(genesisFilePath, []byte(testGenesis), 0644)
@@ -280,8 +285,8 @@ var testGenesis = `{
"validators": [ "validators": [
{ {
"pub_key": { "pub_key": {
"type": "ed25519", "type": "AC26791624DE60",
"data":"3B3069C422E19688B45CBFAE7BB009FC0FA1B1EA86593519318B7214853803C8" "value":"AT/+aaL1eB0477Mud9JMm8Sh8BIvOYlPGC9KkIUmFaE="
}, },
"power": 10, "power": 10,
"name": "" "name": ""
@@ -291,14 +296,14 @@ var testGenesis = `{
}` }`
var testPrivValidator = `{ var testPrivValidator = `{
"address": "D028C9981F7A87F3093672BF0D5B0E2A1B3ED456", "address": "849CB2C877F87A20925F35D00AE6688342D25B47",
"pub_key": { "pub_key": {
"type": "ed25519", "type": "AC26791624DE60",
"data": "3B3069C422E19688B45CBFAE7BB009FC0FA1B1EA86593519318B7214853803C8" "value": "AT/+aaL1eB0477Mud9JMm8Sh8BIvOYlPGC9KkIUmFaE="
}, },
"priv_key": { "priv_key": {
"type": "ed25519", "type": "954568A3288910",
"data": "27F82582AEFAE7AB151CFB01C48BB6C1A0DA78F9BDDA979A9F70A84D074EB07D3B3069C422E19688B45CBFAE7BB009FC0FA1B1EA86593519318B7214853803C8" "value": "EVkqJO/jIXp3rkASXfh9YnyToYXRXhBr6g9cQVxPFnQBP/5povV4HTjvsy530kybxKHwEi85iU8YL0qQhSYVoQ=="
}, },
"last_height": 0, "last_height": 0,
"last_round": 0, "last_round": 0,

View File

@@ -7,7 +7,6 @@ import (
"time" "time"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
crypto "github.com/tendermint/go-crypto"
"github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/types"
cmn "github.com/tendermint/tmlibs/common" cmn "github.com/tendermint/tmlibs/common"
@@ -48,7 +47,9 @@ func TestByzantine(t *testing.T) {
for i := 0; i < N; i++ { for i := 0; i < N; i++ {
// make first val byzantine // make first val byzantine
if i == 0 { if i == 0 {
css[i].privValidator = NewByzantinePrivValidator(css[i].privValidator) // NOTE: Now, test validators are MockPV, which by default doesn't
// do any safety checks.
css[i].privValidator.(*types.MockPV).DisableChecks()
css[i].decideProposal = func(j int) func(int64, int) { css[i].decideProposal = func(j int) func(int64, int) {
return func(height int64, round int) { return func(height int64, round int) {
byzantineDecideProposalFunc(t, height, round, css[j], switches[j]) byzantineDecideProposalFunc(t, height, round, css[j], switches[j])
@@ -203,7 +204,7 @@ func byzantineDecideProposalFunc(t *testing.T, height int64, round int, cs *Cons
func sendProposalAndParts(height int64, round int, cs *ConsensusState, peer p2p.Peer, proposal *types.Proposal, blockHash []byte, parts *types.PartSet) { func sendProposalAndParts(height int64, round int, cs *ConsensusState, peer p2p.Peer, proposal *types.Proposal, blockHash []byte, parts *types.PartSet) {
// proposal // proposal
msg := &ProposalMessage{Proposal: proposal} msg := &ProposalMessage{Proposal: proposal}
peer.Send(DataChannel, struct{ ConsensusMessage }{msg}) peer.Send(DataChannel, cdc.MustMarshalBinaryBare(msg))
// parts // parts
for i := 0; i < parts.Total(); i++ { for i := 0; i < parts.Total(); i++ {
@@ -213,7 +214,7 @@ func sendProposalAndParts(height int64, round int, cs *ConsensusState, peer p2p.
Round: round, // This tells peer that this part applies to us. Round: round, // This tells peer that this part applies to us.
Part: part, Part: part,
} }
peer.Send(DataChannel, struct{ ConsensusMessage }{msg}) peer.Send(DataChannel, cdc.MustMarshalBinaryBare(msg))
} }
// votes // votes
@@ -222,8 +223,8 @@ func sendProposalAndParts(height int64, round int, cs *ConsensusState, peer p2p.
precommit, _ := cs.signVote(types.VoteTypePrecommit, blockHash, parts.Header()) precommit, _ := cs.signVote(types.VoteTypePrecommit, blockHash, parts.Header())
cs.mtx.Unlock() cs.mtx.Unlock()
peer.Send(VoteChannel, struct{ ConsensusMessage }{&VoteMessage{prevote}}) peer.Send(VoteChannel, cdc.MustMarshalBinaryBare(&VoteMessage{prevote}))
peer.Send(VoteChannel, struct{ ConsensusMessage }{&VoteMessage{precommit}}) peer.Send(VoteChannel, cdc.MustMarshalBinaryBare(&VoteMessage{precommit}))
} }
//---------------------------------------- //----------------------------------------
@@ -264,47 +265,3 @@ func (br *ByzantineReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
func (br *ByzantineReactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) { func (br *ByzantineReactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) {
br.reactor.Receive(chID, peer, msgBytes) br.reactor.Receive(chID, peer, msgBytes)
} }
//----------------------------------------
// byzantine privValidator
type ByzantinePrivValidator struct {
types.Signer
pv types.PrivValidator
}
// Return a priv validator that will sign anything
func NewByzantinePrivValidator(pv types.PrivValidator) *ByzantinePrivValidator {
return &ByzantinePrivValidator{
Signer: pv.(*types.PrivValidatorFS).Signer,
pv: pv,
}
}
func (privVal *ByzantinePrivValidator) GetAddress() types.Address {
return privVal.pv.GetAddress()
}
func (privVal *ByzantinePrivValidator) GetPubKey() crypto.PubKey {
return privVal.pv.GetPubKey()
}
func (privVal *ByzantinePrivValidator) SignVote(chainID string, vote *types.Vote) (err error) {
vote.Signature, err = privVal.Sign(vote.SignBytes(chainID))
return err
}
func (privVal *ByzantinePrivValidator) SignProposal(chainID string, proposal *types.Proposal) (err error) {
proposal.Signature, _ = privVal.Sign(proposal.SignBytes(chainID))
return nil
}
func (privVal *ByzantinePrivValidator) SignHeartbeat(chainID string, heartbeat *types.Heartbeat) (err error) {
heartbeat.Signature, _ = privVal.Sign(heartbeat.SignBytes(chainID))
return nil
}
func (privVal *ByzantinePrivValidator) String() string {
return cmn.Fmt("PrivValidator{%X}", privVal.GetAddress())
}

View File

@@ -21,6 +21,7 @@ import (
"github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/p2p"
sm "github.com/tendermint/tendermint/state" sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/types"
pvm "github.com/tendermint/tendermint/types/priv_validator"
cmn "github.com/tendermint/tmlibs/common" cmn "github.com/tendermint/tmlibs/common"
dbm "github.com/tendermint/tmlibs/db" dbm "github.com/tendermint/tmlibs/db"
"github.com/tendermint/tmlibs/log" "github.com/tendermint/tmlibs/log"
@@ -101,13 +102,13 @@ func signVotes(voteType byte, hash []byte, header types.PartSetHeader, vss ...*v
func incrementHeight(vss ...*validatorStub) { func incrementHeight(vss ...*validatorStub) {
for _, vs := range vss { for _, vs := range vss {
vs.Height += 1 vs.Height++
} }
} }
func incrementRound(vss ...*validatorStub) { func incrementRound(vss ...*validatorStub) {
for _, vs := range vss { for _, vs := range vss {
vs.Round += 1 vs.Round++
} }
} }
@@ -222,7 +223,7 @@ func subscribeToVoter(cs *ConsensusState, addr []byte) chan interface{} {
voteCh := make(chan interface{}) voteCh := make(chan interface{})
go func() { go func() {
for v := range voteCh0 { for v := range voteCh0 {
vote := v.(types.TMEventData).Unwrap().(types.EventDataVote) vote := v.(types.EventDataVote)
// we only fire for our own votes // we only fire for our own votes
if bytes.Equal(addr, vote.Vote.ValidatorAddress) { if bytes.Equal(addr, vote.Vote.ValidatorAddress) {
voteCh <- v voteCh <- v
@@ -277,10 +278,10 @@ func newConsensusStateWithConfigAndBlockStore(thisConfig *cfg.Config, state sm.S
return cs return cs
} }
func loadPrivValidator(config *cfg.Config) *types.PrivValidatorFS { func loadPrivValidator(config *cfg.Config) *pvm.FilePV {
privValidatorFile := config.PrivValidatorFile() privValidatorFile := config.PrivValidatorFile()
ensureDir(path.Dir(privValidatorFile), 0700) ensureDir(path.Dir(privValidatorFile), 0700)
privValidator := types.LoadOrGenPrivValidatorFS(privValidatorFile) privValidator := pvm.LoadOrGenFilePV(privValidatorFile)
privValidator.Reset() privValidator.Reset()
return privValidator return privValidator
} }
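The tests now go through the priv_validator package instead of types.PrivValidatorFS. A minimal sketch of the same load-or-generate pattern outside the test harness; the file path is illustrative:

package main

import (
	"fmt"

	pvm "github.com/tendermint/tendermint/types/priv_validator"
)

func main() {
	// Load the validator key file if it exists, otherwise generate a fresh one.
	pv := pvm.LoadOrGenFilePV("/tmp/tm-demo/priv_validator.json")
	fmt.Printf("validator address: %X\n", pv.GetAddress())
}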
@@ -378,7 +379,7 @@ func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerF
privVal = privVals[i] privVal = privVals[i]
} else { } else {
_, tempFilePath := cmn.Tempfile("priv_validator_") _, tempFilePath := cmn.Tempfile("priv_validator_")
privVal = types.GenPrivValidatorFS(tempFilePath) privVal = pvm.GenFilePV(tempFilePath)
} }
app := appFunc() app := appFunc()
@@ -394,7 +395,7 @@ func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerF
func getSwitchIndex(switches []*p2p.Switch, peer p2p.Peer) int { func getSwitchIndex(switches []*p2p.Switch, peer p2p.Peer) int {
for i, s := range switches { for i, s := range switches {
if bytes.Equal(peer.NodeInfo().PubKey.Address(), s.NodeInfo().PubKey.Address()) { if peer.NodeInfo().ID == s.NodeInfo().ID {
return i return i
} }
} }
@@ -405,9 +406,9 @@ func getSwitchIndex(switches []*p2p.Switch, peer p2p.Peer) int {
//------------------------------------------------------------------------------- //-------------------------------------------------------------------------------
// genesis // genesis
func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.GenesisDoc, []*types.PrivValidatorFS) { func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.GenesisDoc, []types.PrivValidator) {
validators := make([]types.GenesisValidator, numValidators) validators := make([]types.GenesisValidator, numValidators)
privValidators := make([]*types.PrivValidatorFS, numValidators) privValidators := make([]types.PrivValidator, numValidators)
for i := 0; i < numValidators; i++ { for i := 0; i < numValidators; i++ {
val, privVal := types.RandValidator(randPower, minPower) val, privVal := types.RandValidator(randPower, minPower)
validators[i] = types.GenesisValidator{ validators[i] = types.GenesisValidator{
@@ -425,7 +426,7 @@ func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.G
}, privValidators }, privValidators
} }
func randGenesisState(numValidators int, randPower bool, minPower int64) (sm.State, []*types.PrivValidatorFS) { func randGenesisState(numValidators int, randPower bool, minPower int64) (sm.State, []types.PrivValidator) {
genDoc, privValidators := randGenesisDoc(numValidators, randPower, minPower) genDoc, privValidators := randGenesisDoc(numValidators, randPower, minPower)
s0, _ := sm.MakeGenesisState(genDoc) s0, _ := sm.MakeGenesisState(genDoc)
db := dbm.NewMemDB() db := dbm.NewMemDB()

View File

@@ -108,7 +108,7 @@ func TestMempoolTxConcurrentWithCommit(t *testing.T) {
ticker := time.NewTicker(time.Second * 30) ticker := time.NewTicker(time.Second * 30)
select { select {
case b := <-newBlockCh: case b := <-newBlockCh:
evt := b.(types.TMEventData).Unwrap().(types.EventDataNewBlock) evt := b.(types.EventDataNewBlock)
nTxs += int(evt.Block.Header.NumTxs) nTxs += int(evt.Block.Header.NumTxs)
case <-ticker.C: case <-ticker.C:
panic("Timed out waiting to commit blocks with transactions") panic("Timed out waiting to commit blocks with transactions")
@@ -200,7 +200,7 @@ func (app *CounterApplication) DeliverTx(tx []byte) abci.ResponseDeliverTx {
Code: code.CodeTypeBadNonce, Code: code.CodeTypeBadNonce,
Log: fmt.Sprintf("Invalid nonce. Expected %v, got %v", app.txCount, txValue)} Log: fmt.Sprintf("Invalid nonce. Expected %v, got %v", app.txCount, txValue)}
} }
app.txCount += 1 app.txCount++
return abci.ResponseDeliverTx{Code: code.CodeTypeOK} return abci.ResponseDeliverTx{Code: code.CodeTypeOK}
} }
@@ -211,7 +211,7 @@ func (app *CounterApplication) CheckTx(tx []byte) abci.ResponseCheckTx {
Code: code.CodeTypeBadNonce, Code: code.CodeTypeBadNonce,
Log: fmt.Sprintf("Invalid nonce. Expected %v, got %v", app.mempoolTxCount, txValue)} Log: fmt.Sprintf("Invalid nonce. Expected %v, got %v", app.mempoolTxCount, txValue)}
} }
app.mempoolTxCount += 1 app.mempoolTxCount++
return abci.ResponseCheckTx{Code: code.CodeTypeOK} return abci.ResponseCheckTx{Code: code.CodeTypeOK}
} }
@@ -225,9 +225,8 @@ func (app *CounterApplication) Commit() abci.ResponseCommit {
app.mempoolTxCount = app.txCount app.mempoolTxCount = app.txCount
if app.txCount == 0 { if app.txCount == 0 {
return abci.ResponseCommit{} return abci.ResponseCommit{}
} else { }
hash := make([]byte, 8) hash := make([]byte, 8)
binary.BigEndian.PutUint64(hash, uint64(app.txCount)) binary.BigEndian.PutUint64(hash, uint64(app.txCount))
return abci.ResponseCommit{Data: hash} return abci.ResponseCommit{Data: hash}
}
} }

View File

@@ -1,7 +1,6 @@
package consensus package consensus
import ( import (
"bytes"
"context" "context"
"fmt" "fmt"
"reflect" "reflect"
@@ -10,7 +9,7 @@ import (
"github.com/pkg/errors" "github.com/pkg/errors"
wire "github.com/tendermint/go-wire" amino "github.com/tendermint/go-amino"
cmn "github.com/tendermint/tmlibs/common" cmn "github.com/tendermint/tmlibs/common"
"github.com/tendermint/tmlibs/log" "github.com/tendermint/tmlibs/log"
@@ -26,7 +25,7 @@ const (
VoteChannel = byte(0x22) VoteChannel = byte(0x22)
VoteSetBitsChannel = byte(0x23) VoteSetBitsChannel = byte(0x23)
maxConsensusMessageSize = 1048576 // 1MB; NOTE/TODO: keep in sync with types.PartSet sizes. maxMsgSize = 1048576 // 1MB; NOTE/TODO: keep in sync with types.PartSet sizes.
blocksToContributeToBecomeGoodPeer = 10000 blocksToContributeToBecomeGoodPeer = 10000
) )
@@ -113,24 +112,28 @@ func (conR *ConsensusReactor) GetChannels() []*p2p.ChannelDescriptor {
ID: StateChannel, ID: StateChannel,
Priority: 5, Priority: 5,
SendQueueCapacity: 100, SendQueueCapacity: 100,
RecvMessageCapacity: maxMsgSize,
}, },
{ {
ID: DataChannel, // maybe split between gossiping current block and catchup stuff ID: DataChannel, // maybe split between gossiping current block and catchup stuff
Priority: 10, // once we gossip the whole block there's nothing left to send until next height or round Priority: 10, // once we gossip the whole block there's nothing left to send until next height or round
SendQueueCapacity: 100, SendQueueCapacity: 100,
RecvBufferCapacity: 50 * 4096, RecvBufferCapacity: 50 * 4096,
RecvMessageCapacity: maxMsgSize,
}, },
{ {
ID: VoteChannel, ID: VoteChannel,
Priority: 5, Priority: 5,
SendQueueCapacity: 100, SendQueueCapacity: 100,
RecvBufferCapacity: 100 * 100, RecvBufferCapacity: 100 * 100,
RecvMessageCapacity: maxMsgSize,
}, },
{ {
ID: VoteSetBitsChannel, ID: VoteSetBitsChannel,
Priority: 1, Priority: 1,
SendQueueCapacity: 2, SendQueueCapacity: 2,
RecvBufferCapacity: 1024, RecvBufferCapacity: 1024,
RecvMessageCapacity: maxMsgSize,
}, },
} }
} }
@@ -178,7 +181,7 @@ func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
return return
} }
_, msg, err := DecodeMessage(msgBytes) msg, err := DecodeMessage(msgBytes)
if err != nil { if err != nil {
conR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes) conR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes)
conR.Switch.StopPeerForError(src, err) conR.Switch.StopPeerForError(src, err)
@@ -224,13 +227,13 @@ func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
conR.Logger.Error("Bad VoteSetBitsMessage field Type") conR.Logger.Error("Bad VoteSetBitsMessage field Type")
return return
} }
src.TrySend(VoteSetBitsChannel, struct{ ConsensusMessage }{&VoteSetBitsMessage{ src.TrySend(VoteSetBitsChannel, cdc.MustMarshalBinaryBare(&VoteSetBitsMessage{
Height: msg.Height, Height: msg.Height,
Round: msg.Round, Round: msg.Round,
Type: msg.Type, Type: msg.Type,
BlockID: msg.BlockID, BlockID: msg.BlockID,
Votes: ourVotes, Votes: ourVotes,
}}) }))
case *ProposalHeartbeatMessage: case *ProposalHeartbeatMessage:
hb := msg.Heartbeat hb := msg.Heartbeat
conR.Logger.Debug("Received proposal heartbeat message", conR.Logger.Debug("Received proposal heartbeat message",
@@ -371,27 +374,33 @@ func (conR *ConsensusReactor) startBroadcastRoutine() error {
} }
go func() { go func() {
var data interface{}
var ok bool
for { for {
select { select {
case data, ok := <-stepsCh: case data, ok = <-stepsCh:
if ok { // a receive from a closed channel returns the zero value immediately if ok { // a receive from a closed channel returns the zero value immediately
edrs := data.(types.TMEventData).Unwrap().(types.EventDataRoundState) edrs := data.(types.EventDataRoundState)
conR.broadcastNewRoundStep(edrs.RoundState.(*cstypes.RoundState)) conR.broadcastNewRoundStep(edrs.RoundState.(*cstypes.RoundState))
} }
case data, ok := <-votesCh: case data, ok = <-votesCh:
if ok { if ok {
edv := data.(types.TMEventData).Unwrap().(types.EventDataVote) edv := data.(types.EventDataVote)
conR.broadcastHasVoteMessage(edv.Vote) conR.broadcastHasVoteMessage(edv.Vote)
} }
case data, ok := <-heartbeatsCh: case data, ok = <-heartbeatsCh:
if ok { if ok {
edph := data.(types.TMEventData).Unwrap().(types.EventDataProposalHeartbeat) edph := data.(types.EventDataProposalHeartbeat)
conR.broadcastProposalHeartbeatMessage(edph) conR.broadcastProposalHeartbeatMessage(edph)
} }
case <-conR.Quit(): case <-conR.Quit():
conR.eventBus.UnsubscribeAll(ctx, subscriber) conR.eventBus.UnsubscribeAll(ctx, subscriber)
return return
} }
if !ok {
conR.eventBus.UnsubscribeAll(ctx, subscriber)
return
}
} }
}() }()
@@ -403,16 +412,16 @@ func (conR *ConsensusReactor) broadcastProposalHeartbeatMessage(heartbeat types.
conR.Logger.Debug("Broadcasting proposal heartbeat message", conR.Logger.Debug("Broadcasting proposal heartbeat message",
"height", hb.Height, "round", hb.Round, "sequence", hb.Sequence) "height", hb.Height, "round", hb.Round, "sequence", hb.Sequence)
msg := &ProposalHeartbeatMessage{hb} msg := &ProposalHeartbeatMessage{hb}
conR.Switch.Broadcast(StateChannel, struct{ ConsensusMessage }{msg}) conR.Switch.Broadcast(StateChannel, cdc.MustMarshalBinaryBare(msg))
} }
func (conR *ConsensusReactor) broadcastNewRoundStep(rs *cstypes.RoundState) { func (conR *ConsensusReactor) broadcastNewRoundStep(rs *cstypes.RoundState) {
nrsMsg, csMsg := makeRoundStepMessages(rs) nrsMsg, csMsg := makeRoundStepMessages(rs)
if nrsMsg != nil { if nrsMsg != nil {
conR.Switch.Broadcast(StateChannel, struct{ ConsensusMessage }{nrsMsg}) conR.Switch.Broadcast(StateChannel, cdc.MustMarshalBinaryBare(nrsMsg))
} }
if csMsg != nil { if csMsg != nil {
conR.Switch.Broadcast(StateChannel, struct{ ConsensusMessage }{csMsg}) conR.Switch.Broadcast(StateChannel, cdc.MustMarshalBinaryBare(csMsg))
} }
} }
@@ -424,7 +433,7 @@ func (conR *ConsensusReactor) broadcastHasVoteMessage(vote *types.Vote) {
Type: vote.Type, Type: vote.Type,
Index: vote.ValidatorIndex, Index: vote.ValidatorIndex,
} }
conR.Switch.Broadcast(StateChannel, struct{ ConsensusMessage }{msg}) conR.Switch.Broadcast(StateChannel, cdc.MustMarshalBinaryBare(msg))
/* /*
// TODO: Make this broadcast more selective. // TODO: Make this broadcast more selective.
for _, peer := range conR.Switch.Peers().List() { for _, peer := range conR.Switch.Peers().List() {
@@ -464,10 +473,10 @@ func (conR *ConsensusReactor) sendNewRoundStepMessages(peer p2p.Peer) {
rs := conR.conS.GetRoundState() rs := conR.conS.GetRoundState()
nrsMsg, csMsg := makeRoundStepMessages(rs) nrsMsg, csMsg := makeRoundStepMessages(rs)
if nrsMsg != nil { if nrsMsg != nil {
peer.Send(StateChannel, struct{ ConsensusMessage }{nrsMsg}) peer.Send(StateChannel, cdc.MustMarshalBinaryBare(nrsMsg))
} }
if csMsg != nil { if csMsg != nil {
peer.Send(StateChannel, struct{ ConsensusMessage }{csMsg}) peer.Send(StateChannel, cdc.MustMarshalBinaryBare(csMsg))
} }
} }
@@ -494,7 +503,7 @@ OUTER_LOOP:
Part: part, Part: part,
} }
logger.Debug("Sending block part", "height", prs.Height, "round", prs.Round) logger.Debug("Sending block part", "height", prs.Height, "round", prs.Round)
if peer.Send(DataChannel, struct{ ConsensusMessage }{msg}) { if peer.Send(DataChannel, cdc.MustMarshalBinaryBare(msg)) {
ps.SetHasProposalBlockPart(prs.Height, prs.Round, index) ps.SetHasProposalBlockPart(prs.Height, prs.Round, index)
} }
continue OUTER_LOOP continue OUTER_LOOP
@@ -538,7 +547,7 @@ OUTER_LOOP:
{ {
msg := &ProposalMessage{Proposal: rs.Proposal} msg := &ProposalMessage{Proposal: rs.Proposal}
logger.Debug("Sending proposal", "height", prs.Height, "round", prs.Round) logger.Debug("Sending proposal", "height", prs.Height, "round", prs.Round)
if peer.Send(DataChannel, struct{ ConsensusMessage }{msg}) { if peer.Send(DataChannel, cdc.MustMarshalBinaryBare(msg)) {
ps.SetHasProposal(rs.Proposal) ps.SetHasProposal(rs.Proposal)
} }
} }
@@ -553,7 +562,7 @@ OUTER_LOOP:
ProposalPOL: rs.Votes.Prevotes(rs.Proposal.POLRound).BitArray(), ProposalPOL: rs.Votes.Prevotes(rs.Proposal.POLRound).BitArray(),
} }
logger.Debug("Sending POL", "height", prs.Height, "round", prs.Round) logger.Debug("Sending POL", "height", prs.Height, "round", prs.Round)
peer.Send(DataChannel, struct{ ConsensusMessage }{msg}) peer.Send(DataChannel, cdc.MustMarshalBinaryBare(msg))
} }
continue OUTER_LOOP continue OUTER_LOOP
} }
@@ -596,17 +605,15 @@ func (conR *ConsensusReactor) gossipDataForCatchup(logger log.Logger, rs *cstype
Part: part, Part: part,
} }
logger.Debug("Sending block part for catchup", "round", prs.Round, "index", index) logger.Debug("Sending block part for catchup", "round", prs.Round, "index", index)
if peer.Send(DataChannel, struct{ ConsensusMessage }{msg}) { if peer.Send(DataChannel, cdc.MustMarshalBinaryBare(msg)) {
ps.SetHasProposalBlockPart(prs.Height, prs.Round, index) ps.SetHasProposalBlockPart(prs.Height, prs.Round, index)
} else { } else {
logger.Debug("Sending block part for catchup failed") logger.Debug("Sending block part for catchup failed")
} }
return return
} else { }
//logger.Info("No parts to send in catch-up, sleeping") //logger.Info("No parts to send in catch-up, sleeping")
time.Sleep(conR.conS.config.PeerGossipSleep()) time.Sleep(conR.conS.config.PeerGossipSleep())
return
}
} }
func (conR *ConsensusReactor) gossipVotesRoutine(peer p2p.Peer, ps *PeerState) { func (conR *ConsensusReactor) gossipVotesRoutine(peer p2p.Peer, ps *PeerState) {
@@ -735,12 +742,12 @@ OUTER_LOOP:
prs := ps.GetRoundState() prs := ps.GetRoundState()
if rs.Height == prs.Height { if rs.Height == prs.Height {
if maj23, ok := rs.Votes.Prevotes(prs.Round).TwoThirdsMajority(); ok { if maj23, ok := rs.Votes.Prevotes(prs.Round).TwoThirdsMajority(); ok {
peer.TrySend(StateChannel, struct{ ConsensusMessage }{&VoteSetMaj23Message{ peer.TrySend(StateChannel, cdc.MustMarshalBinaryBare(&VoteSetMaj23Message{
Height: prs.Height, Height: prs.Height,
Round: prs.Round, Round: prs.Round,
Type: types.VoteTypePrevote, Type: types.VoteTypePrevote,
BlockID: maj23, BlockID: maj23,
}}) }))
time.Sleep(conR.conS.config.PeerQueryMaj23Sleep()) time.Sleep(conR.conS.config.PeerQueryMaj23Sleep())
} }
} }
@@ -752,12 +759,12 @@ OUTER_LOOP:
prs := ps.GetRoundState() prs := ps.GetRoundState()
if rs.Height == prs.Height { if rs.Height == prs.Height {
if maj23, ok := rs.Votes.Precommits(prs.Round).TwoThirdsMajority(); ok { if maj23, ok := rs.Votes.Precommits(prs.Round).TwoThirdsMajority(); ok {
peer.TrySend(StateChannel, struct{ ConsensusMessage }{&VoteSetMaj23Message{ peer.TrySend(StateChannel, cdc.MustMarshalBinaryBare(&VoteSetMaj23Message{
Height: prs.Height, Height: prs.Height,
Round: prs.Round, Round: prs.Round,
Type: types.VoteTypePrecommit, Type: types.VoteTypePrecommit,
BlockID: maj23, BlockID: maj23,
}}) }))
time.Sleep(conR.conS.config.PeerQueryMaj23Sleep()) time.Sleep(conR.conS.config.PeerQueryMaj23Sleep())
} }
} }
@@ -769,12 +776,12 @@ OUTER_LOOP:
prs := ps.GetRoundState() prs := ps.GetRoundState()
if rs.Height == prs.Height && prs.ProposalPOLRound >= 0 { if rs.Height == prs.Height && prs.ProposalPOLRound >= 0 {
if maj23, ok := rs.Votes.Prevotes(prs.ProposalPOLRound).TwoThirdsMajority(); ok { if maj23, ok := rs.Votes.Prevotes(prs.ProposalPOLRound).TwoThirdsMajority(); ok {
peer.TrySend(StateChannel, struct{ ConsensusMessage }{&VoteSetMaj23Message{ peer.TrySend(StateChannel, cdc.MustMarshalBinaryBare(&VoteSetMaj23Message{
Height: prs.Height, Height: prs.Height,
Round: prs.ProposalPOLRound, Round: prs.ProposalPOLRound,
Type: types.VoteTypePrevote, Type: types.VoteTypePrevote,
BlockID: maj23, BlockID: maj23,
}}) }))
time.Sleep(conR.conS.config.PeerQueryMaj23Sleep()) time.Sleep(conR.conS.config.PeerQueryMaj23Sleep())
} }
} }
@@ -788,12 +795,12 @@ OUTER_LOOP:
prs := ps.GetRoundState() prs := ps.GetRoundState()
if prs.CatchupCommitRound != -1 && 0 < prs.Height && prs.Height <= conR.conS.blockStore.Height() { if prs.CatchupCommitRound != -1 && 0 < prs.Height && prs.Height <= conR.conS.blockStore.Height() {
commit := conR.conS.LoadCommit(prs.Height) commit := conR.conS.LoadCommit(prs.Height)
peer.TrySend(StateChannel, struct{ ConsensusMessage }{&VoteSetMaj23Message{ peer.TrySend(StateChannel, cdc.MustMarshalBinaryBare(&VoteSetMaj23Message{
Height: prs.Height, Height: prs.Height,
Round: commit.Round(), Round: commit.Round(),
Type: types.VoteTypePrecommit, Type: types.VoteTypePrecommit,
BlockID: commit.BlockID, BlockID: commit.BlockID,
}}) }))
time.Sleep(conR.conS.config.PeerQueryMaj23Sleep()) time.Sleep(conR.conS.config.PeerQueryMaj23Sleep())
} }
} }
@@ -831,8 +838,8 @@ var (
ErrPeerStateInvalidStartTime = errors.New("Error peer state invalid startTime") ErrPeerStateInvalidStartTime = errors.New("Error peer state invalid startTime")
) )
// PeerState contains the known state of a peer, including its connection // PeerState contains the known state of a peer, including its connection and
// and threadsafe access to its PeerRoundState. // threadsafe access to its PeerRoundState.
type PeerState struct { type PeerState struct {
Peer p2p.Peer Peer p2p.Peer
logger log.Logger logger log.Logger
@@ -871,12 +878,14 @@ func NewPeerState(peer p2p.Peer) *PeerState {
} }
} }
// SetLogger sets a logger on the peer state. Returns the peer state
// itself.
func (ps *PeerState) SetLogger(logger log.Logger) *PeerState { func (ps *PeerState) SetLogger(logger log.Logger) *PeerState {
ps.logger = logger ps.logger = logger
return ps return ps
} }
// GetRoundState returns an atomic snapshot of the PeerRoundState. // GetRoundState returns a shallow copy of the PeerRoundState.
// There's no point in mutating it since it won't change PeerState. // There's no point in mutating it since it won't change PeerState.
func (ps *PeerState) GetRoundState() *cstypes.PeerRoundState { func (ps *PeerState) GetRoundState() *cstypes.PeerRoundState {
ps.mtx.Lock() ps.mtx.Lock()
@@ -886,6 +895,14 @@ func (ps *PeerState) GetRoundState() *cstypes.PeerRoundState {
return &prs return &prs
} }
// GetRoundStateJSON returns a JSON representation of PeerRoundState, marshalled using go-amino.
func (ps *PeerState) GetRoundStateJSON() ([]byte, error) {
ps.mtx.Lock()
defer ps.mtx.Unlock()
return cdc.MarshalJSON(ps.PeerRoundState)
}
// GetHeight returns an atomic snapshot of the PeerRoundState's height // GetHeight returns an atomic snapshot of the PeerRoundState's height
// used by the mempool to ensure peers are caught up before broadcasting new txs // used by the mempool to ensure peers are caught up before broadcasting new txs
func (ps *PeerState) GetHeight() int64 { func (ps *PeerState) GetHeight() int64 {
@@ -944,7 +961,7 @@ func (ps *PeerState) PickSendVote(votes types.VoteSetReader) bool {
if vote, ok := ps.PickVoteToSend(votes); ok { if vote, ok := ps.PickVoteToSend(votes); ok {
msg := &VoteMessage{vote} msg := &VoteMessage{vote}
ps.logger.Debug("Sending vote message", "ps", ps, "vote", vote) ps.logger.Debug("Sending vote message", "ps", ps, "vote", vote)
return ps.Peer.Send(VoteChannel, struct{ ConsensusMessage }{msg}) return ps.Peer.Send(VoteChannel, cdc.MustMarshalBinaryBare(msg))
} }
return false return false
} }
@@ -1048,7 +1065,7 @@ func (ps *PeerState) ensureCatchupCommitRound(height int64, round int, numValida
} }
} }
// EnsureVoteVitArrays ensures the bit-arrays have been allocated for tracking // EnsureVoteBitArrays ensures the bit-arrays have been allocated for tracking
// what votes this peer has received. // what votes this peer has received.
// NOTE: It's important to make sure that numValidators actually matches // NOTE: It's important to make sure that numValidators actually matches
// what the node sees as the number of validators for height. // what the node sees as the number of validators for height.
@@ -1083,36 +1100,49 @@ func (ps *PeerState) ensureVoteBitArrays(height int64, numValidators int) {
// It returns the total number of votes (1 per block). This essentially means // It returns the total number of votes (1 per block). This essentially means
// the number of blocks for which peer has been sending us votes. // the number of blocks for which peer has been sending us votes.
func (ps *PeerState) RecordVote(vote *types.Vote) int { func (ps *PeerState) RecordVote(vote *types.Vote) int {
ps.mtx.Lock()
defer ps.mtx.Unlock()
if ps.stats.lastVoteHeight >= vote.Height { if ps.stats.lastVoteHeight >= vote.Height {
return ps.stats.votes return ps.stats.votes
} }
ps.stats.lastVoteHeight = vote.Height ps.stats.lastVoteHeight = vote.Height
ps.stats.votes += 1 ps.stats.votes++
return ps.stats.votes return ps.stats.votes
} }
// VotesSent returns the number of blocks for which peer has been sending us // VotesSent returns the number of blocks for which peer has been sending us
// votes. // votes.
func (ps *PeerState) VotesSent() int { func (ps *PeerState) VotesSent() int {
ps.mtx.Lock()
defer ps.mtx.Unlock()
return ps.stats.votes return ps.stats.votes
} }
// RecordVote updates internal statistics for this peer by recording the block part. // RecordBlockPart updates internal statistics for this peer by recording the
// It returns the total number of block parts (1 per block). This essentially means // block part. It returns the total number of block parts (1 per block). This
// the number of blocks for which peer has been sending us block parts. // essentially means the number of blocks for which peer has been sending us
// block parts.
func (ps *PeerState) RecordBlockPart(bp *BlockPartMessage) int { func (ps *PeerState) RecordBlockPart(bp *BlockPartMessage) int {
ps.mtx.Lock()
defer ps.mtx.Unlock()
if ps.stats.lastBlockPartHeight >= bp.Height { if ps.stats.lastBlockPartHeight >= bp.Height {
return ps.stats.blockParts return ps.stats.blockParts
} }
ps.stats.lastBlockPartHeight = bp.Height ps.stats.lastBlockPartHeight = bp.Height
ps.stats.blockParts += 1 ps.stats.blockParts++
return ps.stats.blockParts return ps.stats.blockParts
} }
// BlockPartsSent returns the number of blocks for which peer has been sending // BlockPartsSent returns the number of blocks for which peer has been sending
// us block parts. // us block parts.
func (ps *PeerState) BlockPartsSent() int { func (ps *PeerState) BlockPartsSent() int {
ps.mtx.Lock()
defer ps.mtx.Unlock()
return ps.stats.blockParts return ps.stats.blockParts
} }
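The stats above now take the PeerState mutex and count at most one contribution per height. A standalone sketch of that accounting pattern against blocksToContributeToBecomeGoodPeer; the "mark good" follow-up is hypothetical:

package main

import (
	"fmt"
	"sync"
)

const blocksToContributeToBecomeGoodPeer = 10000

// peerStats mirrors the vote half of the per-peer statistics in the diff.
type peerStats struct {
	mtx            sync.Mutex
	lastVoteHeight int64
	votes          int
}

// recordVote counts at most one vote per height, under the mutex, as above.
func (s *peerStats) recordVote(height int64) int {
	s.mtx.Lock()
	defer s.mtx.Unlock()
	if s.lastVoteHeight >= height {
		return s.votes
	}
	s.lastVoteHeight = height
	s.votes++
	return s.votes
}

func main() {
	var s peerStats
	for h := int64(1); h <= 3; h++ {
		if s.recordVote(h) >= blocksToContributeToBecomeGoodPeer {
			fmt.Println("would mark the peer as good") // hypothetical follow-up
		}
	}
	fmt.Println("votes recorded:", s.recordVote(3)) // duplicate height, count unchanged
}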
@@ -1275,45 +1305,30 @@ func (ps *PeerState) StringIndented(indent string) string {
//----------------------------------------------------------------------------- //-----------------------------------------------------------------------------
// Messages // Messages
const (
msgTypeNewRoundStep = byte(0x01)
msgTypeCommitStep = byte(0x02)
msgTypeProposal = byte(0x11)
msgTypeProposalPOL = byte(0x12)
msgTypeBlockPart = byte(0x13) // both block & POL
msgTypeVote = byte(0x14)
msgTypeHasVote = byte(0x15)
msgTypeVoteSetMaj23 = byte(0x16)
msgTypeVoteSetBits = byte(0x17)
msgTypeProposalHeartbeat = byte(0x20)
)
// ConsensusMessage is a message that can be sent and received on the ConsensusReactor // ConsensusMessage is a message that can be sent and received on the ConsensusReactor
type ConsensusMessage interface{} type ConsensusMessage interface{}
var _ = wire.RegisterInterface( func RegisterConsensusMessages(cdc *amino.Codec) {
struct{ ConsensusMessage }{}, cdc.RegisterInterface((*ConsensusMessage)(nil), nil)
wire.ConcreteType{&NewRoundStepMessage{}, msgTypeNewRoundStep}, cdc.RegisterConcrete(&NewRoundStepMessage{}, "tendermint/NewRoundStepMessage", nil)
wire.ConcreteType{&CommitStepMessage{}, msgTypeCommitStep}, cdc.RegisterConcrete(&CommitStepMessage{}, "tendermint/CommitStep", nil)
wire.ConcreteType{&ProposalMessage{}, msgTypeProposal}, cdc.RegisterConcrete(&ProposalMessage{}, "tendermint/Proposal", nil)
wire.ConcreteType{&ProposalPOLMessage{}, msgTypeProposalPOL}, cdc.RegisterConcrete(&ProposalPOLMessage{}, "tendermint/ProposalPOL", nil)
wire.ConcreteType{&BlockPartMessage{}, msgTypeBlockPart}, cdc.RegisterConcrete(&BlockPartMessage{}, "tendermint/BlockPart", nil)
wire.ConcreteType{&VoteMessage{}, msgTypeVote}, cdc.RegisterConcrete(&VoteMessage{}, "tendermint/Vote", nil)
wire.ConcreteType{&HasVoteMessage{}, msgTypeHasVote}, cdc.RegisterConcrete(&HasVoteMessage{}, "tendermint/HasVote", nil)
wire.ConcreteType{&VoteSetMaj23Message{}, msgTypeVoteSetMaj23}, cdc.RegisterConcrete(&VoteSetMaj23Message{}, "tendermint/VoteSetMaj23", nil)
wire.ConcreteType{&VoteSetBitsMessage{}, msgTypeVoteSetBits}, cdc.RegisterConcrete(&VoteSetBitsMessage{}, "tendermint/VoteSetBits", nil)
wire.ConcreteType{&ProposalHeartbeatMessage{}, msgTypeProposalHeartbeat}, cdc.RegisterConcrete(&ProposalHeartbeatMessage{}, "tendermint/ProposalHeartbeat", nil)
) }
// DecodeMessage decodes the given bytes into a ConsensusMessage. // DecodeMessage decodes the given bytes into a ConsensusMessage.
// TODO: check for unnecessary extra bytes at the end. func DecodeMessage(bz []byte) (msg ConsensusMessage, err error) {
func DecodeMessage(bz []byte) (msgType byte, msg ConsensusMessage, err error) { if len(bz) > maxMsgSize {
msgType = bz[0] return msg, fmt.Errorf("Msg exceeds max size (%d > %d)",
n := new(int) len(bz), maxMsgSize)
r := bytes.NewReader(bz) }
msgI := wire.ReadBinary(struct{ ConsensusMessage }{}, r, maxConsensusMessageSize, n, &err) err = cdc.UnmarshalBinaryBare(bz, &msg)
msg = msgI.(struct{ ConsensusMessage }).ConsensusMessage
return return
} }
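For a sense of the new wire path end to end, here is a self-contained sketch: register concrete message types on an amino codec, marshal one the way peer.Send now expects, and decode it the way DecodeMessage does. The local ConsensusMessage and HasVoteMessage definitions stand in for the real ones so the example compiles on its own; field values are illustrative.

package main

import (
	"fmt"

	amino "github.com/tendermint/go-amino"
)

type ConsensusMessage interface{}

type HasVoteMessage struct {
	Height int64
	Round  int
	Type   byte
	Index  int
}

func main() {
	cdc := amino.NewCodec()
	cdc.RegisterInterface((*ConsensusMessage)(nil), nil)
	cdc.RegisterConcrete(&HasVoteMessage{}, "tendermint/HasVote", nil)

	// Encode for sending over a channel, as the reactor now does.
	bz := cdc.MustMarshalBinaryBare(&HasVoteMessage{Height: 10, Round: 0, Type: 0x01, Index: 3})

	// Decode on receive into the interface, as DecodeMessage now does.
	var msg ConsensusMessage
	if err := cdc.UnmarshalBinaryBare(bz, &msg); err != nil {
		panic(err)
	}
	fmt.Printf("decoded %T: %+v\n", msg, msg)
}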

View File

@@ -11,7 +11,6 @@ import (
"time" "time"
"github.com/tendermint/abci/example/kvstore" "github.com/tendermint/abci/example/kvstore"
wire "github.com/tendermint/tendermint/wire"
cmn "github.com/tendermint/tmlibs/common" cmn "github.com/tendermint/tmlibs/common"
"github.com/tendermint/tmlibs/log" "github.com/tendermint/tmlibs/log"
@@ -149,30 +148,30 @@ func TestReactorRecordsBlockParts(t *testing.T) {
Round: 0, Round: 0,
Part: parts.GetPart(0), Part: parts.GetPart(0),
} }
bz, err := wire.MarshalBinary(struct{ ConsensusMessage }{msg}) bz, err := cdc.MarshalBinaryBare(msg)
require.NoError(t, err) require.NoError(t, err)
reactor.Receive(DataChannel, peer, bz) reactor.Receive(DataChannel, peer, bz)
assert.Equal(t, 1, ps.BlockPartsSent(), "number of block parts sent should have increased by 1") require.Equal(t, 1, ps.BlockPartsSent(), "number of block parts sent should have increased by 1")
// 2) block part with the same height, but different round // 2) block part with the same height, but different round
msg.Round = 1 msg.Round = 1
bz, err = wire.MarshalBinary(struct{ ConsensusMessage }{msg}) bz, err = cdc.MarshalBinaryBare(msg)
require.NoError(t, err) require.NoError(t, err)
reactor.Receive(DataChannel, peer, bz) reactor.Receive(DataChannel, peer, bz)
assert.Equal(t, 1, ps.BlockPartsSent(), "number of block parts sent should stay the same") require.Equal(t, 1, ps.BlockPartsSent(), "number of block parts sent should stay the same")
// 3) block part from earlier height // 3) block part from earlier height
msg.Height = 1 msg.Height = 1
msg.Round = 0 msg.Round = 0
bz, err = wire.MarshalBinary(struct{ ConsensusMessage }{msg}) bz, err = cdc.MarshalBinaryBare(msg)
require.NoError(t, err) require.NoError(t, err)
reactor.Receive(DataChannel, peer, bz) reactor.Receive(DataChannel, peer, bz)
assert.Equal(t, 1, ps.BlockPartsSent(), "number of block parts sent should stay the same") require.Equal(t, 1, ps.BlockPartsSent(), "number of block parts sent should stay the same")
} }
// Test we record votes from other peers // Test we record votes from other peers
@@ -204,7 +203,7 @@ func TestReactorRecordsVotes(t *testing.T) {
Type: types.VoteTypePrevote, Type: types.VoteTypePrevote,
BlockID: types.BlockID{}, BlockID: types.BlockID{},
} }
bz, err := wire.MarshalBinary(struct{ ConsensusMessage }{&VoteMessage{vote}}) bz, err := cdc.MarshalBinaryBare(&VoteMessage{vote})
require.NoError(t, err) require.NoError(t, err)
reactor.Receive(VoteChannel, peer, bz) reactor.Receive(VoteChannel, peer, bz)
@@ -213,7 +212,7 @@ func TestReactorRecordsVotes(t *testing.T) {
// 2) vote with the same height, but different round // 2) vote with the same height, but different round
vote.Round = 1 vote.Round = 1
bz, err = wire.MarshalBinary(struct{ ConsensusMessage }{&VoteMessage{vote}}) bz, err = cdc.MarshalBinaryBare(&VoteMessage{vote})
require.NoError(t, err) require.NoError(t, err)
reactor.Receive(VoteChannel, peer, bz) reactor.Receive(VoteChannel, peer, bz)
@@ -223,7 +222,7 @@ func TestReactorRecordsVotes(t *testing.T) {
vote.Height = 1 vote.Height = 1
vote.Round = 0 vote.Round = 0
bz, err = wire.MarshalBinary(struct{ ConsensusMessage }{&VoteMessage{vote}}) bz, err = cdc.MarshalBinaryBare(&VoteMessage{vote})
require.NoError(t, err) require.NoError(t, err)
reactor.Receive(VoteChannel, peer, bz) reactor.Receive(VoteChannel, peer, bz)
@@ -410,7 +409,7 @@ func waitForAndValidateBlock(t *testing.T, n int, activeVals map[string]struct{}
if !ok { if !ok {
return return
} }
newBlock := newBlockI.(types.TMEventData).Unwrap().(types.EventDataNewBlock).Block newBlock := newBlockI.(types.EventDataNewBlock).Block
css[j].Logger.Debug("waitForAndValidateBlock: Got block", "height", newBlock.Height) css[j].Logger.Debug("waitForAndValidateBlock: Got block", "height", newBlock.Height)
err := validateBlock(newBlock, activeVals) err := validateBlock(newBlock, activeVals)
assert.Nil(t, err) assert.Nil(t, err)
@@ -431,7 +430,7 @@ func waitForAndValidateBlockWithTx(t *testing.T, n int, activeVals map[string]st
if !ok { if !ok {
return return
} }
newBlock := newBlockI.(types.TMEventData).Unwrap().(types.EventDataNewBlock).Block newBlock := newBlockI.(types.EventDataNewBlock).Block
css[j].Logger.Debug("waitForAndValidateBlockWithTx: Got block", "height", newBlock.Height) css[j].Logger.Debug("waitForAndValidateBlockWithTx: Got block", "height", newBlock.Height)
err := validateBlock(newBlock, activeVals) err := validateBlock(newBlock, activeVals)
assert.Nil(t, err) assert.Nil(t, err)
@@ -441,7 +440,7 @@ func waitForAndValidateBlockWithTx(t *testing.T, n int, activeVals map[string]st
// but they should be in order. // but they should be in order.
for _, tx := range newBlock.Data.Txs { for _, tx := range newBlock.Data.Txs {
assert.EqualValues(t, txs[ntxs], tx) assert.EqualValues(t, txs[ntxs], tx)
ntxs += 1 ntxs++
} }
if ntxs == len(txs) { if ntxs == len(txs) {
@@ -463,7 +462,7 @@ func waitForBlockWithUpdatedValsAndValidateIt(t *testing.T, n int, updatedVals m
if !ok { if !ok {
return return
} }
newBlock = newBlockI.(types.TMEventData).Unwrap().(types.EventDataNewBlock).Block newBlock = newBlockI.(types.EventDataNewBlock).Block
if newBlock.LastCommit.Size() == len(updatedVals) { if newBlock.LastCommit.Size() == len(updatedVals) {
css[j].Logger.Debug("waitForBlockWithUpdatedValsAndValidateIt: Got block", "height", newBlock.Height) css[j].Logger.Debug("waitForBlockWithUpdatedValsAndValidateIt: Got block", "height", newBlock.Height)
break LOOP break LOOP

View File

@@ -112,7 +112,7 @@ func (cs *ConsensusState) catchupReplay(csHeight int64) error {
} }
} }
if found { if found {
return fmt.Errorf("WAL should not contain #ENDHEIGHT %d.", csHeight) return fmt.Errorf("WAL should not contain #ENDHEIGHT %d", csHeight)
} }
// Search for last height marker // Search for last height marker
@@ -125,7 +125,7 @@ func (cs *ConsensusState) catchupReplay(csHeight int64) error {
return err return err
} }
if !found { if !found {
return fmt.Errorf("Cannot replay height %d. WAL does not contain #ENDHEIGHT for %d.", csHeight, csHeight-1) return fmt.Errorf("Cannot replay height %d. WAL does not contain #ENDHEIGHT for %d", csHeight, csHeight-1)
} }
defer gr.Close() // nolint: errcheck defer gr.Close() // nolint: errcheck
@@ -352,7 +352,7 @@ func (h *Handshaker) replayBlocks(state sm.State, proxyApp proxy.AppConns, appBl
var err error var err error
finalBlock := storeBlockHeight finalBlock := storeBlockHeight
if mutateState { if mutateState {
finalBlock -= 1 finalBlock--
} }
for i := appBlockHeight + 1; i <= finalBlock; i++ { for i := appBlockHeight + 1; i <= finalBlock; i++ {
h.logger.Info("Applying block", "height", i) h.logger.Info("Applying block", "height", i)
@@ -362,7 +362,7 @@ func (h *Handshaker) replayBlocks(state sm.State, proxyApp proxy.AppConns, appBl
return nil, err return nil, err
} }
h.nBlocks += 1 h.nBlocks++
} }
if mutateState { if mutateState {
@@ -390,7 +390,7 @@ func (h *Handshaker) replayBlock(state sm.State, height int64, proxyApp proxy.Ap
return sm.State{}, err return sm.State{}, err
} }
h.nBlocks += 1 h.nBlocks++
return state, nil return state, nil
} }
@@ -429,7 +429,7 @@ type mockProxyApp struct {
func (mock *mockProxyApp) DeliverTx(tx []byte) abci.ResponseDeliverTx { func (mock *mockProxyApp) DeliverTx(tx []byte) abci.ResponseDeliverTx {
r := mock.abciResponses.DeliverTx[mock.txCount] r := mock.abciResponses.DeliverTx[mock.txCount]
mock.txCount += 1 mock.txCount++
return *r return *r
} }

View File

@@ -87,9 +87,9 @@ func (cs *ConsensusState) ReplayFile(file string, console bool) error {
} }
if nextN > 0 { if nextN > 0 {
nextN -= 1 nextN--
} }
pb.count += 1 pb.count++
} }
return nil return nil
} }
@@ -153,7 +153,7 @@ func (pb *playback) replayReset(count int, newStepCh chan interface{}) error {
if err := pb.cs.readReplayMessage(msg, newStepCh); err != nil { if err := pb.cs.readReplayMessage(msg, newStepCh); err != nil {
return err return err
} }
pb.count += 1 pb.count++
} }
return nil return nil
} }
@@ -197,14 +197,13 @@ func (pb *playback) replayConsoleLoop() int {
if len(tokens) == 1 { if len(tokens) == 1 {
return 0 return 0
} else { }
i, err := strconv.Atoi(tokens[1]) i, err := strconv.Atoi(tokens[1])
if err != nil { if err != nil {
fmt.Println("next takes an integer argument") fmt.Println("next takes an integer argument")
} else { } else {
return i return i
} }
}
case "back": case "back":
// "back" -> go back one message // "back" -> go back one message

View File

@@ -17,8 +17,7 @@ import (
"github.com/tendermint/abci/example/kvstore" "github.com/tendermint/abci/example/kvstore"
abci "github.com/tendermint/abci/types" abci "github.com/tendermint/abci/types"
crypto "github.com/tendermint/go-crypto" "github.com/tendermint/go-crypto"
wire "github.com/tendermint/go-wire"
auto "github.com/tendermint/tmlibs/autofile" auto "github.com/tendermint/tmlibs/autofile"
cmn "github.com/tendermint/tmlibs/common" cmn "github.com/tendermint/tmlibs/common"
dbm "github.com/tendermint/tmlibs/db" dbm "github.com/tendermint/tmlibs/db"
@@ -27,6 +26,7 @@ import (
"github.com/tendermint/tendermint/proxy" "github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state" sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/types"
pvm "github.com/tendermint/tendermint/types/priv_validator"
"github.com/tendermint/tmlibs/log" "github.com/tendermint/tmlibs/log"
) )
@@ -60,7 +60,7 @@ func startNewConsensusStateAndWaitForBlock(t *testing.T, lastBlockHeight int64,
bytes, _ := ioutil.ReadFile(cs.config.WalFile()) bytes, _ := ioutil.ReadFile(cs.config.WalFile())
// fmt.Printf("====== WAL: \n\r%s\n", bytes) // fmt.Printf("====== WAL: \n\r%s\n", bytes)
t.Logf("====== WAL: \n\r%s\n", bytes) t.Logf("====== WAL: \n\r%X\n", bytes)
err := cs.Start() err := cs.Start()
require.NoError(t, err) require.NoError(t, err)
@@ -325,7 +325,7 @@ func testHandshakeReplay(t *testing.T, nBlocks int, mode uint) {
walFile := tempWALWithData(walBody) walFile := tempWALWithData(walBody)
config.Consensus.SetWalFile(walFile) config.Consensus.SetWalFile(walFile)
privVal := types.LoadPrivValidatorFS(config.PrivValidatorFile()) privVal := pvm.LoadFilePV(config.PrivValidatorFile())
wal, err := NewWAL(walFile, false) wal, err := NewWAL(walFile, false)
if err != nil { if err != nil {
@@ -382,9 +382,9 @@ func testHandshakeReplay(t *testing.T, nBlocks int, mode uint) {
expectedBlocksToSync := NUM_BLOCKS - nBlocks expectedBlocksToSync := NUM_BLOCKS - nBlocks
if nBlocks == NUM_BLOCKS && mode > 0 { if nBlocks == NUM_BLOCKS && mode > 0 {
expectedBlocksToSync += 1 expectedBlocksToSync++
} else if nBlocks > 0 && mode == 1 { } else if nBlocks > 0 && mode == 1 {
expectedBlocksToSync += 1 expectedBlocksToSync++
} }
if handshaker.NBlocks() != expectedBlocksToSync { if handshaker.NBlocks() != expectedBlocksToSync {
@@ -519,8 +519,8 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) {
case EndHeightMessage: case EndHeightMessage:
// if its not the first one, we have a full block // if its not the first one, we have a full block
if thisBlockParts != nil { if thisBlockParts != nil {
var n int var block = new(types.Block)
block := wire.ReadBinary(&types.Block{}, thisBlockParts.GetReader(), 0, &n, &err).(*types.Block) _, err = cdc.UnmarshalBinaryReader(thisBlockParts.GetReader(), block, 0)
if err != nil { if err != nil {
panic(err) panic(err)
} }
@@ -533,7 +533,7 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) {
} }
blocks = append(blocks, block) blocks = append(blocks, block)
commits = append(commits, thisBlockCommit) commits = append(commits, thisBlockCommit)
height += 1 height++
} }
case *types.PartSetHeader: case *types.PartSetHeader:
thisBlockParts = types.NewPartSetFromHeader(*p) thisBlockParts = types.NewPartSetFromHeader(*p)
@@ -552,8 +552,8 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) {
} }
} }
// grab the last block too // grab the last block too
var n int var block = new(types.Block)
block := wire.ReadBinary(&types.Block{}, thisBlockParts.GetReader(), 0, &n, &err).(*types.Block) _, err = cdc.UnmarshalBinaryReader(thisBlockParts.GetReader(), block, 0)
if err != nil { if err != nil {
panic(err) panic(err)
} }

View File

@@ -10,8 +10,6 @@ import (
"time" "time"
fail "github.com/ebuchman/fail-test" fail "github.com/ebuchman/fail-test"
wire "github.com/tendermint/go-wire"
cmn "github.com/tendermint/tmlibs/common" cmn "github.com/tendermint/tmlibs/common"
"github.com/tendermint/tmlibs/log" "github.com/tendermint/tmlibs/log"
@@ -170,18 +168,23 @@ func (cs *ConsensusState) GetState() sm.State {
return cs.state.Copy() return cs.state.Copy()
} }
// GetRoundState returns a copy of the internal consensus state. // GetRoundState returns a shallow copy of the internal consensus state.
func (cs *ConsensusState) GetRoundState() *cstypes.RoundState { func (cs *ConsensusState) GetRoundState() *cstypes.RoundState {
cs.mtx.Lock() cs.mtx.Lock()
defer cs.mtx.Unlock() defer cs.mtx.Unlock()
return cs.getRoundState()
}
func (cs *ConsensusState) getRoundState() *cstypes.RoundState {
rs := cs.RoundState // copy rs := cs.RoundState // copy
return &rs return &rs
} }
// GetRoundStateJSON returns a JSON representation of RoundState, marshalled using go-amino.
func (cs *ConsensusState) GetRoundStateJSON() ([]byte, error) {
cs.mtx.Lock()
defer cs.mtx.Unlock()
return cdc.MarshalJSON(cs.RoundState)
}
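One plausible consumer of the new accessor is a debug endpoint that dumps the round state as JSON. A hedged sketch, with the RoundStateDumper interface standing in for *ConsensusState so the snippet stays self-contained:

package main

import "net/http"

// RoundStateDumper is a hypothetical interface matching the new accessor.
type RoundStateDumper interface {
	GetRoundStateJSON() ([]byte, error)
}

func roundStateHandler(cs RoundStateDumper) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		bz, err := cs.GetRoundStateJSON()
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		w.Header().Set("Content-Type", "application/json")
		w.Write(bz)
	}
}

func main() {
	// Wiring is left out: in a node this handler would be constructed with
	// the live consensus state instead of nil.
	_ = roundStateHandler(nil)
}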
// GetValidators returns a copy of the current validators. // GetValidators returns a copy of the current validators.
func (cs *ConsensusState) GetValidators() (int64, []*types.Validator) { func (cs *ConsensusState) GetValidators() (int64, []*types.Validator) {
cs.mtx.Lock() cs.mtx.Lock()
@@ -494,7 +497,7 @@ func (cs *ConsensusState) updateToState(state sm.State) {
func (cs *ConsensusState) newStep() { func (cs *ConsensusState) newStep() {
rs := cs.RoundStateEvent() rs := cs.RoundStateEvent()
cs.wal.Save(rs) cs.wal.Save(rs)
cs.nSteps += 1 cs.nSteps++
// newStep is called by updateToStep in NewConsensusState before the eventBus is set! // newStep is called by updateToStep in NewConsensusState before the eventBus is set!
if cs.eventBus != nil { if cs.eventBus != nil {
cs.eventBus.PublishEventNewRoundStep(rs) cs.eventBus.PublishEventNewRoundStep(rs)
@@ -720,11 +723,7 @@ func (cs *ConsensusState) needProofBlock(height int64) bool {
func (cs *ConsensusState) proposalHeartbeat(height int64, round int) { func (cs *ConsensusState) proposalHeartbeat(height int64, round int) {
counter := 0 counter := 0
addr := cs.privValidator.GetAddress() addr := cs.privValidator.GetAddress()
valIndex, v := cs.Validators.GetByAddress(addr) valIndex, _ := cs.Validators.GetByAddress(addr)
if v == nil {
// not a validator
valIndex = -1
}
chainID := cs.state.ChainID chainID := cs.state.ChainID
for { for {
rs := cs.GetRoundState() rs := cs.GetRoundState()
@@ -741,7 +740,7 @@ func (cs *ConsensusState) proposalHeartbeat(height int64, round int) {
} }
cs.privValidator.SignHeartbeat(chainID, heartbeat) cs.privValidator.SignHeartbeat(chainID, heartbeat)
cs.eventBus.PublishEventProposalHeartbeat(types.EventDataProposalHeartbeat{heartbeat}) cs.eventBus.PublishEventProposalHeartbeat(types.EventDataProposalHeartbeat{heartbeat})
counter += 1 counter++
time.Sleep(proposalHeartbeatIntervalSeconds * time.Second) time.Sleep(proposalHeartbeatIntervalSeconds * time.Second)
} }
} }
@@ -780,7 +779,7 @@ func (cs *ConsensusState) enterPropose(height int64, round int) {
// if not a validator, we're done // if not a validator, we're done
if !cs.Validators.HasAddress(cs.privValidator.GetAddress()) { if !cs.Validators.HasAddress(cs.privValidator.GetAddress()) {
cs.Logger.Debug("This node is not a validator") cs.Logger.Debug("This node is not a validator", "addr", cs.privValidator.GetAddress(), "vals", cs.Validators)
return return
} }
cs.Logger.Debug("This node is a validator") cs.Logger.Debug("This node is a validator")
@@ -852,10 +851,10 @@ func (cs *ConsensusState) isProposalComplete() bool {
// make sure we have the prevotes from it too // make sure we have the prevotes from it too
if cs.Proposal.POLRound < 0 { if cs.Proposal.POLRound < 0 {
return true return true
} else { }
// if this is false the proposer is lying or we haven't received the POL yet // if this is false the proposer is lying or we haven't received the POL yet
return cs.Votes.Prevotes(cs.Proposal.POLRound).HasTwoThirdsMajority() return cs.Votes.Prevotes(cs.Proposal.POLRound).HasTwoThirdsMajority()
}
} }
// Create the next block to propose and return it. // Create the next block to propose and return it.
@@ -1301,10 +1300,10 @@ func (cs *ConsensusState) addProposalBlockPart(height int64, part *types.Part, v
} }
if added && cs.ProposalBlockParts.IsComplete() { if added && cs.ProposalBlockParts.IsComplete() {
// Added and completed! // Added and completed!
var n int _, err = cdc.UnmarshalBinaryReader(cs.ProposalBlockParts.GetReader(), &cs.ProposalBlock, int64(cs.state.ConsensusParams.BlockSize.MaxBytes))
var err error if err != nil {
cs.ProposalBlock = wire.ReadBinary(&types.Block{}, cs.ProposalBlockParts.GetReader(), return true, err
cs.state.ConsensusParams.BlockSize.MaxBytes, &n, &err).(*types.Block) }
// NOTE: it's possible to receive complete proposal blocks for future rounds without having the proposal // NOTE: it's possible to receive complete proposal blocks for future rounds without having the proposal
cs.Logger.Info("Received complete proposal block", "height", cs.ProposalBlock.Height, "hash", cs.ProposalBlock.Hash()) cs.Logger.Info("Received complete proposal block", "height", cs.ProposalBlock.Height, "hash", cs.ProposalBlock.Hash())
if cs.Step == cstypes.RoundStepPropose && cs.isProposalComplete() { if cs.Step == cstypes.RoundStepPropose && cs.isProposalComplete() {
@@ -1314,7 +1313,7 @@ func (cs *ConsensusState) addProposalBlockPart(height int64, part *types.Part, v
// If we're waiting on the proposal block... // If we're waiting on the proposal block...
cs.tryFinalizeCommit(height) cs.tryFinalizeCommit(height)
} }
return true, err return true, nil
} }
return added, nil return added, nil
} }
@@ -1498,12 +1497,11 @@ func (cs *ConsensusState) signAddVote(type_ byte, hash []byte, header types.Part
cs.sendInternalMessage(msgInfo{&VoteMessage{vote}, ""}) cs.sendInternalMessage(msgInfo{&VoteMessage{vote}, ""})
cs.Logger.Info("Signed and pushed vote", "height", cs.Height, "round", cs.Round, "vote", vote, "err", err) cs.Logger.Info("Signed and pushed vote", "height", cs.Height, "round", cs.Round, "vote", vote, "err", err)
return vote return vote
} else { }
//if !cs.replayMode { //if !cs.replayMode {
cs.Logger.Error("Error signing vote", "height", cs.Height, "round", cs.Round, "vote", vote, "err", err) cs.Logger.Error("Error signing vote", "height", cs.Height, "round", cs.Round, "vote", vote, "err", err)
//} //}
return nil return nil
}
} }
//--------------------------------------------------------- //---------------------------------------------------------


@@ -261,7 +261,7 @@ func TestStateFullRound1(t *testing.T) {
// grab proposal // grab proposal
re := <-propCh re := <-propCh
propBlockHash := re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*cstypes.RoundState).ProposalBlock.Hash() propBlockHash := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState).ProposalBlock.Hash()
<-voteCh // wait for prevote <-voteCh // wait for prevote
validatePrevote(t, cs, round, vss[0], propBlockHash) validatePrevote(t, cs, round, vss[0], propBlockHash)
@@ -356,7 +356,7 @@ func TestStateLockNoPOL(t *testing.T) {
cs1.startRoutines(0) cs1.startRoutines(0)
re := <-proposalCh re := <-proposalCh
rs := re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*cstypes.RoundState) rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
theBlockHash := rs.ProposalBlock.Hash() theBlockHash := rs.ProposalBlock.Hash()
<-voteCh // prevote <-voteCh // prevote
@@ -396,7 +396,7 @@ func TestStateLockNoPOL(t *testing.T) {
// now we're on a new round and not the proposer, so wait for timeout // now we're on a new round and not the proposer, so wait for timeout
re = <-timeoutProposeCh re = <-timeoutProposeCh
rs = re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*cstypes.RoundState) rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
if rs.ProposalBlock != nil { if rs.ProposalBlock != nil {
panic("Expected proposal block to be nil") panic("Expected proposal block to be nil")
@@ -409,7 +409,7 @@ func TestStateLockNoPOL(t *testing.T) {
validatePrevote(t, cs1, 1, vss[0], rs.LockedBlock.Hash()) validatePrevote(t, cs1, 1, vss[0], rs.LockedBlock.Hash())
// add a conflicting prevote from the other validator // add a conflicting prevote from the other validator
signAddVotes(cs1, types.VoteTypePrevote, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2) signAddVotes(cs1, types.VoteTypePrevote, hash, rs.LockedBlock.MakePartSet(partSize).Header(), vs2)
<-voteCh <-voteCh
// now we're going to enter prevote again, but with invalid args // now we're going to enter prevote again, but with invalid args
@@ -424,7 +424,7 @@ func TestStateLockNoPOL(t *testing.T) {
// add conflicting precommit from vs2 // add conflicting precommit from vs2
// NOTE: in practice we should never get to a point where there are precommits for different blocks at the same round // NOTE: in practice we should never get to a point where there are precommits for different blocks at the same round
signAddVotes(cs1, types.VoteTypePrecommit, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2) signAddVotes(cs1, types.VoteTypePrecommit, hash, rs.LockedBlock.MakePartSet(partSize).Header(), vs2)
<-voteCh <-voteCh
// (note we're entering precommit for a second time this round, but with invalid args // (note we're entering precommit for a second time this round, but with invalid args
@@ -440,7 +440,7 @@ func TestStateLockNoPOL(t *testing.T) {
incrementRound(vs2) incrementRound(vs2)
re = <-proposalCh re = <-proposalCh
rs = re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*cstypes.RoundState) rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
// now we're on a new round and are the proposer // now we're on a new round and are the proposer
if !bytes.Equal(rs.ProposalBlock.Hash(), rs.LockedBlock.Hash()) { if !bytes.Equal(rs.ProposalBlock.Hash(), rs.LockedBlock.Hash()) {
@@ -529,7 +529,7 @@ func TestStateLockPOLRelock(t *testing.T) {
<-newRoundCh <-newRoundCh
re := <-proposalCh re := <-proposalCh
rs := re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*cstypes.RoundState) rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
theBlockHash := rs.ProposalBlock.Hash() theBlockHash := rs.ProposalBlock.Hash()
<-voteCh // prevote <-voteCh // prevote
@@ -605,9 +605,9 @@ func TestStateLockPOLRelock(t *testing.T) {
discardFromChan(voteCh, 2) discardFromChan(voteCh, 2)
be := <-newBlockCh be := <-newBlockCh
b := be.(types.TMEventData).Unwrap().(types.EventDataNewBlockHeader) b := be.(types.EventDataNewBlockHeader)
re = <-newRoundCh re = <-newRoundCh
rs = re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*cstypes.RoundState) rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
if rs.Height != 2 { if rs.Height != 2 {
panic("Expected height to increment") panic("Expected height to increment")
} }
@@ -643,7 +643,7 @@ func TestStateLockPOLUnlock(t *testing.T) {
startTestRound(cs1, cs1.Height, 0) startTestRound(cs1, cs1.Height, 0)
<-newRoundCh <-newRoundCh
re := <-proposalCh re := <-proposalCh
rs := re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*cstypes.RoundState) rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
theBlockHash := rs.ProposalBlock.Hash() theBlockHash := rs.ProposalBlock.Hash()
<-voteCh // prevote <-voteCh // prevote
@@ -669,7 +669,7 @@ func TestStateLockPOLUnlock(t *testing.T) {
// timeout to new round // timeout to new round
re = <-timeoutWaitCh re = <-timeoutWaitCh
rs = re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*cstypes.RoundState) rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
lockedBlockHash := rs.LockedBlock.Hash() lockedBlockHash := rs.LockedBlock.Hash()
//XXX: this isnt guaranteed to get there before the timeoutPropose ... //XXX: this isnt guaranteed to get there before the timeoutPropose ...
@@ -731,7 +731,7 @@ func TestStateLockPOLSafety1(t *testing.T) {
startTestRound(cs1, cs1.Height, 0) startTestRound(cs1, cs1.Height, 0)
<-newRoundCh <-newRoundCh
re := <-proposalCh re := <-proposalCh
rs := re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*cstypes.RoundState) rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
propBlock := rs.ProposalBlock propBlock := rs.ProposalBlock
<-voteCh // prevote <-voteCh // prevote
@@ -781,7 +781,7 @@ func TestStateLockPOLSafety1(t *testing.T) {
re = <-proposalCh re = <-proposalCh
} }
rs = re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*cstypes.RoundState) rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
if rs.LockedBlock != nil { if rs.LockedBlock != nil {
panic("we should not be locked!") panic("we should not be locked!")
@@ -1033,7 +1033,7 @@ func TestStateHalt1(t *testing.T) {
startTestRound(cs1, cs1.Height, 0) startTestRound(cs1, cs1.Height, 0)
<-newRoundCh <-newRoundCh
re := <-proposalCh re := <-proposalCh
rs := re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*cstypes.RoundState) rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
propBlock := rs.ProposalBlock propBlock := rs.ProposalBlock
propBlockParts := propBlock.MakePartSet(partSize) propBlockParts := propBlock.MakePartSet(partSize)
@@ -1056,7 +1056,7 @@ func TestStateHalt1(t *testing.T) {
// timeout to new round // timeout to new round
<-timeoutWaitCh <-timeoutWaitCh
re = <-newRoundCh re = <-newRoundCh
rs = re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*cstypes.RoundState) rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
t.Log("### ONTO ROUND 1") t.Log("### ONTO ROUND 1")
/*Round2 /*Round2
@@ -1074,7 +1074,7 @@ func TestStateHalt1(t *testing.T) {
// receiving that precommit should take us straight to commit // receiving that precommit should take us straight to commit
<-newBlockCh <-newBlockCh
re = <-newRoundCh re = <-newRoundCh
rs = re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*cstypes.RoundState) rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
if rs.Height != 2 { if rs.Height != 2 {
panic("expected height to increment") panic("expected height to increment")


@@ -48,7 +48,7 @@ func TestPeerCatchupRounds(t *testing.T) {
} }
func makeVoteHR(t *testing.T, height int64, round int, privVals []*types.PrivValidatorFS, valIndex int) *types.Vote { func makeVoteHR(t *testing.T, height int64, round int, privVals []types.PrivValidator, valIndex int) *types.Vote {
privVal := privVals[valIndex] privVal := privVals[valIndex]
vote := &types.Vote{ vote := &types.Vote{
ValidatorAddress: privVal.GetAddress(), ValidatorAddress: privVal.GetAddress(),


@@ -52,9 +52,6 @@ func (rs RoundStepType) String() string {
//----------------------------------------------------------------------------- //-----------------------------------------------------------------------------
// RoundState defines the internal consensus state. // RoundState defines the internal consensus state.
// It is Immutable when returned from ConsensusState.GetRoundState()
// TODO: Actually, only the top pointer is copied,
// so access to field pointers is still racey
// NOTE: Not thread safe. Should only be manipulated by functions downstream // NOTE: Not thread safe. Should only be manipulated by functions downstream
// of the cs.receiveRoutine // of the cs.receiveRoutine
type RoundState struct { type RoundState struct {


@@ -1,7 +1,6 @@
package consensus package consensus
import ( import (
"bytes"
"encoding/binary" "encoding/binary"
"fmt" "fmt"
"hash/crc32" "hash/crc32"
@@ -11,7 +10,7 @@ import (
"github.com/pkg/errors" "github.com/pkg/errors"
wire "github.com/tendermint/go-wire" amino "github.com/tendermint/go-amino"
"github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/types"
auto "github.com/tendermint/tmlibs/autofile" auto "github.com/tendermint/tmlibs/autofile"
cmn "github.com/tendermint/tmlibs/common" cmn "github.com/tendermint/tmlibs/common"
@@ -38,13 +37,13 @@ type EndHeightMessage struct {
type WALMessage interface{} type WALMessage interface{}
var _ = wire.RegisterInterface( func RegisterWALMessages(cdc *amino.Codec) {
struct{ WALMessage }{}, cdc.RegisterInterface((*WALMessage)(nil), nil)
wire.ConcreteType{types.EventDataRoundState{}, 0x01}, cdc.RegisterConcrete(types.EventDataRoundState{}, "tendermint/wal/EventDataRoundState", nil)
wire.ConcreteType{msgInfo{}, 0x02}, cdc.RegisterConcrete(msgInfo{}, "tendermint/wal/MsgInfo", nil)
wire.ConcreteType{timeoutInfo{}, 0x03}, cdc.RegisterConcrete(timeoutInfo{}, "tendermint/wal/TimeoutInfo", nil)
wire.ConcreteType{EndHeightMessage{}, 0x04}, cdc.RegisterConcrete(EndHeightMessage{}, "tendermint/wal/EndHeightMessage", nil)
) }
//-------------------------------------------------------- //--------------------------------------------------------
// Simple write-ahead logger // Simple write-ahead logger
@@ -193,7 +192,7 @@ func (wal *baseWAL) SearchForEndHeight(height int64, options *WALSearchOptions)
// A WALEncoder writes custom-encoded WAL messages to an output stream. // A WALEncoder writes custom-encoded WAL messages to an output stream.
// //
// Format: 4 bytes CRC sum + 4 bytes length + arbitrary-length value (go-wire encoded) // Format: 4 bytes CRC sum + 4 bytes length + arbitrary-length value (go-amino encoded)
type WALEncoder struct { type WALEncoder struct {
wr io.Writer wr io.Writer
} }
@@ -205,7 +204,7 @@ func NewWALEncoder(wr io.Writer) *WALEncoder {
// Encode writes the custom encoding of v to the stream. // Encode writes the custom encoding of v to the stream.
func (enc *WALEncoder) Encode(v *TimedWALMessage) error { func (enc *WALEncoder) Encode(v *TimedWALMessage) error {
data := wire.BinaryBytes(v) data := cdc.MustMarshalBinaryBare(v)
crc := crc32.Checksum(data, crc32c) crc := crc32.Checksum(data, crc32c)
length := uint32(len(data)) length := uint32(len(data))
@@ -298,9 +297,8 @@ func (dec *WALDecoder) Decode() (*TimedWALMessage, error) {
return nil, DataCorruptionError{fmt.Errorf("checksums do not match: (read: %v, actual: %v)", crc, actualCRC)} return nil, DataCorruptionError{fmt.Errorf("checksums do not match: (read: %v, actual: %v)", crc, actualCRC)}
} }
var nn int var res = new(TimedWALMessage) // nolint: gosimple
var res *TimedWALMessage // nolint: gosimple err = cdc.UnmarshalBinaryBare(data, res)
res = wire.ReadBinary(&TimedWALMessage{}, bytes.NewBuffer(data), int(length), &nn, &err).(*TimedWALMessage)
if err != nil { if err != nil {
return nil, DataCorruptionError{fmt.Errorf("failed to decode data: %v", err)} return nil, DataCorruptionError{fmt.Errorf("failed to decode data: %v", err)}
} }
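To make the framing concrete, here is a minimal sketch of writing one record in the format described above (4-byte CRC, 4-byte length, go-amino encoded value). It assumes the package-level `cdc` codec and `crc32c` table from this file are in scope; the helper name and the big-endian byte order are illustrative assumptions, not a statement about the actual encoder.

```go
// Sketch only: frame a TimedWALMessage as [4-byte CRC][4-byte length][amino bytes].
func writeWALRecord(wr io.Writer, v *TimedWALMessage) error {
	data := cdc.MustMarshalBinaryBare(v) // go-amino encoded value
	crc := crc32.Checksum(data, crc32c)  // checksum over the encoded value
	length := uint32(len(data))

	frame := make([]byte, 8+len(data))
	binary.BigEndian.PutUint32(frame[0:4], crc)    // assumed byte order
	binary.BigEndian.PutUint32(frame[4:8], length) // assumed byte order
	copy(frame[8:], data)

	_, err := wr.Write(frame)
	return err
}
```

The decoder hunk above mirrors this layout: it reads the CRC and length, recomputes the checksum over the payload, and only then unmarshals the amino bytes into a `TimedWALMessage`.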


@@ -4,7 +4,6 @@ import (
"bufio" "bufio"
"bytes" "bytes"
"fmt" "fmt"
"math/rand"
"os" "os"
"path/filepath" "path/filepath"
"strings" "strings"
@@ -17,6 +16,7 @@ import (
"github.com/tendermint/tendermint/proxy" "github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state" sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/types"
pvm "github.com/tendermint/tendermint/types/priv_validator"
auto "github.com/tendermint/tmlibs/autofile" auto "github.com/tendermint/tmlibs/autofile"
cmn "github.com/tendermint/tmlibs/common" cmn "github.com/tendermint/tmlibs/common"
"github.com/tendermint/tmlibs/db" "github.com/tendermint/tmlibs/db"
@@ -40,7 +40,7 @@ func WALWithNBlocks(numBlocks int) (data []byte, err error) {
// COPY PASTE FROM node.go WITH A FEW MODIFICATIONS // COPY PASTE FROM node.go WITH A FEW MODIFICATIONS
// NOTE: we can't import node package because of circular dependency // NOTE: we can't import node package because of circular dependency
privValidatorFile := config.PrivValidatorFile() privValidatorFile := config.PrivValidatorFile()
privValidator := types.LoadOrGenPrivValidatorFS(privValidatorFile) privValidator := pvm.LoadOrGenFilePV(privValidatorFile)
genDoc, err := types.GenesisDocFromFile(config.GenesisFile()) genDoc, err := types.GenesisDocFromFile(config.GenesisFile())
if err != nil { if err != nil {
return nil, errors.Wrap(err, "failed to read genesis file") return nil, errors.Wrap(err, "failed to read genesis file")
@@ -116,7 +116,7 @@ func makePathname() string {
func randPort() int { func randPort() int {
// returns between base and base + spread // returns between base and base + spread
base, spread := 20000, 20000 base, spread := 20000, 20000
return base + rand.Intn(spread) return base + cmn.RandIntn(spread)
} }
func makeAddrs() (string, string, string) { func makeAddrs() (string, string, string) {


@@ -3,11 +3,10 @@ package consensus
import ( import (
"bytes" "bytes"
"crypto/rand" "crypto/rand"
"sync" // "sync"
"testing" "testing"
"time" "time"
wire "github.com/tendermint/go-wire"
"github.com/tendermint/tendermint/consensus/types" "github.com/tendermint/tendermint/consensus/types"
tmtypes "github.com/tendermint/tendermint/types" tmtypes "github.com/tendermint/tendermint/types"
cmn "github.com/tendermint/tmlibs/common" cmn "github.com/tendermint/tmlibs/common"
@@ -36,7 +35,7 @@ func TestWALEncoderDecoder(t *testing.T) {
decoded, err := dec.Decode() decoded, err := dec.Decode()
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, msg.Time.Truncate(time.Millisecond), decoded.Time) assert.Equal(t, msg.Time.UTC(), decoded.Time)
assert.Equal(t, msg.Msg, decoded.Msg) assert.Equal(t, msg.Msg, decoded.Msg)
} }
} }
@@ -68,6 +67,7 @@ func TestWALSearchForEndHeight(t *testing.T) {
assert.Equal(t, rs.Height, h+1, cmn.Fmt("wrong height")) assert.Equal(t, rs.Height, h+1, cmn.Fmt("wrong height"))
} }
/*
var initOnce sync.Once var initOnce sync.Once
func registerInterfacesOnce() { func registerInterfacesOnce() {
@@ -78,6 +78,7 @@ func registerInterfacesOnce() {
) )
}) })
} }
*/
func nBytes(n int) []byte { func nBytes(n int) []byte {
buf := make([]byte, n) buf := make([]byte, n)
@@ -86,7 +87,7 @@ func nBytes(n int) []byte {
} }
func benchmarkWalDecode(b *testing.B, n int) { func benchmarkWalDecode(b *testing.B, n int) {
registerInterfacesOnce() // registerInterfacesOnce()
buf := new(bytes.Buffer) buf := new(bytes.Buffer)
enc := NewWALEncoder(buf) enc := NewWALEncoder(buf)

consensus/wire.go (new file)

@@ -0,0 +1,14 @@
package consensus
import (
amino "github.com/tendermint/go-amino"
"github.com/tendermint/go-crypto"
)
var cdc = amino.NewCodec()
func init() {
RegisterConsensusMessages(cdc)
RegisterWALMessages(cdc)
crypto.RegisterAmino(cdc)
}

docker-compose.yml (new file)

@@ -0,0 +1,68 @@
version: '3'
services:
node0:
container_name: node0
image: "tendermint/localnode"
ports:
- "46656-46657:46656-46657"
environment:
- ID=0
- LOG=${LOG:-tendermint.log}
volumes:
- ${FOLDER:-./build}:/tendermint:Z
networks:
localnet:
ipv4_address: 192.167.10.2
node1:
container_name: node1
image: "tendermint/localnode"
ports:
- "46659-46660:46656-46657"
environment:
- ID=1
- LOG=${LOG:-tendermint.log}
volumes:
- ${FOLDER:-./build}:/tendermint:Z
networks:
localnet:
ipv4_address: 192.167.10.3
node2:
container_name: node2
image: "tendermint/localnode"
environment:
- ID=2
- LOG=${LOG:-tendermint.log}
ports:
- "46661-46662:46656-46657"
volumes:
- ${FOLDER:-./build}:/tendermint:Z
networks:
localnet:
ipv4_address: 192.167.10.4
node3:
container_name: node3
image: "tendermint/localnode"
environment:
- ID=3
- LOG=${LOG:-tendermint.log}
ports:
- "46663-46664:46656-46657"
volumes:
- ${FOLDER:-./build}:/tendermint:Z
networks:
localnet:
ipv4_address: 192.167.10.5
networks:
localnet:
driver: bridge
ipam:
driver: default
config:
-
subnet: 192.167.10.0/16

docker-compose/Makefile (new file)

@@ -0,0 +1,7 @@
# Makefile for the "localnode" docker image.
all:
docker build --tag tendermint/localnode localnode
.PHONY: all

docker-compose/README.rst (new file)

@@ -0,0 +1,40 @@
localnode
=========
It is assumed that you have already `set up docker <https://docs.docker.com/engine/installation/>`__.
Description
-----------
Image for local testnets.
Add the tendermint binary to the image by attaching it in a folder to the `/tendermint` mount point.
It assumes that the configuration was created by the `tendermint testnet` command and that it is also attached to the `/tendermint` mount point.
Example:
This example builds a linux tendermint binary under the `build/` folder, creates tendermint configuration for a single-node validator and runs the node:
```
cd $GOPATH/src/github.com/tendermint/tendermint
#Build binary
make build-linux
#Create configuration
docker run -e LOG="stdout" -v `pwd`/build:/tendermint tendermint/localnode testnet --o . --v 1
#Run the node
docker run -v `pwd`/build:/tendermint tendermint/localnode
```
Logging
-------
The log is saved under the attached volume, in the `tendermint.log` file. If the `LOG` environment variable is set to `stdout` at start, the log is not saved, but printed on the screen.
Special binaries
----------------
If you have multiple binaries with different names, you can specify which one to run with the BINARY environment variable. The path of the binary is relative to the attached volume.
docker-compose.yml
==================
This file creates a 4-node network using the localnode image. The nodes of the network are exposed to the host machine on ports 46656-46657, 46659-46660, 46661-46662, 46663-46664 respectively.


@@ -0,0 +1,16 @@
FROM alpine:3.7
MAINTAINER Greg Szabo <greg@tendermint.com>
RUN apk update && \
apk upgrade && \
apk --no-cache add curl jq file
VOLUME [ /tendermint ]
WORKDIR /tendermint
EXPOSE 46656 46657
ENTRYPOINT ["/usr/bin/wrapper.sh"]
CMD ["node", "--proxy_app dummy"]
STOPSIGNAL SIGTERM
COPY wrapper.sh /usr/bin/wrapper.sh


@@ -0,0 +1,33 @@
#!/usr/bin/env sh
##
## Input parameters
##
BINARY=/tendermint/${BINARY:-tendermint}
ID=${ID:-0}
LOG=${LOG:-tendermint.log}
##
## Assert linux binary
##
if ! [ -f "${BINARY}" ]; then
echo "The binary `basename ${BINARY}` cannot be found. Please add the binary to the shared folder. Please use the BINARY environment variable if the name of the binary is not 'tendermint' E.g.: -e BINARY=tendermint_my_test_version"
exit 1
fi
BINARY_CHECK="`file $BINARY | grep 'ELF 64-bit LSB executable, x86-64'`"
if [ -z "${BINARY_CHECK}" ]; then
echo "Binary needs to be OS linux, ARCH amd64"
exit 1
fi
##
## Run binary with all parameters
##
export TMHOME="/tendermint/node${ID}"
if [ -d "${TMHOME}/${LOG}" ]; then
"$BINARY" $@ | tee "${TMHOME}/${LOG}"
else
"$BINARY" $@
fi


@@ -1,128 +1,29 @@
# ADR 008: PrivValidator # ADR 008: SocketPV
## Context Tendermint nodes should support only two in-process PrivValidator
implementations:
The current PrivValidator is monolithic and isn't easily reusable by alternative signers. - FilePV uses an unencrypted private key in a "priv_validator.json" file - no
configuration required (just `tendermint init`).
- SocketPV uses a socket to send signing requests to another process - user is
responsible for starting that process themselves.
For instance, see https://github.com/tendermint/tendermint/issues/673 The SocketPV address can be provided via flags at the command line - doing so
will cause Tendermint to ignore any "priv_validator.json" file and to listen on
the given address for incoming connections from an external priv_validator
process. It will halt any operation until at least one external process
has successfully connected.
The goal is to have a clean PrivValidator interface like: The external priv_validator process will dial the address to connect to
Tendermint, and then Tendermint will send requests on the ensuing connection to
sign votes and proposals. Thus the external process initiates the connection,
but the Tendermint process makes all requests. In a later stage we're going to
support multiple validators for fault tolerance. To prevent double signing they
need to be synced, which is deferred to an external solution (see #1185).
``` In addition, Tendermint will provide implementations that can be run in that
type PrivValidator interface { external process. These include:
Address() data.Bytes
PubKey() crypto.PubKey
SignVote(chainID string, vote *types.Vote) error
SignProposal(chainID string, proposal *types.Proposal) error
SignHeartbeat(chainID string, heartbeat *types.Heartbeat) error
}
```
It should also be easy to re-use the LastSignedInfo logic to avoid double signing.
## Decision
Tendermint nodes should support only two in-process PrivValidator implementations:
- PrivValidatorUnencrypted uses an unencrypted private key in a "priv_validator.json" file - no configuration required (just `tendermint init`).
- PrivValidatorSocket uses a socket to send signing requests to another process - user is responsible for starting that process themselves.
The PrivValidatorSocket address can be provided via flags at the command line -
doing so will cause Tendermint to ignore any "priv_validator.json" file and to listen
on the given address for incoming connections from an external priv_validator process.
It will halt any operation until at least one external process has successfully
connected.
The external priv_validator process will dial the address to connect to Tendermint,
and then Tendermint will send requests on the ensuing connection to sign votes and proposals.
Thus the external process initiates the connection, but the Tendermint process makes all requests.
In a later stage we're going to support multiple validators for fault
tolerance. To prevent double signing they need to be synced, which is deferred
to an external solution (see #1185).
In addition, Tendermint will provide implementations that can be run in that external process.
These include:
- PrivValidatorEncrypted uses an encrypted private key persisted to disk - user must enter password to decrypt key when process is started.
- PrivValidatorLedger uses a Ledger Nano S to handle all signing.
What follows are descriptions of useful types
### Signer
```
type Signer interface {
Sign(msg []byte) (crypto.Signature, error)
}
```
Signer signs a message. It can also return an error.
### ValidatorID
ValidatorID is just the Address and PubKey
```
type ValidatorID struct {
Address data.Bytes `json:"address"`
PubKey crypto.PubKey `json:"pub_key"`
}
```
### LastSignedInfo
LastSignedInfo tracks the last thing we signed:
```
type LastSignedInfo struct {
Height int64 `json:"height"`
Round int `json:"round"`
Step int8 `json:"step"`
Signature crypto.Signature `json:"signature,omitempty"` // so we dont lose signatures
SignBytes data.Bytes `json:"signbytes,omitempty"` // so we dont lose signatures
}
```
It exposes methods for signing votes and proposals using a `Signer`.
This allows it to be easily reused by developers implementing their own PrivValidator.
### PrivValidatorUnencrypted
```
type PrivValidatorUnencrypted struct {
ID types.ValidatorID `json:"id"`
PrivKey PrivKey `json:"priv_key"`
LastSignedInfo *LastSignedInfo `json:"last_signed_info"`
}
```
Has the same structure as currently, but broken up into sub structs.
Note the LastSignedInfo is mutated in place every time we sign.
### PrivValidatorJSON
The "priv_validator.json" file supports only the PrivValidatorUnencrypted type.
It unmarshals into PrivValidatorJSON, which is used as the default PrivValidator type.
It wraps the PrivValidatorUnencrypted and persists it to disk after every signature.
## Status
Accepted.
## Consequences
### Positive
- Cleaner separation of components enabling re-use.
### Negative
- More files - led to creation of new directory.
### Neutral
- FilePV will encrypt the private key, and the user must enter password to
decrypt key when process is started.
- LedgerPV uses a Ledger Nano S to handle all signing.
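For illustration, here is a rough sketch of what an external signer process could look like under this design: it dials the address Tendermint listens on and then serves signing requests over that connection. The request/response types, JSON framing, address, and helper below are hypothetical placeholders, not the actual Tendermint socket protocol.

```go
package main

import (
	"encoding/json"
	"log"
	"net"
)

// Hypothetical message shapes, for illustration only.
type SignRequest struct {
	ChainID   string `json:"chain_id"`
	SignBytes []byte `json:"sign_bytes"`
}

type SignResponse struct {
	Signature []byte `json:"signature"`
	Error     string `json:"error,omitempty"`
}

func main() {
	// The signer initiates the connection (dials Tendermint), after which
	// Tendermint makes all the requests on that connection.
	conn, err := net.Dial("tcp", "127.0.0.1:46659") // example address only
	if err != nil {
		log.Fatal(err)
	}
	dec, enc := json.NewDecoder(conn), json.NewEncoder(conn)
	for {
		var req SignRequest
		if err := dec.Decode(&req); err != nil {
			log.Fatal(err)
		}
		sig := signWithLocalKey(req.SignBytes) // hypothetical signing helper
		if err := enc.Encode(SignResponse{Signature: sig}); err != nil {
			log.Fatal(err)
		}
	}
}

// signWithLocalKey stands in for signing with, e.g., an encrypted key or a Ledger.
func signWithLocalKey(msg []byte) []byte { return nil }
```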


@@ -196,9 +196,8 @@ urllib.urlretrieve(tools_repo+tools_branch+'/mintnet-kubernetes/assets/statefuls
urllib.urlretrieve(tools_repo+tools_branch+'/mintnet-kubernetes/assets/t_plus_k.png', filename=assets_dir+'/t_plus_k.png') urllib.urlretrieve(tools_repo+tools_branch+'/mintnet-kubernetes/assets/t_plus_k.png', filename=assets_dir+'/t_plus_k.png')
urllib.urlretrieve(tools_repo+tools_branch+'/terraform-digitalocean/README.rst', filename=tools_dir+'/terraform-digitalocean.rst') urllib.urlretrieve(tools_repo+tools_branch+'/terraform-digitalocean/README.rst', filename=tools_dir+'/terraform-digitalocean.rst')
urllib.urlretrieve(tools_repo+tools_branch+'/tm-bench/README.rst', filename=tools_dir+'/benchmarking-and-monitoring.rst') urllib.urlretrieve(tools_repo+tools_branch+'/tm-bench/README.rst', filename=tools_dir+'/benchmarking.rst')
# the readme for below is included in tm-bench urllib.urlretrieve('https://raw.githubusercontent.com/tendermint/tools/master/tm-monitor/README.rst', filename='tools/monitoring.rst')
# urllib.urlretrieve('https://raw.githubusercontent.com/tendermint/tools/master/tm-monitor/README.rst', filename='tools/tm-monitor.rst')
#### abci spec ################################# #### abci spec #################################


@@ -11,26 +11,26 @@ Manual Deployments
It's relatively easy to setup a Tendermint cluster manually. The only It's relatively easy to setup a Tendermint cluster manually. The only
requirements for a particular Tendermint node are a private key for the requirements for a particular Tendermint node are a private key for the
validator, stored as ``priv_validator.json``, and a list of the public validator, stored as ``priv_validator.json``, a node key, stored as
keys of all validators, stored as ``genesis.json``. These files should ``node_key.json`` and a list of the public keys of all validators, stored as
be stored in ``~/.tendermint/config``, or wherever the ``$TMHOME`` variable ``genesis.json``. These files should be stored in ``~/.tendermint/config``, or
might be set to. wherever the ``$TMHOME`` variable might be set to.
Here are the steps to setting up a testnet manually: Here are the steps to setting up a testnet manually:
1) Provision nodes on your cloud provider of choice 1) Provision nodes on your cloud provider of choice
2) Install Tendermint and the application of interest on all nodes 2) Install Tendermint and the application of interest on all nodes
3) Generate a private key for each validator using 3) Generate a private key and a node key for each validator using
``tendermint gen_validator`` ``tendermint init``
4) Compile a list of public keys for each validator into a 4) Compile a list of public keys for each validator into a
``genesis.json`` file. ``genesis.json`` file and replace the existing file with it.
5) Run ``tendermint node --p2p.persistent_peers=< peer addresses >`` on each node, 5) Run ``tendermint node --p2p.persistent_peers=< peer addresses >`` on each node,
where ``< peer addresses >`` is a comma separated list of the IP:PORT where ``< peer addresses >`` is a comma separated list of the IP:PORT
combination for each node. The default port for Tendermint is combination for each node. The default port for Tendermint is
``46656``. Thus, if the IP addresses of your nodes were ``46656``. Thus, if the IP addresses of your nodes were
``192.168.0.1, 192.168.0.2, 192.168.0.3, 192.168.0.4``, the command ``192.168.0.1, 192.168.0.2, 192.168.0.3, 192.168.0.4``, the command
would look like: would look like:
``tendermint node --p2p.persistent_peers=192.168.0.1:46656,192.168.0.2:46656,192.168.0.3:46656,192.168.0.4:46656``. ``tendermint node --p2p.persistent_peers=96663a3dd0d7b9d17d4c8211b191af259621c693@192.168.0.1:46656,429fcf25974313b95673f58d77eacdd434402665@192.168.0.2:46656,0491d373a8e0fcf1023aaf18c51d6a1d0d4f31bd@192.168.0.3:46656,f9baeaa15fedf5e1ef7448dd60f46c01f1a9e9c4@192.168.0.4:46656``.
After a few seconds, all the nodes should connect to each other and start After a few seconds, all the nodes should connect to each other and start
making blocks! For more information, see the Tendermint Networks section making blocks! For more information, see the Tendermint Networks section


@@ -2,8 +2,9 @@
## Overview ## Overview
This is a quick start guide. If you have a vague idea about how Tendermint works This is a quick start guide. If you have a vague idea about how Tendermint
and want to get started right away, continue. Otherwise, [review the documentation](http://tendermint.readthedocs.io/en/master/) works and want to get started right away, continue. Otherwise, [review the
documentation](http://tendermint.readthedocs.io/en/master/).
## Install ## Install
@@ -42,7 +43,7 @@ Confirm installation:
``` ```
$ tendermint version $ tendermint version
0.15.0-381fe19 0.18.0-XXXXXXX
``` ```
## Initialization ## Initialization
@@ -117,7 +118,9 @@ where the value is returned in hex.
## Cluster of Nodes ## Cluster of Nodes
First create four Ubuntu cloud machines. The following was tested on Digital Ocean Ubuntu 16.04 x64 (3GB/1CPU, 20GB SSD). We'll refer to their respective IP addresses below as IP1, IP2, IP3, IP4. First create four Ubuntu cloud machines. The following was tested on Digital
Ocean Ubuntu 16.04 x64 (3GB/1CPU, 20GB SSD). We'll refer to their respective IP
addresses below as IP1, IP2, IP3, IP4.
Then, `ssh` into each machine, and execute [this script](https://git.io/vNLfY): Then, `ssh` into each machine, and execute [this script](https://git.io/vNLfY):
@@ -131,12 +134,16 @@ This will install `go` and other dependencies, get the Tendermint source code, t
Next, `cd` into `docs/examples`. Each command below should be run from each node, in sequence: Next, `cd` into `docs/examples`. Each command below should be run from each node, in sequence:
``` ```
tendermint node --home ./node1 --proxy_app=kvstore --p2p.seeds IP1:46656,IP2:46656,IP3:46656,IP4:46656 tendermint node --home ./node1 --proxy_app=kvstore --p2p.persistent_peers="3a558bd6f8c97453aa6c2372bb800e8b6ed8e6db@IP1:46656,ccf30d873fddda10a495f42687c8f33472a6569f@IP2:46656,9a4c3de5d6788a76c6ee3cd9ff41e3b45b4cfd14@IP3:46656,58e6f2ab297b3ceae107ba4c8c2898da5c009ff4@IP4:46656"
tendermint node --home ./node2 --proxy_app=kvstore --p2p.seeds IP1:46656,IP2:46656,IP3:46656,IP4:46656 tendermint node --home ./node2 --proxy_app=kvstore --p2p.persistent_peers="3a558bd6f8c97453aa6c2372bb800e8b6ed8e6db@IP1:46656,ccf30d873fddda10a495f42687c8f33472a6569f@IP2:46656,9a4c3de5d6788a76c6ee3cd9ff41e3b45b4cfd14@IP3:46656,58e6f2ab297b3ceae107ba4c8c2898da5c009ff4@IP4:46656"
tendermint node --home ./node3 --proxy_app=kvstore --p2p.seeds IP1:46656,IP2:46656,IP3:46656,IP4:46656 tendermint node --home ./node3 --proxy_app=kvstore --p2p.persistent_peers="3a558bd6f8c97453aa6c2372bb800e8b6ed8e6db@IP1:46656,ccf30d873fddda10a495f42687c8f33472a6569f@IP2:46656,9a4c3de5d6788a76c6ee3cd9ff41e3b45b4cfd14@IP3:46656,58e6f2ab297b3ceae107ba4c8c2898da5c009ff4@IP4:46656"
tendermint node --home ./node4 --proxy_app=kvstore --p2p.seeds IP1:46656,IP2:46656,IP3:46656,IP4:46656 tendermint node --home ./node4 --proxy_app=kvstore --p2p.persistent_peers="3a558bd6f8c97453aa6c2372bb800e8b6ed8e6db@IP1:46656,ccf30d873fddda10a495f42687c8f33472a6569f@IP2:46656,9a4c3de5d6788a76c6ee3cd9ff41e3b45b4cfd14@IP3:46656,58e6f2ab297b3ceae107ba4c8c2898da5c009ff4@IP4:46656"
``` ```
Note that after the third node is started, blocks will start to stream in because >2/3 of validators (defined in the `genesis.json`) have come online. Seeds can also be specified in the `config.toml`. See [this PR](https://github.com/tendermint/tendermint/pull/792) for more information about configuration options. Note that after the third node is started, blocks will start to stream in
because >2/3 of validators (defined in the `genesis.json`) have come online.
Seeds can also be specified in the `config.toml`. See [this
PR](https://github.com/tendermint/tendermint/pull/792) for more information
about configuration options.
Transactions can then be sent as covered in the single, local node example above. Transactions can then be sent as covered in the single, local node example above.


@@ -26,7 +26,7 @@ go get $REPO
cd $GOPATH/src/$REPO cd $GOPATH/src/$REPO
## build ## build
git checkout v0.17.0 git checkout v0.18.0
make get_tools make get_tools
make get_vendor_deps make get_vendor_deps
make install make install


@@ -0,0 +1,6 @@
{
"priv_key" : {
"data" : "DA9BAABEA7211A6D93D9A1986B4279EAB3021FAA1653D459D53E6AB4D1CFB4C69BF7D52E48CF00AC5779AA0A6D3C368955D5636A677F72370B8ED19989714CFC",
"type" : "ed25519"
}
}


@@ -0,0 +1,6 @@
{
"priv_key" : {
"data" : "F7BCABA165DFC0DDD50AE563EFB285BAA236EA805D35612504238A36EFA105958756442B1D9F942D7ABD259F2D59671657B6378E9C7194342A7AAA47A66D1E95",
"type" : "ed25519"
}
}


@@ -0,0 +1,6 @@
{
"priv_key" : {
"data" : "95136FCC97E4446B3141EDF9841078107ECE755E99925D79CCBF91085492680B3CA1034D9917DF1DED4E4AB2D9BC225919F6CB2176F210D2368697CC339DF4E7",
"type" : "ed25519"
}
}


@@ -0,0 +1,6 @@
{
"priv_key" : {
"data" : "8895D6C9A1B46AB83A8E2BAE2121B8C3E245B9E9126EBD797FEAC5058285F2F64FDE2E8182C88AD5185A49D837C581465D57BD478C41865A66D7D9742D8AEF57",
"type" : "ed25519"
}
}


@@ -81,9 +81,8 @@ Tendermint node as follows:
curl -s localhost:46657/status curl -s localhost:46657/status
The ``-s`` just silences ``curl``. For nicer output, pipe the result The ``-s`` just silences ``curl``. For nicer output, pipe the result into a
into a tool like `jq <https://stedolan.github.io/jq/>`__ or tool like `jq <https://stedolan.github.io/jq/>`__ or ``json_pp``.
`jsonpp <https://github.com/jmhodges/jsonpp>`__.
Now let's send some transactions to the kvstore. Now let's send some transactions to the kvstore.
@@ -104,17 +103,23 @@ like:
"id": "", "id": "",
"result": { "result": {
"check_tx": { "check_tx": {
"code": 0, "fee": {}
"data": "",
"log": ""
}, },
"deliver_tx": { "deliver_tx": {
"code": 0, "tags": [
"data": "", {
"log": "" "key": "YXBwLmNyZWF0b3I=",
"value": "amFl"
}, },
"hash": "2B8EC32BA2579B3B8606E42C06DE2F7AFA2556EF", {
"height": 154 "key": "YXBwLmtleQ==",
"value": "YWJjZA=="
}
],
"fee": {}
},
"hash": "9DF66553F98DE3C26E3C3317A3E4CED54F714E39",
"height": 14
} }
} }
@@ -134,20 +139,17 @@ The result should look like:
"id": "", "id": "",
"result": { "result": {
"response": { "response": {
"code": 0, "log": "exists",
"index": 0, "index": "-1",
"key": "", "key": "YWJjZA==",
"value": "61626364", "value": "YWJjZA=="
"proof": "",
"height": 0,
"log": "exists"
} }
} }
} }
Note the ``value`` in the result (``61626364``); this is the Note the ``value`` in the result (``YWJjZA==``); this is the
hex-encoding of the ASCII of ``abcd``. You can verify this in base64-encoding of the ASCII of ``abcd``. You can verify this in
a python 2 shell by running ``"61626364".decode('hex')`` or in python 3 shell by running ``import codecs; codecs.decode("61626364", 'hex').decode('ascii')``. Stay a python 2 shell by running ``"YWJjZA==".decode('base64')`` or in python 3 shell by running ``import codecs; codecs.decode("YWJjZA==", 'base64').decode('ascii')``. Stay
tuned for a future release that `makes this output more human-readable <https://github.com/tendermint/abci/issues/32>`__. tuned for a future release that `makes this output more human-readable <https://github.com/tendermint/abci/issues/32>`__.
Now let's try setting a different key and value: Now let's try setting a different key and value:
@@ -157,7 +159,7 @@ Now let's try setting a different key and value:
curl -s 'localhost:46657/broadcast_tx_commit?tx="name=satoshi"' curl -s 'localhost:46657/broadcast_tx_commit?tx="name=satoshi"'
Now if we query for ``name``, we should get ``satoshi``, or Now if we query for ``name``, we should get ``satoshi``, or
``7361746F736869`` in hex: ``c2F0b3NoaQ==`` in base64:
:: ::
@@ -226,17 +228,15 @@ the number ``1``. If instead, we try to send a ``5``, we get an error:
"id": "", "id": "",
"result": { "result": {
"check_tx": { "check_tx": {
"code": 0, "fee": {}
"data": "",
"log": ""
}, },
"deliver_tx": { "deliver_tx": {
"code": 3, "code": 2,
"data": "", "log": "Invalid nonce. Expected 1, got 5",
"log": "Invalid nonce. Expected 1, got 5" "fee": {}
}, },
"hash": "33B93DFF98749B0D6996A70F64071347060DC19C", "hash": "33B93DFF98749B0D6996A70F64071347060DC19C",
"height": 38 "height": 34
} }
} }
@@ -250,17 +250,13 @@ But if we send a ``1``, it works again:
"id": "", "id": "",
"result": { "result": {
"check_tx": { "check_tx": {
"code": 0, "fee": {}
"data": "",
"log": ""
}, },
"deliver_tx": { "deliver_tx": {
"code": 0, "fee": {}
"data": "",
"log": ""
}, },
"hash": "F17854A977F6FA7EEA1BD758E296710B86F72F3D", "hash": "F17854A977F6FA7EEA1BD758E296710B86F72F3D",
"height": 87 "height": 60
} }
} }


@@ -59,7 +59,7 @@ Next we replay all the messages from the WAL.
:: ::
I[10-04|13:54:30.391] Starting RPC HTTP server on tcp socket 0.0.0.0:46657 module=rpc-server I[10-04|13:54:30.391] Starting RPC HTTP server on tcp socket 0.0.0.0:46657 module=rpc-server
I[10-04|13:54:30.392] Started node module=main nodeInfo="NodeInfo{pk: PubKeyEd25519{DF22D7C92C91082324A1312F092AA1DA197FA598DBBFB6526E177003C4D6FD66}, moniker: anonymous, network: test-chain-3MNw2N [remote , listen 10.0.2.15:46656], version: 0.11.0-10f361fc ([wire_version=0.6.2 p2p_version=0.5.0 consensus_version=v1/0.2.2 rpc_version=0.7.0/3 tx_index=on rpc_addr=tcp://0.0.0.0:46657])}" I[10-04|13:54:30.392] Started node module=main nodeInfo="NodeInfo{id: DF22D7C92C91082324A1312F092AA1DA197FA598DBBFB6526E, moniker: anonymous, network: test-chain-3MNw2N [remote , listen 10.0.2.15:46656], version: 0.11.0-10f361fc ([wire_version=0.6.2 p2p_version=0.5.0 consensus_version=v1/0.2.2 rpc_version=0.7.0/3 tx_index=on rpc_addr=tcp://0.0.0.0:46657])}"
Next follows a standard block creation cycle, where we enter a new round, Next follows a standard block creation cycle, where we enter a new round,
propose a block, receive more than 2/3 of prevotes, then precommits and finally propose a block, receive more than 2/3 of prevotes, then precommits and finally


@@ -44,7 +44,8 @@ Tendermint Tools
tools/docker.rst tools/docker.rst
tools/mintnet-kubernetes.rst tools/mintnet-kubernetes.rst
tools/terraform-digitalocean.rst tools/terraform-digitalocean.rst
tools/benchmarking-and-monitoring.rst tools/benchmarking.rst
tools/monitoring.rst
Tendermint 102 Tendermint 102
-------------- --------------


@@ -4,7 +4,7 @@ Install Tendermint
From Binary From Binary
----------- -----------
To download pre-built binaries, see the `Download page <https://tendermint.com/download>`__. To download pre-built binaries, see the `Download page <https://tendermint.com/downloads>`__.
From Source From Source
----------- -----------
@@ -37,13 +37,13 @@ First, install ``dep``:
:: ::
cd $GOPATH/src/github.com/tendermint/tendermint
make get_tools make get_tools
Now we can fetch the correct versions of each dependency by running: Now we can fetch the correct versions of each dependency by running:
:: ::
cd $GOPATH/src/github.com/tendermint/tendermint
make get_vendor_deps make get_vendor_deps
make install make install
@@ -96,6 +96,7 @@ If ``go get`` failing bothers you, fetch the code using ``git``:
mkdir -p $GOPATH/src/github.com/tendermint mkdir -p $GOPATH/src/github.com/tendermint
git clone https://github.com/tendermint/tendermint $GOPATH/src/github.com/tendermint/tendermint git clone https://github.com/tendermint/tendermint $GOPATH/src/github.com/tendermint/tendermint
cd $GOPATH/src/github.com/tendermint/tendermint cd $GOPATH/src/github.com/tendermint/tendermint
make get_tools
make get_vendor_deps make get_vendor_deps
make install make install


@@ -18,8 +18,8 @@ Fields
- ``power``: The validator's voting power. - ``power``: The validator's voting power.
- ``name``: Name of the validator (optional). - ``name``: Name of the validator (optional).
- ``app_hash``: The expected application hash (as returned by the - ``app_hash``: The expected application hash (as returned by the
``Commit`` ABCI message) upon genesis. If the app's hash does not ``ResponseInfo`` ABCI message) upon genesis. If the app's hash does not
match, a warning message is printed. match, Tendermint will panic.
- ``app_state``: The application state (e.g. initial distribution of tokens). - ``app_state``: The application state (e.g. initial distribution of tokens).
Sample genesis.json Sample genesis.json


@@ -0,0 +1,114 @@
# Light client
A light client is a process that connects to the Tendermint Full Node(s) and then tries to verify the Merkle proofs
about the blockchain application. In this document we describe mechanisms that ensure that the Tendermint light client
has the same level of security as Full Node processes (without being itself a Full Node).
To be able to validate a Merkle proof, a light client needs to validate the blockchain header that contains the root app hash.
Validating a blockchain header in Tendermint consists of verifying that the header is committed (signed) by >2/3 of the
voting power of the corresponding validator set. As the validator set is dynamic (it changes over time), one of the
core functionalities of the light client is updating the current validator set, which is then used to verify the
blockchain header, and further the corresponding Merkle proofs.
For the purpose of this light client specification, we assume that the Tendermint Full Node exposes the following functions over
Tendermint RPC:
```golang
Header(height int64) (SignedHeader, error) // returns signed header for the given height
Validators(height int64) (ResultValidators, error) // returns validator set for the given height
LastHeader(valSetNumber int64) (SignedHeader, error) // returns last header signed by the validator set with the given validator set number
type SignedHeader struct {
Header Header
Commit Commit
ValSetNumber int64
}
type ResultValidators struct {
BlockHeight int64
Validators []Validator
// time the current validator set is initialised, i.e, time of the last validator change before header BlockHeight
ValSetTime int64
}
```
We assume that Tendermint keeps track of the validator set changes and that each time a validator set is changed it is
assigned the next sequence number. We call this number the validator set sequence number. Tendermint also remembers
the Time from the header when the next validator set is initialised (starts to be in power), and we refer to this time
as validator set init time.
Furthermore, we assume that each validator set change is signed (committed) by the current validator set. More precisely,
given a block `H` that contains transactions that are modifying the current validator set, the Merkle root hash of the next
validator set (modified based on transactions from block H) will be in block `H+1` (and signed by the current validator
set), and then starting from the block `H+2`, it will be signed by the next validator set.
Note that the real Tendermint RPC API is slightly different (for example, response messages contain more data and function
names are slightly different); we shortened (and modified) it for the purpose of this document to make the spec more
clear and simple. Furthermore, note that in case of the third function, the returned header has `ValSetNumber` equals to
`valSetNumber+1`.
Locally, light client manages the following state:
```golang
valSet []Validator // current validator set (last known and verified validator set)
valSetNumber int64 // sequence number of the current validator set
valSetHash []byte // hash of the current validator set
valSetTime int64 // time when the current validator set is initialised
```
The light client is initialised with the trusted validator set, for example based on the known validator set hash,
validator set sequence number and the validator set init time.
The core of the light client logic is captured by the VerifyAndUpdate function that is used to 1) verify if the given header is valid,
and 2) update the validator set (when the given header is valid and it is more recent than the seen headers).
```golang
VerifyAndUpdate(signedHeader SignedHeader):
assertThat signedHeader.valSetNumber >= valSetNumber
if isValid(signedHeader) and signedHeader.Header.Time <= valSetTime + UNBONDING_PERIOD then
setValidatorSet(signedHeader)
return true
else
updateValidatorSet(signedHeader.ValSetNumber)
return VerifyAndUpdate(signedHeader)
isValid(signedHeader SignedHeader):
valSetOfTheHeader = Validators(signedHeader.Header.Height)
assertThat Hash(valSetOfTheHeader) == signedHeader.Header.ValSetHash
assertThat signedHeader is passing basic validation
if votingPower(signedHeader.Commit) > 2/3 * votingPower(valSetOfTheHeader) then return true
else
return false
setValidatorSet(signedHeader SignedHeader):
nextValSet = Validators(signedHeader.Header.Height)
assertThat Hash(nextValSet) == signedHeader.Header.ValidatorsHash
valSet = nextValSet.Validators
valSetHash = signedHeader.Header.ValidatorsHash
valSetNumber = signedHeader.ValSetNumber
valSetTime = nextValSet.ValSetTime
votingPower(commit Commit):
votingPower = 0
for each precommit in commit.Precommits do:
if precommit.ValidatorAddress is in valSet and signature of the precommit verifies then
votingPower += valSet[precommit.ValidatorAddress].VotingPower
return votingPower
votingPower(validatorSet []Validator):
for each validator in validatorSet do:
votingPower += validator.VotingPower
return votingPower
updateValidatorSet(valSetNumberOfTheHeader):
while valSetNumber != valSetNumberOfTheHeader do
signedHeader = LastHeader(valSetNumber)
if isValid(signedHeader) then
setValidatorSet(signedHeader)
else return error
return
```
Note that in the logic above we assume that the light client will always go upward with respect to header verifications,
i.e., that it will always be used to verify more recent headers. In case a light client needs to be used to verify older
headers (go backward) the same mechanisms and similar logic can be used. In case a call to the FullNode or subsequent
checks fail, a light client needs to implement some recovery strategy, for example connecting to a different Full Node.
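One possible shape for such a recovery strategy, sketched in the same loose golang style as the pseudocode above (the `FullNodeClient` type and its method are placeholders, not part of this spec):

```golang
// Illustrative only: try VerifyAndUpdate against several full nodes in turn,
// falling back to the next one when a call or a check fails.
func VerifyWithFallback(nodes []FullNodeClient, signedHeader SignedHeader) error {
	var lastErr error
	for _, node := range nodes {
		if err := node.VerifyAndUpdate(signedHeader); err != nil {
			lastErr = err
			continue // this full node failed; try another one
		}
		return nil
	}
	return lastErr
}
```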


@@ -83,7 +83,7 @@ The Tendermint Version Handshake allows the peers to exchange their NodeInfo:
```golang ```golang
type NodeInfo struct { type NodeInfo struct {
PubKey crypto.PubKey ID p2p.ID
Moniker string Moniker string
Network string Network string
RemoteAddr string RemoteAddr string
@@ -95,7 +95,7 @@ type NodeInfo struct {
``` ```
The connection is disconnected if: The connection is disconnected if:
- `peer.NodeInfo.PubKey != peer.PubKey` - `peer.NodeInfo.ID` is not equal to `peerConn.ID`
- `peer.NodeInfo.Version` is not formatted as `X.X.X` where X are integers known as Major, Minor, and Revision - `peer.NodeInfo.Version` is not formatted as `X.X.X` where X are integers known as Major, Minor, and Revision
- `peer.NodeInfo.Version` Major is not the same as ours - `peer.NodeInfo.Version` Major is not the same as ours
- `peer.NodeInfo.Version` Minor is not the same as ours - `peer.NodeInfo.Version` Minor is not the same as ours
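A small sketch of the version rule from the list above, assuming the standard `fmt`, `strconv`, and `strings` imports; the helper name is illustrative and not the actual implementation:

```go
// compatibleVersions sketches the disconnection rule above: both versions must
// parse as "Major.Minor.Revision" with integer components, and Major and Minor
// must match; Revision is allowed to differ.
func compatibleVersions(ours, theirs string) (bool, error) {
	parse := func(v string) ([3]int, error) {
		var out [3]int
		parts := strings.SplitN(v, ".", 3)
		if len(parts) != 3 {
			return out, fmt.Errorf("version %q is not formatted as X.X.X", v)
		}
		for i, p := range parts {
			n, err := strconv.Atoi(p)
			if err != nil {
				return out, err
			}
			out[i] = n
		}
		return out, nil
	}
	o, err := parse(ours)
	if err != nil {
		return false, err
	}
	t, err := parse(theirs)
	if err != nil {
		return false, err
	}
	return o[0] == t[0] && o[1] == t[1], nil
}
```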


@@ -62,6 +62,13 @@ such as the Web-of-Trust or Certificate Authorities. In our case, we can
use the blockchain itself as a certificate authority to ensure that we use the blockchain itself as a certificate authority to ensure that we
are connected to at least one validator. are connected to at least one validator.
Config
------
Authenticated encryption is enabled by default. If you wish to use another
authentication scheme or your peers are connected via VPN, you can turn it off
by setting ``auth_enc`` to ``false`` in the config file.
Additional Reading Additional Reading
------------------ ------------------


@@ -74,20 +74,17 @@ RPC server, for example:
curl http://localhost:46657/broadcast_tx_commit?tx=\"abcd\" curl http://localhost:46657/broadcast_tx_commit?tx=\"abcd\"
For handling responses, we recommend you `install the jsonpp
tool <http://jmhodges.github.io/jsonpp/>`__ to pretty print the JSON.
We can see the chain's status at the ``/status`` end-point: We can see the chain's status at the ``/status`` end-point:
:: ::
curl http://localhost:46657/status | jsonpp curl http://localhost:46657/status | json_pp
and the ``latest_app_hash`` in particular: and the ``latest_app_hash`` in particular:
:: ::
curl http://localhost:46657/status | jsonpp | grep app_hash curl http://localhost:46657/status | json_pp | grep latest_app_hash
Visit http://localhost:46657 in your browser to see the list of other Visit http://localhost:46657 in your browser to see the list of other
endpoints. Some take no arguments (like ``/status``), while others endpoints. Some take no arguments (like ``/status``), while others
@@ -260,19 +257,19 @@ When ``tendermint init`` is run, both a ``genesis.json`` and
:: ::
{ {
"app_hash": "", "validators" : [
"chain_id": "test-chain-HZw6TB",
"genesis_time": "0001-01-01T00:00:00.000Z",
"validators": [
{ {
"power": 10, "pub_key" : {
"name": "", "value" : "h3hk+QE8c6QLTySp8TcfzclJw/BG79ziGB/pIA+DfPE=",
"pub_key": [ "type" : "AC26791624DE60"
1, },
"5770B4DD55B3E08B7F5711C48B516347D8C33F47C30C226315D21AA64E0DFF2E" "power" : 10,
] "name" : ""
} }
] ],
"app_hash" : "",
"chain_id" : "test-chain-rDlYSN",
"genesis_time" : "0001-01-01T00:00:00Z"
} }
And the ``priv_validator.json``: And the ``priv_validator.json``:
@@ -280,20 +277,18 @@ And the ``priv_validator.json``:
:: ::
{ {
"address": "4F4D895F882A18E1D1FC608D102601DA8D3570E5", "last_step" : 0,
"last_height": 0, "last_round" : 0,
"last_round": 0, "address" : "B788DEDE4F50AD8BC9462DE76741CCAFF87D51E2",
"last_signature": null, "pub_key" : {
"last_signbytes": "", "value" : "h3hk+QE8c6QLTySp8TcfzclJw/BG79ziGB/pIA+DfPE=",
"last_step": 0, "type" : "AC26791624DE60"
"priv_key": [ },
1, "last_height" : 0,
"F9FA3CD435BDAE54D0BCA8F1BC289D718C23D855C6DB21E8543F5E4F457E62805770B4DD55B3E08B7F5711C48B516347D8C33F47C30C226315D21AA64E0DFF2E" "priv_key" : {
], "value" : "JPivl82x+LfVkp8i3ztoTjY6c6GJ4pBxQexErOCyhwqHeGT5ATxzpAtPJKnxNx/NyUnD8Ebv3OIYH+kgD4N88Q==",
"pub_key": [ "type" : "954568A3288910"
1, }
"5770B4DD55B3E08B7F5711C48B516347D8C33F47C30C226315D21AA64E0DFF2E"
]
} }
The ``priv_validator.json`` actually contains a private key, and should The ``priv_validator.json`` actually contains a private key, and should
@@ -334,14 +329,14 @@ For instance,
:: ::
tendermint node --p2p.seeds "1.2.3.4:46656,5.6.7.8:46656" tendermint node --p2p.seeds "f9baeaa15fedf5e1ef7448dd60f46c01f1a9e9c4@1.2.3.4:46656,0491d373a8e0fcf1023aaf18c51d6a1d0d4f31bd@5.6.7.8:46656"
Alternatively, you can use the ``/dial_seeds`` endpoint of the RPC to Alternatively, you can use the ``/dial_seeds`` endpoint of the RPC to
specify seeds for a running node to connect to: specify seeds for a running node to connect to:
:: ::
curl 'localhost:46657/dial_seeds?seeds=\["1.2.3.4:46656","5.6.7.8:46656"\]' curl 'localhost:46657/dial_seeds?seeds=\["f9baeaa15fedf5e1ef7448dd60f46c01f1a9e9c4@1.2.3.4:46656","0491d373a8e0fcf1023aaf18c51d6a1d0d4f31bd@5.6.7.8:46656"\]'
Note, if the peer-exchange protocol (PEX) is enabled (default), you should not Note, if the peer-exchange protocol (PEX) is enabled (default), you should not
normally need seeds after the first start. Peers will be gossipping about known normally need seeds after the first start. Peers will be gossipping about known
@@ -355,8 +350,8 @@ core instance.
:: ::
tendermint node --p2p.persistent_peers "10.11.12.13:46656,10.11.12.14:46656" tendermint node --p2p.persistent_peers "429fcf25974313b95673f58d77eacdd434402665@10.11.12.13:46656,96663a3dd0d7b9d17d4c8211b191af259621c693@10.11.12.14:46656"
curl 'localhost:46657/dial_peers?persistent=true&peers=\["1.2.3.4:46656","5.6.7.8:46656"\]' curl 'localhost:46657/dial_peers?persistent=true&peers=\["429fcf25974313b95673f58d77eacdd434402665@10.11.12.13:46656","96663a3dd0d7b9d17d4c8211b191af259621c693@10.11.12.14:46656"\]'
Adding a Non-Validator Adding a Non-Validator
~~~~~~~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~
@@ -387,20 +382,18 @@ Now we can update our genesis file. For instance, if the new
:: ::
{ {
"address": "AC379688105901436A34A65F185C115B8BB277A1", "address" : "5AF49D2A2D4F5AD4C7C8C4CC2FB020131E9C4902",
"last_height": 0, "pub_key" : {
"last_round": 0, "value" : "l9X9+fjkeBzDfPGbUM7AMIRE6uJN78zN5+lk5OYotek=",
"last_signature": null, "type" : "AC26791624DE60"
"last_signbytes": "", },
"last_step": 0, "priv_key" : {
"priv_key": [ "value" : "EDJY9W6zlAw+su6ITgTKg2nTZcHAH1NMTW5iwlgmNDuX1f35+OR4HMN88ZtQzsAwhETq4k3vzM3n6WTk5ii16Q==",
1, "type" : "954568A3288910"
"0D2ED337D748ADF79BE28559B9E59EBE1ABBA0BAFE6D65FCB9797985329B950C8F2B5AACAACC9FCE41881349743B0CFDE190DF0177744568D4E82A18F0B7DF94" },
], "last_step" : 0,
"pub_key": [ "last_round" : 0,
1, "last_height" : 0
"8F2B5AACAACC9FCE41881349743B0CFDE190DF0177744568D4E82A18F0B7DF94"
]
} }
then the new ``genesis.json`` will be: then the new ``genesis.json`` will be:
@@ -408,27 +401,27 @@ then the new ``genesis.json`` will be:
:: ::
{ {
"app_hash": "", "validators" : [
"chain_id": "test-chain-HZw6TB",
"genesis_time": "0001-01-01T00:00:00.000Z",
"validators": [
{ {
"power": 10, "pub_key" : {
"name": "", "value" : "h3hk+QE8c6QLTySp8TcfzclJw/BG79ziGB/pIA+DfPE=",
"pub_key": [ "type" : "AC26791624DE60"
1, },
"5770B4DD55B3E08B7F5711C48B516347D8C33F47C30C226315D21AA64E0DFF2E" "power" : 10,
] "name" : ""
}, },
{ {
"power": 10, "pub_key" : {
"name": "", "value" : "l9X9+fjkeBzDfPGbUM7AMIRE6uJN78zN5+lk5OYotek=",
"pub_key": [ "type" : "AC26791624DE60"
1, },
"8F2B5AACAACC9FCE41881349743B0CFDE190DF0177744568D4E82A18F0B7DF94" "power" : 10,
] "name" : ""
} }
] ],
"app_hash" : "",
"chain_id" : "test-chain-rDlYSN",
"genesis_time" : "0001-01-01T00:00:00Z"
} }
Update the ``genesis.json`` in ``~/.tendermint/config``. Copy the genesis file Update the ``genesis.json`` in ``~/.tendermint/config``. Copy the genesis file

View File

@@ -1,12 +1,11 @@
package evidence package evidence
import ( import (
"bytes"
"fmt" "fmt"
"reflect" "reflect"
"time" "time"
wire "github.com/tendermint/go-wire" amino "github.com/tendermint/go-amino"
"github.com/tendermint/tmlibs/log" "github.com/tendermint/tmlibs/log"
"github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/p2p"
@@ -16,7 +15,7 @@ import (
const ( const (
EvidenceChannel = byte(0x38) EvidenceChannel = byte(0x38)
maxEvidenceMessageSize = 1048576 // 1MB TODO make it configurable maxMsgSize = 1048576 // 1MB TODO make it configurable
broadcastEvidenceIntervalS = 60 // broadcast uncommitted evidence this often broadcastEvidenceIntervalS = 60 // broadcast uncommitted evidence this often
) )
@@ -68,7 +67,7 @@ func (evR *EvidenceReactor) AddPeer(peer p2p.Peer) {
// the rest will be sent by the broadcastRoutine // the rest will be sent by the broadcastRoutine
evidences := evR.evpool.PriorityEvidence() evidences := evR.evpool.PriorityEvidence()
msg := &EvidenceListMessage{evidences} msg := &EvidenceListMessage{evidences}
success := peer.Send(EvidenceChannel, struct{ EvidenceMessage }{msg}) success := peer.Send(EvidenceChannel, cdc.MustMarshalBinaryBare(msg))
if !success { if !success {
// TODO: remove peer ? // TODO: remove peer ?
} }
@@ -82,7 +81,7 @@ func (evR *EvidenceReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
// Receive implements Reactor. // Receive implements Reactor.
// It adds any received evidence to the evpool. // It adds any received evidence to the evpool.
func (evR *EvidenceReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { func (evR *EvidenceReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
_, msg, err := DecodeMessage(msgBytes) msg, err := DecodeMessage(msgBytes)
if err != nil { if err != nil {
evR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes) evR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes)
evR.Switch.StopPeerForError(src, err) evR.Switch.StopPeerForError(src, err)
@@ -119,7 +118,7 @@ func (evR *EvidenceReactor) broadcastRoutine() {
case evidence := <-evR.evpool.EvidenceChan(): case evidence := <-evR.evpool.EvidenceChan():
// broadcast some new evidence // broadcast some new evidence
msg := &EvidenceListMessage{[]types.Evidence{evidence}} msg := &EvidenceListMessage{[]types.Evidence{evidence}}
evR.Switch.Broadcast(EvidenceChannel, struct{ EvidenceMessage }{msg}) evR.Switch.Broadcast(EvidenceChannel, cdc.MustMarshalBinaryBare(msg))
// TODO: Broadcast runs asynchronously, so this should wait on the successChan // TODO: Broadcast runs asynchronously, so this should wait on the successChan
// in another routine before marking to be proper. // in another routine before marking to be proper.
@@ -127,7 +126,7 @@ func (evR *EvidenceReactor) broadcastRoutine() {
case <-ticker.C: case <-ticker.C:
// broadcast all pending evidence // broadcast all pending evidence
msg := &EvidenceListMessage{evR.evpool.PendingEvidence()} msg := &EvidenceListMessage{evR.evpool.PendingEvidence()}
evR.Switch.Broadcast(EvidenceChannel, struct{ EvidenceMessage }{msg}) evR.Switch.Broadcast(EvidenceChannel, cdc.MustMarshalBinaryBare(msg))
case <-evR.Quit(): case <-evR.Quit():
return return
} }
@@ -137,24 +136,22 @@ func (evR *EvidenceReactor) broadcastRoutine() {
//----------------------------------------------------------------------------- //-----------------------------------------------------------------------------
// Messages // Messages
const (
msgTypeEvidence = byte(0x01)
)
// EvidenceMessage is a message sent or received by the EvidenceReactor. // EvidenceMessage is a message sent or received by the EvidenceReactor.
type EvidenceMessage interface{} type EvidenceMessage interface{}
var _ = wire.RegisterInterface( func RegisterEvidenceMessages(cdc *amino.Codec) {
struct{ EvidenceMessage }{}, cdc.RegisterInterface((*EvidenceMessage)(nil), nil)
wire.ConcreteType{&EvidenceListMessage{}, msgTypeEvidence}, cdc.RegisterConcrete(&EvidenceListMessage{},
) "tendermint/evidence/EvidenceListMessage", nil)
}
// DecodeMessage decodes a byte-array into a EvidenceMessage. // DecodeMessage decodes a byte-array into a EvidenceMessage.
func DecodeMessage(bz []byte) (msgType byte, msg EvidenceMessage, err error) { func DecodeMessage(bz []byte) (msg EvidenceMessage, err error) {
msgType = bz[0] if len(bz) > maxMsgSize {
n := new(int) return msg, fmt.Errorf("Msg exceeds max size (%d > %d)",
r := bytes.NewReader(bz) len(bz), maxMsgSize)
msg = wire.ReadBinary(struct{ EvidenceMessage }{}, r, maxEvidenceMessageSize, n, &err).(struct{ EvidenceMessage }).EvidenceMessage }
err = cdc.UnmarshalBinaryBare(bz, &msg)
return return
} }
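The new DecodeMessage relies on the codec populated by RegisterEvidenceMessages: the interface is registered first, then each concrete message, after which a marshalled concrete value can be decoded back through the interface. A self-contained sketch of that round trip with stand-in types (Message and ListMessage are illustrations, not the reactor's real types):

    package main

    import (
        "fmt"

        amino "github.com/tendermint/go-amino"
    )

    // Stand-ins for EvidenceMessage and EvidenceListMessage.
    type Message interface{}

    type ListMessage struct {
        Items []string
    }

    var cdc = amino.NewCodec()

    func init() {
        // Mirrors RegisterEvidenceMessages: register the interface, then the concrete type.
        cdc.RegisterInterface((*Message)(nil), nil)
        cdc.RegisterConcrete(&ListMessage{}, "example/ListMessage", nil)
    }

    func main() {
        bz := cdc.MustMarshalBinaryBare(&ListMessage{Items: []string{"a", "b"}})

        var msg Message
        if err := cdc.UnmarshalBinaryBare(bz, &msg); err != nil {
            panic(err)
        }
        fmt.Printf("%T %v\n", msg, msg) // *main.ListMessage &{[a b]}
    }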

View File

@@ -3,7 +3,6 @@ package evidence
import ( import (
"fmt" "fmt"
wire "github.com/tendermint/go-wire"
"github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/types"
dbm "github.com/tendermint/tmlibs/db" dbm "github.com/tendermint/tmlibs/db"
) )
@@ -104,7 +103,10 @@ func (store *EvidenceStore) ListEvidence(prefixKey string) (evidence []types.Evi
val := iter.Value() val := iter.Value()
var ei EvidenceInfo var ei EvidenceInfo
wire.ReadBinaryBytes(val, &ei) err := cdc.UnmarshalBinaryBare(val, &ei)
if err != nil {
panic(err)
}
evidence = append(evidence, ei.Evidence) evidence = append(evidence, ei.Evidence)
} }
return evidence return evidence
@@ -119,7 +121,10 @@ func (store *EvidenceStore) GetEvidence(height int64, hash []byte) *EvidenceInfo
return nil return nil
} }
var ei EvidenceInfo var ei EvidenceInfo
wire.ReadBinaryBytes(val, &ei) err := cdc.UnmarshalBinaryBare(val, &ei)
if err != nil {
panic(err)
}
return &ei return &ei
} }
@@ -137,7 +142,7 @@ func (store *EvidenceStore) AddNewEvidence(evidence types.Evidence, priority int
Priority: priority, Priority: priority,
Evidence: evidence, Evidence: evidence,
} }
eiBytes := wire.BinaryBytes(ei) eiBytes := cdc.MustMarshalBinaryBare(ei)
// add it to the store // add it to the store
key := keyOutqueue(evidence, priority) key := keyOutqueue(evidence, priority)
@@ -171,7 +176,7 @@ func (store *EvidenceStore) MarkEvidenceAsCommitted(evidence types.Evidence) {
ei.Committed = true ei.Committed = true
lookupKey := keyLookup(evidence) lookupKey := keyLookup(evidence)
store.db.SetSync(lookupKey, wire.BinaryBytes(ei)) store.db.SetSync(lookupKey, cdc.MustMarshalBinaryBare(ei))
} }
//--------------------------------------------------- //---------------------------------------------------
@@ -181,6 +186,9 @@ func (store *EvidenceStore) getEvidenceInfo(evidence types.Evidence) EvidenceInf
key := keyLookup(evidence) key := keyLookup(evidence)
var ei EvidenceInfo var ei EvidenceInfo
b := store.db.Get(key) b := store.db.Get(key)
wire.ReadBinaryBytes(b, &ei) err := cdc.UnmarshalBinaryBare(b, &ei)
if err != nil {
panic(err)
}
return ei return ei
} }
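The store changes follow the same pattern: stored values are now amino bytes, and a decode failure panics because it indicates a corrupt record. A rough sketch against an in-memory tmlibs DB, with EvidenceInfo replaced by a stand-in struct (the key layout here is illustrative only):

    package main

    import (
        "fmt"

        amino "github.com/tendermint/go-amino"
        dbm "github.com/tendermint/tmlibs/db"
    )

    // Stand-in for the store's EvidenceInfo record.
    type evidenceInfoStub struct {
        Committed bool
        Priority  int64
    }

    var cdc = amino.NewCodec()

    func main() {
        db := dbm.NewMemDB()
        key := []byte("evidence/lookup/abcd") // illustrative key, not the real layout

        // AddNewEvidence now writes amino bytes instead of go-wire bytes.
        db.SetSync(key, cdc.MustMarshalBinaryBare(evidenceInfoStub{Priority: 10}))

        // getEvidenceInfo's counterpart: decode, and panic on a corrupt record.
        var ei evidenceInfoStub
        if err := cdc.UnmarshalBinaryBare(db.Get(key), &ei); err != nil {
            panic(err)
        }
        fmt.Printf("%+v\n", ei)
    }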

View File

@@ -4,7 +4,6 @@ import (
"testing" "testing"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
wire "github.com/tendermint/go-wire"
"github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/types"
dbm "github.com/tendermint/tmlibs/db" dbm "github.com/tendermint/tmlibs/db"
) )
@@ -108,15 +107,3 @@ func TestStorePriority(t *testing.T) {
assert.Equal(ev, cases[i].ev) assert.Equal(ev, cases[i].ev)
} }
} }
//-------------------------------------------
const (
evidenceTypeMockGood = byte(0x01)
evidenceTypeMockBad = byte(0x02)
)
var _ = wire.RegisterInterface(
struct{ types.Evidence }{},
wire.ConcreteType{types.MockGoodEvidence{}, evidenceTypeMockGood},
wire.ConcreteType{types.MockBadEvidence{}, evidenceTypeMockBad},
)

25
evidence/wire.go Normal file
View File

@@ -0,0 +1,25 @@
package evidence
import (
amino "github.com/tendermint/go-amino"
"github.com/tendermint/go-crypto"
"github.com/tendermint/tendermint/types"
)
var cdc = amino.NewCodec()
func init() {
RegisterEvidenceMessages(cdc)
crypto.RegisterAmino(cdc)
types.RegisterEvidences(cdc)
RegisterMockEvidences(cdc) // For testing
}
//-------------------------------------------
func RegisterMockEvidences(cdc *amino.Codec) {
cdc.RegisterConcrete(types.MockGoodEvidence{},
"tendermint/MockGoodEvidence", nil)
cdc.RegisterConcrete(types.MockBadEvidence{},
"tendermint/MockBadEvidence", nil)
}

View File

@@ -93,7 +93,7 @@ func (p *provider) GetLatestCommit() (*ctypes.ResultCommit, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
return p.node.Commit(&status.LatestBlockHeight) return p.node.Commit(&status.SyncInfo.LatestBlockHeight)
} }
// CommitFromResult ... // CommitFromResult ...

View File

@@ -1,13 +1,11 @@
package files package files
import ( import (
"encoding/json" "io/ioutil"
"os" "os"
"github.com/pkg/errors" "github.com/pkg/errors"
wire "github.com/tendermint/go-wire"
"github.com/tendermint/tendermint/lite" "github.com/tendermint/tendermint/lite"
liteErr "github.com/tendermint/tendermint/lite/errors" liteErr "github.com/tendermint/tendermint/lite/errors"
) )
@@ -19,7 +17,7 @@ const (
MaxFullCommitSize = 1024 * 1024 MaxFullCommitSize = 1024 * 1024
) )
// SaveFullCommit exports the seed in binary / go-wire style // SaveFullCommit exports the seed in binary / go-amino style
func SaveFullCommit(fc lite.FullCommit, path string) error { func SaveFullCommit(fc lite.FullCommit, path string) error {
f, err := os.Create(path) f, err := os.Create(path)
if err != nil { if err != nil {
@@ -27,9 +25,11 @@ func SaveFullCommit(fc lite.FullCommit, path string) error {
} }
defer f.Close() defer f.Close()
var n int _, err = cdc.MarshalBinaryWriter(f, fc)
wire.WriteBinary(fc, f, &n, &err) if err != nil {
return errors.WithStack(err) return errors.WithStack(err)
}
return nil
} }
// SaveFullCommitJSON exports the seed in a json format // SaveFullCommitJSON exports the seed in a json format
@@ -39,9 +39,15 @@ func SaveFullCommitJSON(fc lite.FullCommit, path string) error {
return errors.WithStack(err) return errors.WithStack(err)
} }
defer f.Close() defer f.Close()
stream := json.NewEncoder(f) bz, err := cdc.MarshalJSON(fc)
err = stream.Encode(fc) if err != nil {
return errors.WithStack(err) return errors.WithStack(err)
}
_, err = f.Write(bz)
if err != nil {
return errors.WithStack(err)
}
return nil
} }
// LoadFullCommit loads the full commit from the file system. // LoadFullCommit loads the full commit from the file system.
@@ -56,9 +62,11 @@ func LoadFullCommit(path string) (lite.FullCommit, error) {
} }
defer f.Close() defer f.Close()
var n int _, err = cdc.UnmarshalBinaryReader(f, &fc, 0)
wire.ReadBinaryPtr(&fc, f, MaxFullCommitSize, &n, &err) if err != nil {
return fc, errors.WithStack(err) return fc, errors.WithStack(err)
}
return fc, nil
} }
// LoadFullCommitJSON loads the commit from the file system in JSON format. // LoadFullCommitJSON loads the commit from the file system in JSON format.
@@ -73,7 +81,13 @@ func LoadFullCommitJSON(path string) (lite.FullCommit, error) {
} }
defer f.Close() defer f.Close()
stream := json.NewDecoder(f) bz, err := ioutil.ReadAll(f)
err = stream.Decode(&fc) if err != nil {
return fc, errors.WithStack(err) return fc, errors.WithStack(err)
}
err = cdc.UnmarshalJSON(bz, &fc)
if err != nil {
return fc, errors.WithStack(err)
}
return fc, nil
} }
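SaveFullCommit and LoadFullCommit now stream length-prefixed amino straight to and from the file. The shape of that round trip, reduced to a stand-in struct so the sketch has no lite dependencies (file name and fields are made up):

    package main

    import (
        "os"

        amino "github.com/tendermint/go-amino"
    )

    // Stand-in for lite.FullCommit.
    type commitStub struct {
        Height int64
        Hash   []byte
    }

    var cdc = amino.NewCodec()

    func main() {
        f, err := os.Create("commit.bin")
        if err != nil {
            panic(err)
        }
        // Length-prefixed binary write, as SaveFullCommit now does.
        if _, err := cdc.MarshalBinaryWriter(f, commitStub{Height: 7, Hash: []byte{0xDE, 0xAD}}); err != nil {
            panic(err)
        }
        f.Close()

        f, err = os.Open("commit.bin")
        if err != nil {
            panic(err)
        }
        defer f.Close()

        var fc commitStub
        // LoadFullCommit passes 0 as the size limit as well.
        if _, err := cdc.UnmarshalBinaryReader(f, &fc, 0); err != nil {
            panic(err)
        }
    }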

12
lite/files/wire.go Normal file
View File

@@ -0,0 +1,12 @@
package files
import (
amino "github.com/tendermint/go-amino"
"github.com/tendermint/go-crypto"
)
var cdc = amino.NewCodec()
func init() {
crypto.RegisterAmino(cdc)
}

View File

@@ -23,7 +23,7 @@ type ValKeys []crypto.PrivKey
func GenValKeys(n int) ValKeys { func GenValKeys(n int) ValKeys {
res := make(ValKeys, n) res := make(ValKeys, n)
for i := range res { for i := range res {
res[i] = crypto.GenPrivKeyEd25519().Wrap() res[i] = crypto.GenPrivKeyEd25519()
} }
return res return res
} }
@@ -32,7 +32,7 @@ func GenValKeys(n int) ValKeys {
func (v ValKeys) Change(i int) ValKeys { func (v ValKeys) Change(i int) ValKeys {
res := make(ValKeys, len(v)) res := make(ValKeys, len(v))
copy(res, v) copy(res, v)
res[i] = crypto.GenPrivKeyEd25519().Wrap() res[i] = crypto.GenPrivKeyEd25519()
return res return res
} }
@@ -46,7 +46,7 @@ func (v ValKeys) Extend(n int) ValKeys {
func GenSecpValKeys(n int) ValKeys { func GenSecpValKeys(n int) ValKeys {
res := make(ValKeys, n) res := make(ValKeys, n)
for i := range res { for i := range res {
res[i] = crypto.GenPrivKeySecp256k1().Wrap() res[i] = crypto.GenPrivKeySecp256k1()
} }
return res return res
} }

View File

@@ -11,11 +11,17 @@ import (
) )
func ValidateBlockMeta(meta *types.BlockMeta, check lite.Commit) error { func ValidateBlockMeta(meta *types.BlockMeta, check lite.Commit) error {
if meta == nil {
return errors.New("expecting a non-nil BlockMeta")
}
// TODO: check the BlockID?? // TODO: check the BlockID??
return ValidateHeader(meta.Header, check) return ValidateHeader(meta.Header, check)
} }
func ValidateBlock(meta *types.Block, check lite.Commit) error { func ValidateBlock(meta *types.Block, check lite.Commit) error {
if meta == nil {
return errors.New("expecting a non-nil Block")
}
err := ValidateHeader(meta.Header, check) err := ValidateHeader(meta.Header, check)
if err != nil { if err != nil {
return err return err
@@ -27,6 +33,9 @@ func ValidateBlock(meta *types.Block, check lite.Commit) error {
} }
func ValidateHeader(head *types.Header, check lite.Commit) error { func ValidateHeader(head *types.Header, check lite.Commit) error {
if head == nil {
return errors.New("expecting a non-nil Header")
}
// make sure they are for the same height (obvious fail) // make sure they are for the same height (obvious fail)
if head.Height != check.Height() { if head.Height != check.Height() {
return certerr.ErrHeightMismatch(head.Height, check.Height()) return certerr.ErrHeightMismatch(head.Height, check.Height())

View File

@@ -3,10 +3,12 @@ package proxy
import ( import (
"net/http" "net/http"
amino "github.com/tendermint/go-amino"
"github.com/tendermint/tmlibs/log" "github.com/tendermint/tmlibs/log"
rpcclient "github.com/tendermint/tendermint/rpc/client" rpcclient "github.com/tendermint/tendermint/rpc/client"
"github.com/tendermint/tendermint/rpc/core" "github.com/tendermint/tendermint/rpc/core"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
rpc "github.com/tendermint/tendermint/rpc/lib/server" rpc "github.com/tendermint/tendermint/rpc/lib/server"
) )
@@ -23,13 +25,15 @@ func StartProxy(c rpcclient.Client, listenAddr string, logger log.Logger) error
return err return err
} }
cdc := amino.NewCodec()
ctypes.RegisterAmino(cdc)
r := RPCRoutes(c) r := RPCRoutes(c)
// build the handler... // build the handler...
mux := http.NewServeMux() mux := http.NewServeMux()
rpc.RegisterRPCFuncs(mux, r, logger) rpc.RegisterRPCFuncs(mux, r, cdc, logger)
wm := rpc.NewWebsocketManager(r, rpc.EventSubscriber(c)) wm := rpc.NewWebsocketManager(r, cdc, rpc.EventSubscriber(c))
wm.SetLogger(logger) wm.SetLogger(logger)
core.SetLogger(logger) core.SetLogger(logger)
mux.HandleFunc(wsEndpoint, wm.WebsocketHandler) mux.HandleFunc(wsEndpoint, wm.WebsocketHandler)

218
lite/proxy/validate_test.go Normal file
View File

@@ -0,0 +1,218 @@
package proxy_test
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/tendermint/tendermint/lite"
"github.com/tendermint/tendermint/lite/proxy"
"github.com/tendermint/tendermint/types"
)
var (
deadBeefTxs = types.Txs{[]byte("DE"), []byte("AD"), []byte("BE"), []byte("EF")}
deadBeefHash = deadBeefTxs.Hash()
testTime1 = time.Date(2018, 1, 1, 1, 1, 1, 1, time.UTC)
testTime2 = time.Date(2017, 1, 2, 1, 1, 1, 1, time.UTC)
)
var hdrHeight11 = &types.Header{
Height: 11,
Time: testTime1,
ValidatorsHash: []byte("Tendermint"),
}
func TestValidateBlock(t *testing.T) {
tests := []struct {
block *types.Block
commit lite.Commit
wantErr string
}{
{
block: nil, wantErr: "non-nil Block",
},
{
block: &types.Block{}, wantErr: "nil Header",
},
{
block: &types.Block{Header: new(types.Header)},
},
// Start Header.Height mismatch test
{
block: &types.Block{Header: &types.Header{Height: 10}},
commit: lite.Commit{Header: &types.Header{Height: 11}},
wantErr: "don't match - 10 vs 11",
},
{
block: &types.Block{Header: &types.Header{Height: 11}},
commit: lite.Commit{Header: &types.Header{Height: 11}},
},
// End Header.Height mismatch test
// Start Header.Hash mismatch test
{
block: &types.Block{Header: hdrHeight11},
commit: lite.Commit{Header: &types.Header{Height: 11}},
wantErr: "Headers don't match",
},
{
block: &types.Block{Header: hdrHeight11},
commit: lite.Commit{Header: hdrHeight11},
},
// End Header.Hash mismatch test
// Start Header.Data hash mismatch test
{
block: &types.Block{
Header: &types.Header{Height: 11},
Data: &types.Data{Txs: []types.Tx{[]byte("0xDE"), []byte("AD")}},
},
commit: lite.Commit{
Header: &types.Header{Height: 11},
Commit: &types.Commit{BlockID: types.BlockID{Hash: []byte("0xDEADBEEF")}},
},
wantErr: "Data hash doesn't match header",
},
{
block: &types.Block{
Header: &types.Header{Height: 11, DataHash: deadBeefHash},
Data: &types.Data{Txs: deadBeefTxs},
},
commit: lite.Commit{
Header: &types.Header{Height: 11},
Commit: &types.Commit{BlockID: types.BlockID{Hash: []byte("DEADBEEF")}},
},
},
// End Header.Data hash mismatch test
}
for i, tt := range tests {
err := proxy.ValidateBlock(tt.block, tt.commit)
if tt.wantErr != "" {
if err == nil {
assert.FailNowf(t, "Unexpectedly passed", "#%d", i)
} else {
assert.Contains(t, err.Error(), tt.wantErr, "#%d should contain the substring\n\n", i)
}
continue
}
assert.Nil(t, err, "#%d: expecting a nil error", i)
}
}
func TestValidateBlockMeta(t *testing.T) {
tests := []struct {
meta *types.BlockMeta
commit lite.Commit
wantErr string
}{
{
meta: nil, wantErr: "non-nil BlockMeta",
},
{
meta: &types.BlockMeta{}, wantErr: "non-nil Header",
},
{
meta: &types.BlockMeta{Header: new(types.Header)},
},
// Start Header.Height mismatch test
{
meta: &types.BlockMeta{Header: &types.Header{Height: 10}},
commit: lite.Commit{Header: &types.Header{Height: 11}},
wantErr: "don't match - 10 vs 11",
},
{
meta: &types.BlockMeta{Header: &types.Header{Height: 11}},
commit: lite.Commit{Header: &types.Header{Height: 11}},
},
// End Header.Height mismatch test
// Start Headers don't match test
{
meta: &types.BlockMeta{Header: hdrHeight11},
commit: lite.Commit{Header: &types.Header{Height: 11}},
wantErr: "Headers don't match",
},
{
meta: &types.BlockMeta{Header: hdrHeight11},
commit: lite.Commit{Header: hdrHeight11},
},
{
meta: &types.BlockMeta{
Header: &types.Header{
Height: 11,
ValidatorsHash: []byte("lite-test"),
// TODO: should be able to use empty time after Amino upgrade
Time: testTime1,
},
},
commit: lite.Commit{
Header: &types.Header{Height: 11, DataHash: deadBeefHash},
},
wantErr: "Headers don't match",
},
{
meta: &types.BlockMeta{
Header: &types.Header{
Height: 11, DataHash: deadBeefHash,
ValidatorsHash: []byte("Tendermint"),
Time: testTime1,
},
},
commit: lite.Commit{
Header: &types.Header{
Height: 11, DataHash: deadBeefHash,
ValidatorsHash: []byte("Tendermint"),
Time: testTime2,
},
Commit: &types.Commit{BlockID: types.BlockID{Hash: []byte("DEADBEEF")}},
},
wantErr: "Headers don't match",
},
{
meta: &types.BlockMeta{
Header: &types.Header{
Height: 11, DataHash: deadBeefHash,
ValidatorsHash: []byte("Tendermint"),
Time: testTime2,
},
},
commit: lite.Commit{
Header: &types.Header{
Height: 11, DataHash: deadBeefHash,
ValidatorsHash: []byte("Tendermint-x"),
Time: testTime2,
},
Commit: &types.Commit{BlockID: types.BlockID{Hash: []byte("DEADBEEF")}},
},
wantErr: "Headers don't match",
},
// End Headers don't match test
}
for i, tt := range tests {
err := proxy.ValidateBlockMeta(tt.meta, tt.commit)
if tt.wantErr != "" {
if err == nil {
assert.FailNowf(t, "Unexpectedly passed", "#%d: wanted error %q", i, tt.wantErr)
} else {
assert.Contains(t, err.Error(), tt.wantErr, "#%d should contain the substring\n\n", i)
}
continue
}
assert.Nil(t, err, "#%d: expecting a nil error", i)
}
}

View File

@@ -146,7 +146,7 @@ func (w Wrapper) Commit(height *int64) (*ctypes.ResultCommit, error) {
// } // }
// // check to validate it if possible, and drop if not valid // // check to validate it if possible, and drop if not valid
// switch t := tm.Unwrap().(type) { // switch t := tm.(type) {
// case types.EventDataNewBlockHeader: // case types.EventDataNewBlockHeader:
// err := verifyHeader(s.client, t.Header) // err := verifyHeader(s.client, t.Header)
// if err != nil { // if err != nil {

View File

@@ -1,13 +1,12 @@
package mempool package mempool
import ( import (
"bytes"
"fmt" "fmt"
"reflect" "reflect"
"time" "time"
abci "github.com/tendermint/abci/types" abci "github.com/tendermint/abci/types"
wire "github.com/tendermint/go-wire" amino "github.com/tendermint/go-amino"
"github.com/tendermint/tmlibs/clist" "github.com/tendermint/tmlibs/clist"
"github.com/tendermint/tmlibs/log" "github.com/tendermint/tmlibs/log"
@@ -19,7 +18,7 @@ import (
const ( const (
MempoolChannel = byte(0x30) MempoolChannel = byte(0x30)
maxMempoolMessageSize = 1048576 // 1MB TODO make it configurable maxMsgSize = 1048576 // 1MB TODO make it configurable
peerCatchupSleepIntervalMS = 100 // If peer is behind, sleep this amount peerCatchupSleepIntervalMS = 100 // If peer is behind, sleep this amount
) )
@@ -71,7 +70,7 @@ func (memR *MempoolReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
// Receive implements Reactor. // Receive implements Reactor.
// It adds any received transactions to the mempool. // It adds any received transactions to the mempool.
func (memR *MempoolReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { func (memR *MempoolReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
_, msg, err := DecodeMessage(msgBytes) msg, err := DecodeMessage(msgBytes)
if err != nil { if err != nil {
memR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes) memR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes)
memR.Switch.StopPeerForError(src, err) memR.Switch.StopPeerForError(src, err)
@@ -137,7 +136,7 @@ func (memR *MempoolReactor) broadcastTxRoutine(peer p2p.Peer) {
} }
// send memTx // send memTx
msg := &TxMessage{Tx: memTx.tx} msg := &TxMessage{Tx: memTx.tx}
success := peer.Send(MempoolChannel, struct{ MempoolMessage }{msg}) success := peer.Send(MempoolChannel, cdc.MustMarshalBinaryBare(msg))
if !success { if !success {
time.Sleep(peerCatchupSleepIntervalMS * time.Millisecond) time.Sleep(peerCatchupSleepIntervalMS * time.Millisecond)
continue continue
@@ -158,24 +157,21 @@ func (memR *MempoolReactor) broadcastTxRoutine(peer p2p.Peer) {
//----------------------------------------------------------------------------- //-----------------------------------------------------------------------------
// Messages // Messages
const (
msgTypeTx = byte(0x01)
)
// MempoolMessage is a message sent or received by the MempoolReactor. // MempoolMessage is a message sent or received by the MempoolReactor.
type MempoolMessage interface{} type MempoolMessage interface{}
var _ = wire.RegisterInterface( func RegisterMempoolMessages(cdc *amino.Codec) {
struct{ MempoolMessage }{}, cdc.RegisterInterface((*MempoolMessage)(nil), nil)
wire.ConcreteType{&TxMessage{}, msgTypeTx}, cdc.RegisterConcrete(&TxMessage{}, "tendermint/mempool/TxMessage", nil)
) }
// DecodeMessage decodes a byte-array into a MempoolMessage. // DecodeMessage decodes a byte-array into a MempoolMessage.
func DecodeMessage(bz []byte) (msgType byte, msg MempoolMessage, err error) { func DecodeMessage(bz []byte) (msg MempoolMessage, err error) {
msgType = bz[0] if len(bz) > maxMsgSize {
n := new(int) return msg, fmt.Errorf("Msg exceeds max size (%d > %d)",
r := bytes.NewReader(bz) len(bz), maxMsgSize)
msg = wire.ReadBinary(struct{ MempoolMessage }{}, r, maxMempoolMessageSize, n, &err).(struct{ MempoolMessage }).MempoolMessage }
err = cdc.UnmarshalBinaryBare(bz, &msg)
return return
} }
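As in the evidence reactor, the mempool's DecodeMessage now rejects oversized payloads before handing the bytes to amino. The guard itself is trivial; a stand-alone sketch with the decode call stubbed out:

    package main

    import "fmt"

    const maxMsgSize = 1048576 // 1MB, matching the reactor constant

    // decodeGuard mirrors the new DecodeMessage shape: bail out on oversized
    // input before any unmarshalling happens.
    func decodeGuard(bz []byte) error {
        if len(bz) > maxMsgSize {
            return fmt.Errorf("Msg exceeds max size (%d > %d)", len(bz), maxMsgSize)
        }
        // cdc.UnmarshalBinaryBare(bz, &msg) would run here in the real reactor.
        return nil
    }

    func main() {
        fmt.Println(decodeGuard(make([]byte, maxMsgSize+1))) // Msg exceeds max size ...
        fmt.Println(decodeGuard([]byte{0x01}))               // <nil>
    }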

9
mempool/wire.go Normal file
View File

@@ -0,0 +1,9 @@
package mempool
import amino "github.com/tendermint/go-amino"
var cdc = amino.NewCodec()
func init() {
RegisterMempoolMessages(cdc)
}

View File

@@ -2,16 +2,14 @@ package node
import ( import (
"bytes" "bytes"
"encoding/json"
"errors" "errors"
"fmt" "fmt"
"net" "net"
"net/http" "net/http"
"strings"
abci "github.com/tendermint/abci/types" abci "github.com/tendermint/abci/types"
amino "github.com/tendermint/go-amino"
crypto "github.com/tendermint/go-crypto" crypto "github.com/tendermint/go-crypto"
wire "github.com/tendermint/go-wire"
cmn "github.com/tendermint/tmlibs/common" cmn "github.com/tendermint/tmlibs/common"
dbm "github.com/tendermint/tmlibs/db" dbm "github.com/tendermint/tmlibs/db"
"github.com/tendermint/tmlibs/log" "github.com/tendermint/tmlibs/log"
@@ -26,6 +24,7 @@ import (
"github.com/tendermint/tendermint/p2p/trust" "github.com/tendermint/tendermint/p2p/trust"
"github.com/tendermint/tendermint/proxy" "github.com/tendermint/tendermint/proxy"
rpccore "github.com/tendermint/tendermint/rpc/core" rpccore "github.com/tendermint/tendermint/rpc/core"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
grpccore "github.com/tendermint/tendermint/rpc/grpc" grpccore "github.com/tendermint/tendermint/rpc/grpc"
rpc "github.com/tendermint/tendermint/rpc/lib" rpc "github.com/tendermint/tendermint/rpc/lib"
rpcserver "github.com/tendermint/tendermint/rpc/lib/server" rpcserver "github.com/tendermint/tendermint/rpc/lib/server"
@@ -34,7 +33,7 @@ import (
"github.com/tendermint/tendermint/state/txindex/kv" "github.com/tendermint/tendermint/state/txindex/kv"
"github.com/tendermint/tendermint/state/txindex/null" "github.com/tendermint/tendermint/state/txindex/null"
"github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/types"
priv_val "github.com/tendermint/tendermint/types/priv_validator" pvm "github.com/tendermint/tendermint/types/priv_validator"
"github.com/tendermint/tendermint/version" "github.com/tendermint/tendermint/version"
_ "net/http/pprof" _ "net/http/pprof"
@@ -79,7 +78,7 @@ type NodeProvider func(*cfg.Config, log.Logger) (*Node, error)
// It implements NodeProvider. // It implements NodeProvider.
func DefaultNewNode(config *cfg.Config, logger log.Logger) (*Node, error) { func DefaultNewNode(config *cfg.Config, logger log.Logger) (*Node, error) {
return NewNode(config, return NewNode(config,
types.LoadOrGenPrivValidatorFS(config.PrivValidatorFile()), pvm.LoadOrGenFilePV(config.PrivValidatorFile()),
proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()), proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()),
DefaultGenesisDocProviderFunc(config), DefaultGenesisDocProviderFunc(config),
DefaultDBProvider, DefaultDBProvider,
@@ -180,8 +179,8 @@ func NewNode(config *cfg.Config,
// TODO: persist this key so external signer // TODO: persist this key so external signer
// can actually authenticate us // can actually authenticate us
privKey = crypto.GenPrivKeyEd25519() privKey = crypto.GenPrivKeyEd25519()
pvsc = priv_val.NewSocketClient( pvsc = pvm.NewSocketPV(
logger.With("module", "priv_val"), logger.With("module", "pvm"),
config.PrivValidatorListenAddr, config.PrivValidatorListenAddr,
privKey, privKey,
) )
@@ -277,19 +276,11 @@ func NewNode(config *cfg.Config,
trustMetricStore = trust.NewTrustMetricStore(trustHistoryDB, trust.DefaultConfig()) trustMetricStore = trust.NewTrustMetricStore(trustHistoryDB, trust.DefaultConfig())
trustMetricStore.SetLogger(p2pLogger) trustMetricStore.SetLogger(p2pLogger)
var seeds []string
if config.P2P.Seeds != "" {
seeds = strings.Split(config.P2P.Seeds, ",")
}
var privatePeerIDs []string
if config.P2P.PrivatePeerIDs != "" {
privatePeerIDs = strings.Split(config.P2P.PrivatePeerIDs, ",")
}
pexReactor := pex.NewPEXReactor(addrBook, pexReactor := pex.NewPEXReactor(addrBook,
&pex.PEXReactorConfig{ &pex.PEXReactorConfig{
Seeds: seeds, Seeds: cmn.SplitAndTrim(config.P2P.Seeds, ",", " "),
SeedMode: config.P2P.SeedMode, SeedMode: config.P2P.SeedMode,
PrivatePeerIDs: privatePeerIDs}) PrivatePeerIDs: cmn.SplitAndTrim(config.P2P.PrivatePeerIDs, ",", " ")})
pexReactor.SetLogger(p2pLogger) pexReactor.SetLogger(p2pLogger)
sw.AddReactor("PEX", pexReactor) sw.AddReactor("PEX", pexReactor)
} }
@@ -339,7 +330,7 @@ func NewNode(config *cfg.Config,
return nil, err return nil, err
} }
if config.TxIndex.IndexTags != "" { if config.TxIndex.IndexTags != "" {
txIndexer = kv.NewTxIndex(store, kv.IndexTags(strings.Split(config.TxIndex.IndexTags, ","))) txIndexer = kv.NewTxIndex(store, kv.IndexTags(cmn.SplitAndTrim(config.TxIndex.IndexTags, ",", " ")))
} else if config.TxIndex.IndexAllTags { } else if config.TxIndex.IndexAllTags {
txIndexer = kv.NewTxIndex(store, kv.IndexAllTags()) txIndexer = kv.NewTxIndex(store, kv.IndexAllTags())
} else { } else {
@@ -414,9 +405,14 @@ func (n *Node) OnStart() error {
} }
n.Logger.Info("P2P Node ID", "ID", nodeKey.ID(), "file", n.config.NodeKeyFile()) n.Logger.Info("P2P Node ID", "ID", nodeKey.ID(), "file", n.config.NodeKeyFile())
// Start the switch nodeInfo := n.makeNodeInfo(nodeKey.ID())
n.sw.SetNodeInfo(n.makeNodeInfo(nodeKey.PubKey())) n.sw.SetNodeInfo(nodeInfo)
n.sw.SetNodeKey(nodeKey) n.sw.SetNodeKey(nodeKey)
// Add ourselves to addrbook to prevent dialing ourselves
n.addrBook.AddOurAddress(nodeInfo.NetAddress())
// Start the switch
err = n.sw.Start() err = n.sw.Start()
if err != nil { if err != nil {
return err return err
@@ -424,7 +420,7 @@ func (n *Node) OnStart() error {
// Always connect to persistent peers // Always connect to persistent peers
if n.config.P2P.PersistentPeers != "" { if n.config.P2P.PersistentPeers != "" {
err = n.sw.DialPeersAsync(n.addrBook, strings.Split(n.config.P2P.PersistentPeers, ","), true) err = n.sw.DialPeersAsync(n.addrBook, cmn.SplitAndTrim(n.config.P2P.PersistentPeers, ",", " "), true)
if err != nil { if err != nil {
return err return err
} }
@@ -452,7 +448,7 @@ func (n *Node) OnStop() {
n.eventBus.Stop() n.eventBus.Stop()
n.indexerService.Stop() n.indexerService.Stop()
if pvsc, ok := n.privValidator.(*priv_val.SocketClient); ok { if pvsc, ok := n.privValidator.(*pvm.SocketPV); ok {
if err := pvsc.Stop(); err != nil { if err := pvsc.Stop(); err != nil {
n.Logger.Error("Error stopping priv validator socket client", "err", err) n.Logger.Error("Error stopping priv validator socket client", "err", err)
} }
@@ -495,7 +491,9 @@ func (n *Node) ConfigureRPC() {
func (n *Node) startRPC() ([]net.Listener, error) { func (n *Node) startRPC() ([]net.Listener, error) {
n.ConfigureRPC() n.ConfigureRPC()
listenAddrs := strings.Split(n.config.RPC.ListenAddress, ",") listenAddrs := cmn.SplitAndTrim(n.config.RPC.ListenAddress, ",", " ")
coreCodec := amino.NewCodec()
ctypes.RegisterAmino(coreCodec)
if n.config.RPC.Unsafe { if n.config.RPC.Unsafe {
rpccore.AddUnsafeRoutes() rpccore.AddUnsafeRoutes()
@@ -506,10 +504,10 @@ func (n *Node) startRPC() ([]net.Listener, error) {
for i, listenAddr := range listenAddrs { for i, listenAddr := range listenAddrs {
mux := http.NewServeMux() mux := http.NewServeMux()
rpcLogger := n.Logger.With("module", "rpc-server") rpcLogger := n.Logger.With("module", "rpc-server")
wm := rpcserver.NewWebsocketManager(rpccore.Routes, rpcserver.EventSubscriber(n.eventBus)) wm := rpcserver.NewWebsocketManager(rpccore.Routes, coreCodec, rpcserver.EventSubscriber(n.eventBus))
wm.SetLogger(rpcLogger.With("protocol", "websocket")) wm.SetLogger(rpcLogger.With("protocol", "websocket"))
mux.HandleFunc("/websocket", wm.WebsocketHandler) mux.HandleFunc("/websocket", wm.WebsocketHandler)
rpcserver.RegisterRPCFuncs(mux, rpccore.Routes, rpcLogger) rpcserver.RegisterRPCFuncs(mux, rpccore.Routes, coreCodec, rpcLogger)
listener, err := rpcserver.StartHTTPServer(listenAddr, mux, rpcLogger) listener, err := rpcserver.StartHTTPServer(listenAddr, mux, rpcLogger)
if err != nil { if err != nil {
return nil, err return nil, err
@@ -581,13 +579,13 @@ func (n *Node) ProxyApp() proxy.AppConns {
return n.proxyApp return n.proxyApp
} }
func (n *Node) makeNodeInfo(pubKey crypto.PubKey) p2p.NodeInfo { func (n *Node) makeNodeInfo(nodeID p2p.ID) p2p.NodeInfo {
txIndexerStatus := "on" txIndexerStatus := "on"
if _, ok := n.txIndexer.(*null.TxIndex); ok { if _, ok := n.txIndexer.(*null.TxIndex); ok {
txIndexerStatus = "off" txIndexerStatus = "off"
} }
nodeInfo := p2p.NodeInfo{ nodeInfo := p2p.NodeInfo{
PubKey: pubKey, ID: nodeID,
Network: n.genesisDoc.ChainID, Network: n.genesisDoc.ChainID,
Version: version.Version, Version: version.Version,
Channels: []byte{ Channels: []byte{
@@ -598,7 +596,7 @@ func (n *Node) makeNodeInfo(pubKey crypto.PubKey) p2p.NodeInfo {
}, },
Moniker: n.config.Moniker, Moniker: n.config.Moniker,
Other: []string{ Other: []string{
cmn.Fmt("wire_version=%v", wire.Version), cmn.Fmt("amino_version=%v", amino.Version),
cmn.Fmt("p2p_version=%v", p2p.Version), cmn.Fmt("p2p_version=%v", p2p.Version),
cmn.Fmt("consensus_version=%v", cs.Version), cmn.Fmt("consensus_version=%v", cs.Version),
cmn.Fmt("rpc_version=%v/%v", rpc.Version, rpccore.Version), cmn.Fmt("rpc_version=%v/%v", rpc.Version, rpccore.Version),
@@ -643,19 +641,18 @@ func loadGenesisDoc(db dbm.DB) (*types.GenesisDoc, error) {
bytes := db.Get(genesisDocKey) bytes := db.Get(genesisDocKey)
if len(bytes) == 0 { if len(bytes) == 0 {
return nil, errors.New("Genesis doc not found") return nil, errors.New("Genesis doc not found")
} else { }
var genDoc *types.GenesisDoc var genDoc *types.GenesisDoc
err := json.Unmarshal(bytes, &genDoc) err := cdc.UnmarshalJSON(bytes, &genDoc)
if err != nil { if err != nil {
cmn.PanicCrisis(fmt.Sprintf("Failed to load genesis doc due to unmarshaling error: %v (bytes: %X)", err, bytes)) cmn.PanicCrisis(fmt.Sprintf("Failed to load genesis doc due to unmarshaling error: %v (bytes: %X)", err, bytes))
} }
return genDoc, nil return genDoc, nil
}
} }
// panics if failed to marshal the given genesis document // panics if failed to marshal the given genesis document
func saveGenesisDoc(db dbm.DB, genDoc *types.GenesisDoc) { func saveGenesisDoc(db dbm.DB, genDoc *types.GenesisDoc) {
bytes, err := json.Marshal(genDoc) bytes, err := cdc.MarshalJSON(genDoc)
if err != nil { if err != nil {
cmn.PanicCrisis(fmt.Sprintf("Failed to save genesis doc due to marshaling error: %v", err)) cmn.PanicCrisis(fmt.Sprintf("Failed to save genesis doc due to marshaling error: %v", err))
} }
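Several call sites switch from strings.Split to cmn.SplitAndTrim so that stray spaces in comma-separated config values (persistent_peers, seeds, RPC listen addresses, index tags) no longer leak into addresses. A local reimplementation of the intended behaviour, so the sketch does not depend on tmlibs:

    package main

    import (
        "fmt"
        "strings"
    )

    // splitAndTrim approximates cmn.SplitAndTrim: split on sep, then trim cutset
    // from both ends of every element.
    func splitAndTrim(s, sep, cutset string) []string {
        parts := strings.Split(s, sep)
        for i := range parts {
            parts[i] = strings.Trim(parts[i], cutset)
        }
        return parts
    }

    func main() {
        peers := "id1@1.2.3.4:46656, id2@5.6.7.8:46656 ,id3@9.9.9.9:46656"
        fmt.Println(splitAndTrim(peers, ",", " "))
        // [id1@1.2.3.4:46656 id2@5.6.7.8:46656 id3@9.9.9.9:46656]
    }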

12
node/wire.go Normal file
View File

@@ -0,0 +1,12 @@
package node
import (
amino "github.com/tendermint/go-amino"
crypto "github.com/tendermint/go-crypto"
)
var cdc = amino.NewCodec()
func init() {
crypto.RegisterAmino(cdc)
}

View File

@@ -47,7 +47,7 @@ func NewBaseReactor(name string, impl Reactor) *BaseReactor {
func (br *BaseReactor) SetSwitch(sw *Switch) { func (br *BaseReactor) SetSwitch(sw *Switch) {
br.Switch = sw br.Switch = sw
} }
func (_ *BaseReactor) GetChannels() []*conn.ChannelDescriptor { return nil } func (*BaseReactor) GetChannels() []*conn.ChannelDescriptor { return nil }
func (_ *BaseReactor) AddPeer(peer Peer) {} func (*BaseReactor) AddPeer(peer Peer) {}
func (_ *BaseReactor) RemovePeer(peer Peer, reason interface{}) {} func (*BaseReactor) RemovePeer(peer Peer, reason interface{}) {}
func (_ *BaseReactor) Receive(chID byte, peer Peer, msgBytes []byte) {} func (*BaseReactor) Receive(chID byte, peer Peer, msgBytes []byte) {}

View File

@@ -7,18 +7,21 @@ import (
"io" "io"
"math" "math"
"net" "net"
"runtime/debug" "reflect"
"sync/atomic" "sync/atomic"
"time" "time"
wire "github.com/tendermint/go-wire" amino "github.com/tendermint/go-amino"
cmn "github.com/tendermint/tmlibs/common" cmn "github.com/tendermint/tmlibs/common"
flow "github.com/tendermint/tmlibs/flowrate" flow "github.com/tendermint/tmlibs/flowrate"
"github.com/tendermint/tmlibs/log" "github.com/tendermint/tmlibs/log"
) )
const ( const (
numBatchMsgPackets = 10 maxPacketMsgPayloadSizeDefault = 1024 // NOTE: Must be below 16,384 bytes for 14 below.
maxPacketMsgOverheadSize = 14 // NOTE: See connection_test for derivation.
numBatchPacketMsgs = 10
minReadBufferSize = 1024 minReadBufferSize = 1024
minWriteBufferSize = 65536 minWriteBufferSize = 65536
updateStats = 2 * time.Second updateStats = 2 * time.Second
@@ -53,16 +56,15 @@ The byte id and the relative priorities of each `Channel` are configured upon
initialization of the connection. initialization of the connection.
There are two methods for sending messages: There are two methods for sending messages:
func (m MConnection) Send(chID byte, msg interface{}) bool {} func (m MConnection) Send(chID byte, msgBytes []byte) bool {}
func (m MConnection) TrySend(chID byte, msg interface{}) bool {} func (m MConnection) TrySend(chID byte, msgBytes []byte}) bool {}
`Send(chID, msg)` is a blocking call that waits until `msg` is successfully queued `Send(chID, msgBytes)` is a blocking call that waits until `msg` is
for the channel with the given id byte `chID`, or until the request times out. successfully queued for the channel with the given id byte `chID`, or until the
The message `msg` is serialized using the `tendermint/wire` submodule's request times out. The message `msg` is serialized using Go-Amino.
`WriteBinary()` reflection routine.
`TrySend(chID, msg)` is a nonblocking call that returns false if the channel's `TrySend(chID, msgBytes)` is a nonblocking call that returns false if the
queue is full. channel's queue is full.
Inbound message bytes are handled with an onReceive callback function. Inbound message bytes are handled with an onReceive callback function.
*/ */
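Under the new signatures the caller serializes first and the connection only ever sees bytes. A sketch of the calling convention with a stub in place of *MConnection (the message type, codec, and channel byte are all illustrative):

    package main

    import (
        "fmt"

        amino "github.com/tendermint/go-amino"
    )

    // stubConn stands in for *MConnection: Send now takes pre-marshalled bytes.
    type stubConn struct{}

    func (stubConn) Send(chID byte, msgBytes []byte) bool {
        fmt.Printf("queued %d bytes on channel %X\n", len(msgBytes), chID)
        return true
    }

    // Illustrative message type; real reactors register theirs with their codec.
    type pingRequest struct {
        Nonce int64
    }

    var cdc = amino.NewCodec()

    func main() {
        bz := cdc.MustMarshalBinaryBare(pingRequest{Nonce: 1})
        if ok := (stubConn{}).Send(0x30, bz); !ok {
            fmt.Println("channel queue full or connection stopped")
        }
    }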
@@ -70,8 +72,8 @@ type MConnection struct {
cmn.BaseService cmn.BaseService
conn net.Conn conn net.Conn
bufReader *bufio.Reader bufConnReader *bufio.Reader
bufWriter *bufio.Writer bufConnWriter *bufio.Writer
sendMonitor *flow.Monitor sendMonitor *flow.Monitor
recvMonitor *flow.Monitor recvMonitor *flow.Monitor
send chan struct{} send chan struct{}
@@ -102,7 +104,7 @@ type MConnConfig struct {
RecvRate int64 `mapstructure:"recv_rate"` RecvRate int64 `mapstructure:"recv_rate"`
// Maximum payload size // Maximum payload size
MaxMsgPacketPayloadSize int `mapstructure:"max_msg_packet_payload_size"` MaxPacketMsgPayloadSize int `mapstructure:"max_packet_msg_payload_size"`
// Interval to flush writes (throttled) // Interval to flush writes (throttled)
FlushThrottle time.Duration `mapstructure:"flush_throttle"` FlushThrottle time.Duration `mapstructure:"flush_throttle"`
@@ -114,8 +116,8 @@ type MConnConfig struct {
PongTimeout time.Duration `mapstructure:"pong_timeout"` PongTimeout time.Duration `mapstructure:"pong_timeout"`
} }
func (cfg *MConnConfig) maxMsgPacketTotalSize() int { func (cfg *MConnConfig) maxPacketMsgTotalSize() int {
return cfg.MaxMsgPacketPayloadSize + maxMsgPacketOverheadSize return cfg.MaxPacketMsgPayloadSize + maxPacketMsgOverheadSize
} }
// DefaultMConnConfig returns the default config. // DefaultMConnConfig returns the default config.
@@ -123,7 +125,7 @@ func DefaultMConnConfig() *MConnConfig {
return &MConnConfig{ return &MConnConfig{
SendRate: defaultSendRate, SendRate: defaultSendRate,
RecvRate: defaultRecvRate, RecvRate: defaultRecvRate,
MaxMsgPacketPayloadSize: defaultMaxMsgPacketPayloadSize, MaxPacketMsgPayloadSize: maxPacketMsgPayloadSizeDefault,
FlushThrottle: defaultFlushThrottle, FlushThrottle: defaultFlushThrottle,
PingInterval: defaultPingInterval, PingInterval: defaultPingInterval,
PongTimeout: defaultPongTimeout, PongTimeout: defaultPongTimeout,
@@ -148,8 +150,8 @@ func NewMConnectionWithConfig(conn net.Conn, chDescs []*ChannelDescriptor, onRec
mconn := &MConnection{ mconn := &MConnection{
conn: conn, conn: conn,
bufReader: bufio.NewReaderSize(conn, minReadBufferSize), bufConnReader: bufio.NewReaderSize(conn, minReadBufferSize),
bufWriter: bufio.NewWriterSize(conn, minWriteBufferSize), bufConnWriter: bufio.NewWriterSize(conn, minWriteBufferSize),
sendMonitor: flow.New(0, 0), sendMonitor: flow.New(0, 0),
recvMonitor: flow.New(0, 0), recvMonitor: flow.New(0, 0),
send: make(chan struct{}, 1), send: make(chan struct{}, 1),
@@ -221,7 +223,7 @@ func (c *MConnection) String() string {
func (c *MConnection) flush() { func (c *MConnection) flush() {
c.Logger.Debug("Flush", "conn", c) c.Logger.Debug("Flush", "conn", c)
err := c.bufWriter.Flush() err := c.bufConnWriter.Flush()
if err != nil { if err != nil {
c.Logger.Error("MConnection flush failed", "err", err) c.Logger.Error("MConnection flush failed", "err", err)
} }
@@ -230,8 +232,7 @@ func (c *MConnection) flush() {
// Catch panics, usually caused by remote disconnects. // Catch panics, usually caused by remote disconnects.
func (c *MConnection) _recover() { func (c *MConnection) _recover() {
if r := recover(); r != nil { if r := recover(); r != nil {
stack := debug.Stack() err := cmn.ErrorWrap(r, "recovered panic in MConnection")
err := cmn.StackError{r, stack}
c.stopForError(err) c.stopForError(err)
} }
} }
@@ -246,12 +247,12 @@ func (c *MConnection) stopForError(r interface{}) {
} }
// Queues a message to be sent to channel. // Queues a message to be sent to channel.
func (c *MConnection) Send(chID byte, msg interface{}) bool { func (c *MConnection) Send(chID byte, msgBytes []byte) bool {
if !c.IsRunning() { if !c.IsRunning() {
return false return false
} }
c.Logger.Debug("Send", "channel", chID, "conn", c, "msg", msg) //, "bytes", wire.BinaryBytes(msg)) c.Logger.Debug("Send", "channel", chID, "conn", c, "msgBytes", fmt.Sprintf("%X", msgBytes))
// Send message to channel. // Send message to channel.
channel, ok := c.channelsIdx[chID] channel, ok := c.channelsIdx[chID]
@@ -260,7 +261,7 @@ func (c *MConnection) Send(chID byte, msg interface{}) bool {
return false return false
} }
success := channel.sendBytes(wire.BinaryBytes(msg)) success := channel.sendBytes(msgBytes)
if success { if success {
// Wake up sendRoutine if necessary // Wake up sendRoutine if necessary
select { select {
@@ -268,19 +269,19 @@ func (c *MConnection) Send(chID byte, msg interface{}) bool {
default: default:
} }
} else { } else {
c.Logger.Error("Send failed", "channel", chID, "conn", c, "msg", msg) c.Logger.Error("Send failed", "channel", chID, "conn", c, "msgBytes", fmt.Sprintf("%X", msgBytes))
} }
return success return success
} }
// Queues a message to be sent to channel. // Queues a message to be sent to channel.
// Nonblocking, returns true if successful. // Nonblocking, returns true if successful.
func (c *MConnection) TrySend(chID byte, msg interface{}) bool { func (c *MConnection) TrySend(chID byte, msgBytes []byte) bool {
if !c.IsRunning() { if !c.IsRunning() {
return false return false
} }
c.Logger.Debug("TrySend", "channel", chID, "conn", c, "msg", msg) c.Logger.Debug("TrySend", "channel", chID, "conn", c, "msgBytes", fmt.Sprintf("%X", msgBytes))
// Send message to channel. // Send message to channel.
channel, ok := c.channelsIdx[chID] channel, ok := c.channelsIdx[chID]
@@ -289,7 +290,7 @@ func (c *MConnection) TrySend(chID byte, msg interface{}) bool {
return false return false
} }
ok = channel.trySendBytes(wire.BinaryBytes(msg)) ok = channel.trySendBytes(msgBytes)
if ok { if ok {
// Wake up sendRoutine if necessary // Wake up sendRoutine if necessary
select { select {
@@ -322,12 +323,13 @@ func (c *MConnection) sendRoutine() {
FOR_LOOP: FOR_LOOP:
for { for {
var n int var _n int64
var err error var err error
SELECTION:
select { select {
case <-c.flushTimer.Ch: case <-c.flushTimer.Ch:
// NOTE: flushTimer.Set() must be called every time // NOTE: flushTimer.Set() must be called every time
// something is written to .bufWriter. // something is written to .bufConnWriter.
c.flush() c.flush()
case <-c.chStatsTimer.Chan(): case <-c.chStatsTimer.Chan():
for _, channel := range c.channels { for _, channel := range c.channels {
@@ -335,8 +337,11 @@ FOR_LOOP:
} }
case <-c.pingTimer.Chan(): case <-c.pingTimer.Chan():
c.Logger.Debug("Send Ping") c.Logger.Debug("Send Ping")
wire.WriteByte(packetTypePing, c.bufWriter, &n, &err) _n, err = cdc.MarshalBinaryWriter(c.bufConnWriter, PacketPing{})
c.sendMonitor.Update(int(n)) if err != nil {
break SELECTION
}
c.sendMonitor.Update(int(_n))
c.Logger.Debug("Starting pong timer", "dur", c.config.PongTimeout) c.Logger.Debug("Starting pong timer", "dur", c.config.PongTimeout)
c.pongTimer = time.AfterFunc(c.config.PongTimeout, func() { c.pongTimer = time.AfterFunc(c.config.PongTimeout, func() {
select { select {
@@ -354,14 +359,17 @@ FOR_LOOP:
} }
case <-c.pong: case <-c.pong:
c.Logger.Debug("Send Pong") c.Logger.Debug("Send Pong")
wire.WriteByte(packetTypePong, c.bufWriter, &n, &err) _n, err = cdc.MarshalBinaryWriter(c.bufConnWriter, PacketPong{})
c.sendMonitor.Update(int(n)) if err != nil {
break SELECTION
}
c.sendMonitor.Update(int(_n))
c.flush() c.flush()
case <-c.quit: case <-c.quit:
break FOR_LOOP break FOR_LOOP
case <-c.send: case <-c.send:
// Send some msgPackets // Send some PacketMsgs
eof := c.sendSomeMsgPackets() eof := c.sendSomePacketMsgs()
if !eof { if !eof {
// Keep sendRoutine awake. // Keep sendRoutine awake.
select { select {
@@ -387,15 +395,15 @@ FOR_LOOP:
// Returns true if messages from channels were exhausted. // Returns true if messages from channels were exhausted.
// Blocks in accordance to .sendMonitor throttling. // Blocks in accordance to .sendMonitor throttling.
func (c *MConnection) sendSomeMsgPackets() bool { func (c *MConnection) sendSomePacketMsgs() bool {
// Block until .sendMonitor says we can write. // Block until .sendMonitor says we can write.
// Once we're ready we send more than we asked for, // Once we're ready we send more than we asked for,
// but amortized it should even out. // but amortized it should even out.
c.sendMonitor.Limit(c.config.maxMsgPacketTotalSize(), atomic.LoadInt64(&c.config.SendRate), true) c.sendMonitor.Limit(c.config.maxPacketMsgTotalSize(), atomic.LoadInt64(&c.config.SendRate), true)
// Now send some msgPackets. // Now send some PacketMsgs.
for i := 0; i < numBatchMsgPackets; i++ { for i := 0; i < numBatchPacketMsgs; i++ {
if c.sendMsgPacket() { if c.sendPacketMsg() {
return true return true
} }
} }
@@ -403,8 +411,8 @@ func (c *MConnection) sendSomeMsgPackets() bool {
} }
// Returns true if messages from channels were exhausted. // Returns true if messages from channels were exhausted.
func (c *MConnection) sendMsgPacket() bool { func (c *MConnection) sendPacketMsg() bool {
// Choose a channel to create a msgPacket from. // Choose a channel to create a PacketMsg from.
// The chosen channel will be the one whose recentlySent/priority is the least. // The chosen channel will be the one whose recentlySent/priority is the least.
var leastRatio float32 = math.MaxFloat32 var leastRatio float32 = math.MaxFloat32
var leastChannel *Channel var leastChannel *Channel
@@ -424,23 +432,22 @@ func (c *MConnection) sendMsgPacket() bool {
// Nothing to send? // Nothing to send?
if leastChannel == nil { if leastChannel == nil {
return true return true
} else {
// c.Logger.Info("Found a msgPacket to send")
} }
// c.Logger.Info("Found a msgPacket to send")
// Make & send a msgPacket from this channel // Make & send a PacketMsg from this channel
n, err := leastChannel.writeMsgPacketTo(c.bufWriter) _n, err := leastChannel.writePacketMsgTo(c.bufConnWriter)
if err != nil { if err != nil {
c.Logger.Error("Failed to write msgPacket", "err", err) c.Logger.Error("Failed to write PacketMsg", "err", err)
c.stopForError(err) c.stopForError(err)
return true return true
} }
c.sendMonitor.Update(int(n)) c.sendMonitor.Update(int(_n))
c.flushTimer.Set() c.flushTimer.Set()
return false return false
} }
// recvRoutine reads msgPackets and reconstructs the message using the channels' "recving" buffer. // recvRoutine reads PacketMsgs and reconstructs the message using the channels' "recving" buffer.
// After a whole message has been assembled, it's pushed to onReceive(). // After a whole message has been assembled, it's pushed to onReceive().
// Blocks depending on how the connection is throttled. // Blocks depending on how the connection is throttled.
// Otherwise, it never blocks. // Otherwise, it never blocks.
@@ -450,28 +457,28 @@ func (c *MConnection) recvRoutine() {
FOR_LOOP: FOR_LOOP:
for { for {
// Block until .recvMonitor says we can read. // Block until .recvMonitor says we can read.
c.recvMonitor.Limit(c.config.maxMsgPacketTotalSize(), atomic.LoadInt64(&c.config.RecvRate), true) c.recvMonitor.Limit(c.config.maxPacketMsgTotalSize(), atomic.LoadInt64(&c.config.RecvRate), true)
// Peek into bufConnReader for debugging
/* /*
// Peek into bufReader for debugging if numBytes := c.bufConnReader.Buffered(); numBytes > 0 {
if numBytes := c.bufReader.Buffered(); numBytes > 0 { bz, err := c.bufConnReader.Peek(cmn.MinInt(numBytes, 100))
log.Info("Peek connection buffer", "numBytes", numBytes, "bytes", log15.Lazy{func() []byte {
bytes, err := c.bufReader.Peek(cmn.MinInt(numBytes, 100))
if err == nil { if err == nil {
return bytes // return
} else { } else {
log.Warn("Error peeking connection buffer", "err", err) c.Logger.Debug("Error peeking connection buffer", "err", err)
return nil // return nil
} }
}}) c.Logger.Info("Peek connection buffer", "numBytes", numBytes, "bz", bz)
} }
*/ */
// Read packet type // Read packet type
var n int var packet Packet
var _n int64
var err error var err error
pktType := wire.ReadByte(c.bufReader, &n, &err) _n, err = cdc.UnmarshalBinaryReader(c.bufConnReader, &packet, int64(c.config.maxPacketMsgTotalSize()))
c.recvMonitor.Update(int(n)) c.recvMonitor.Update(int(_n))
if err != nil { if err != nil {
if c.IsRunning() { if c.IsRunning() {
c.Logger.Error("Connection failed @ recvRoutine (reading byte)", "conn", c, "err", err) c.Logger.Error("Connection failed @ recvRoutine (reading byte)", "conn", c, "err", err)
@@ -481,8 +488,8 @@ FOR_LOOP:
} }
// Read more depending on packet type. // Read more depending on packet type.
switch pktType { switch pkt := packet.(type) {
case packetTypePing: case PacketPing:
// TODO: prevent abuse, as they cause flush()'s. // TODO: prevent abuse, as they cause flush()'s.
// https://github.com/tendermint/tendermint/issues/1190 // https://github.com/tendermint/tendermint/issues/1190
c.Logger.Debug("Receive Ping") c.Logger.Debug("Receive Ping")
@@ -491,24 +498,14 @@ FOR_LOOP:
default: default:
// never block // never block
} }
case packetTypePong: case PacketPong:
c.Logger.Debug("Receive Pong") c.Logger.Debug("Receive Pong")
select { select {
case c.pongTimeoutCh <- false: case c.pongTimeoutCh <- false:
default: default:
// never block // never block
} }
case packetTypeMsg: case PacketMsg:
pkt, n, err := msgPacket{}, int(0), error(nil)
wire.ReadBinaryPtr(&pkt, c.bufReader, c.config.maxMsgPacketTotalSize(), &n, &err)
c.recvMonitor.Update(int(n))
if err != nil {
if c.IsRunning() {
c.Logger.Error("Connection failed @ recvRoutine", "conn", c, "err", err)
c.stopForError(err)
}
break FOR_LOOP
}
channel, ok := c.channelsIdx[pkt.ChannelID] channel, ok := c.channelsIdx[pkt.ChannelID]
if !ok || channel == nil { if !ok || channel == nil {
err := fmt.Errorf("Unknown channel %X", pkt.ChannelID) err := fmt.Errorf("Unknown channel %X", pkt.ChannelID)
@@ -517,7 +514,7 @@ FOR_LOOP:
break FOR_LOOP break FOR_LOOP
} }
msgBytes, err := channel.recvMsgPacket(pkt) msgBytes, err := channel.recvPacketMsg(pkt)
if err != nil { if err != nil {
if c.IsRunning() { if c.IsRunning() {
c.Logger.Error("Connection failed @ recvRoutine", "conn", c, "err", err) c.Logger.Error("Connection failed @ recvRoutine", "conn", c, "err", err)
@@ -531,7 +528,7 @@ FOR_LOOP:
c.onReceive(pkt.ChannelID, msgBytes) c.onReceive(pkt.ChannelID, msgBytes)
} }
default: default:
err := fmt.Errorf("Unknown message type %X", pktType) err := fmt.Errorf("Unknown message type %v", reflect.TypeOf(packet))
c.Logger.Error("Connection failed @ recvRoutine", "conn", c, "err", err) c.Logger.Error("Connection failed @ recvRoutine", "conn", c, "err", err)
c.stopForError(err) c.stopForError(err)
break FOR_LOOP break FOR_LOOP
@@ -623,7 +620,7 @@ type Channel struct {
sending []byte sending []byte
recentlySent int64 // exponential moving average recentlySent int64 // exponential moving average
maxMsgPacketPayloadSize int maxPacketMsgPayloadSize int
Logger log.Logger Logger log.Logger
} }
@@ -638,7 +635,7 @@ func newChannel(conn *MConnection, desc ChannelDescriptor) *Channel {
desc: desc, desc: desc,
sendQueue: make(chan []byte, desc.SendQueueCapacity), sendQueue: make(chan []byte, desc.SendQueueCapacity),
recving: make([]byte, 0, desc.RecvBufferCapacity), recving: make([]byte, 0, desc.RecvBufferCapacity),
maxMsgPacketPayloadSize: conn.config.MaxMsgPacketPayloadSize, maxPacketMsgPayloadSize: conn.config.MaxPacketMsgPayloadSize,
} }
} }
@@ -683,8 +680,8 @@ func (ch *Channel) canSend() bool {
return ch.loadSendQueueSize() < defaultSendQueueCapacity return ch.loadSendQueueSize() < defaultSendQueueCapacity
} }
// Returns true if any msgPackets are pending to be sent. // Returns true if any PacketMsgs are pending to be sent.
// Call before calling nextMsgPacket() // Call before calling nextPacketMsg()
// Goroutine-safe // Goroutine-safe
func (ch *Channel) isSendPending() bool { func (ch *Channel) isSendPending() bool {
if len(ch.sending) == 0 { if len(ch.sending) == 0 {
@@ -696,12 +693,12 @@ func (ch *Channel) isSendPending() bool {
return true return true
} }
// Creates a new msgPacket to send. // Creates a new PacketMsg to send.
// Not goroutine-safe // Not goroutine-safe
func (ch *Channel) nextMsgPacket() msgPacket { func (ch *Channel) nextPacketMsg() PacketMsg {
packet := msgPacket{} packet := PacketMsg{}
packet.ChannelID = byte(ch.desc.ID) packet.ChannelID = byte(ch.desc.ID)
maxSize := ch.maxMsgPacketPayloadSize maxSize := ch.maxPacketMsgPayloadSize
packet.Bytes = ch.sending[:cmn.MinInt(maxSize, len(ch.sending))] packet.Bytes = ch.sending[:cmn.MinInt(maxSize, len(ch.sending))]
if len(ch.sending) <= maxSize { if len(ch.sending) <= maxSize {
packet.EOF = byte(0x01) packet.EOF = byte(0x01)
@@ -714,30 +711,23 @@ func (ch *Channel) nextMsgPacket() msgPacket {
return packet return packet
} }
// Writes next msgPacket to w. // Writes next PacketMsg to w and updates c.recentlySent.
// Not goroutine-safe // Not goroutine-safe
func (ch *Channel) writeMsgPacketTo(w io.Writer) (n int, err error) { func (ch *Channel) writePacketMsgTo(w io.Writer) (n int64, err error) {
packet := ch.nextMsgPacket() var packet = ch.nextPacketMsg()
ch.Logger.Debug("Write Msg Packet", "conn", ch.conn, "packet", packet) n, err = cdc.MarshalBinaryWriter(w, packet)
writeMsgPacketTo(packet, w, &n, &err) ch.recentlySent += n
if err == nil {
ch.recentlySent += int64(n)
}
return return
} }
func writeMsgPacketTo(packet msgPacket, w io.Writer, n *int, err *error) { // Handles incoming PacketMsgs. It returns a message bytes if message is
wire.WriteByte(packetTypeMsg, w, n, err) // complete. NOTE message bytes may change on next call to recvPacketMsg.
wire.WriteBinary(packet, w, n, err)
}
// Handles incoming msgPackets. It returns a message bytes if message is
// complete. NOTE message bytes may change on next call to recvMsgPacket.
// Not goroutine-safe // Not goroutine-safe
func (ch *Channel) recvMsgPacket(packet msgPacket) ([]byte, error) { func (ch *Channel) recvPacketMsg(packet PacketMsg) ([]byte, error) {
ch.Logger.Debug("Read Msg Packet", "conn", ch.conn, "packet", packet) ch.Logger.Debug("Read PacketMsg", "conn", ch.conn, "packet", packet)
if ch.desc.RecvMessageCapacity < len(ch.recving)+len(packet.Bytes) { var recvCap, recvReceived = ch.desc.RecvMessageCapacity, len(ch.recving) + len(packet.Bytes)
return nil, wire.ErrBinaryReadOverflow if recvCap < recvReceived {
return nil, fmt.Errorf("Received message exceeds available capacity: %v < %v", recvCap, recvReceived)
} }
ch.recving = append(ch.recving, packet.Bytes...) ch.recving = append(ch.recving, packet.Bytes...)
if packet.EOF == byte(0x01) { if packet.EOF == byte(0x01) {
@@ -761,24 +751,36 @@ func (ch *Channel) updateStats() {
ch.recentlySent = int64(float64(ch.recentlySent) * 0.8) ch.recentlySent = int64(float64(ch.recentlySent) * 0.8)
} }
//----------------------------------------------------------------------------- //----------------------------------------
// Packet
const ( type Packet interface {
defaultMaxMsgPacketPayloadSize = 1024 AssertIsPacket()
}
maxMsgPacketOverheadSize = 10 // It's actually lower but good enough func RegisterPacket(cdc *amino.Codec) {
packetTypePing = byte(0x01) cdc.RegisterInterface((*Packet)(nil), nil)
packetTypePong = byte(0x02) cdc.RegisterConcrete(PacketPing{}, "tendermint/p2p/PacketPing", nil)
packetTypeMsg = byte(0x03) cdc.RegisterConcrete(PacketPong{}, "tendermint/p2p/PacketPong", nil)
) cdc.RegisterConcrete(PacketMsg{}, "tendermint/p2p/PacketMsg", nil)
}
// Messages in channels are chopped into smaller msgPackets for multiplexing. func (_ PacketPing) AssertIsPacket() {}
type msgPacket struct { func (_ PacketPong) AssertIsPacket() {}
func (_ PacketMsg) AssertIsPacket() {}
type PacketPing struct {
}
type PacketPong struct {
}
type PacketMsg struct {
ChannelID byte ChannelID byte
EOF byte // 1 means message ends here. EOF byte // 1 means message ends here.
Bytes []byte Bytes []byte
} }
func (p msgPacket) String() string { func (mp PacketMsg) String() string {
return fmt.Sprintf("MsgPacket{%X:%X T:%X}", p.ChannelID, p.Bytes, p.EOF) return fmt.Sprintf("PacketMsg{%X:%X T:%X}", mp.ChannelID, mp.Bytes, mp.EOF)
} }
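To make the chop/reassemble flow above concrete, here is a minimal, self-contained sketch (with a local copy of the PacketMsg fields and hypothetical helper names, not part of this diff) of how a message is split into EOF-terminated packets of at most maxPayload bytes and put back together on the receiving side, mirroring nextPacketMsg and recvPacketMsg:

package main

import "fmt"

// Local copy of the PacketMsg fields from the diff, for a self-contained sketch.
type PacketMsg struct {
    ChannelID byte
    EOF       byte // 1 means message ends here.
    Bytes     []byte
}

// chopMsg mirrors Channel.nextPacketMsg: split msg into packets of at most
// maxPayload bytes each, setting EOF only on the last one.
func chopMsg(chID byte, msg []byte, maxPayload int) []PacketMsg {
    var packets []PacketMsg
    for len(msg) > maxPayload {
        packets = append(packets, PacketMsg{ChannelID: chID, EOF: 0, Bytes: msg[:maxPayload]})
        msg = msg[maxPayload:]
    }
    return append(packets, PacketMsg{ChannelID: chID, EOF: 1, Bytes: msg})
}

// reassemble mirrors Channel.recvPacketMsg: accumulate bytes until EOF.
func reassemble(packets []PacketMsg) []byte {
    var recving []byte
    for _, p := range packets {
        recving = append(recving, p.Bytes...)
        if p.EOF == byte(0x01) {
            return recving
        }
    }
    return nil // message still incomplete
}

func main() {
    msg := []byte("a message longer than a single packet payload")
    packets := chopMsg(0x01, msg, 16)
    fmt.Println(len(packets), string(reassemble(packets)))
}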


@@ -1,13 +1,14 @@
package conn package conn
import ( import (
"bytes"
"net" "net"
"testing" "testing"
"time" "time"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
wire "github.com/tendermint/go-wire" amino "github.com/tendermint/go-amino"
"github.com/tendermint/tmlibs/log" "github.com/tendermint/tmlibs/log"
) )
@@ -32,41 +33,37 @@ func createMConnectionWithCallbacks(conn net.Conn, onReceive func(chID byte, msg
} }
func TestMConnectionSend(t *testing.T) { func TestMConnectionSend(t *testing.T) {
assert, require := assert.New(t), require.New(t)
server, client := NetPipe() server, client := NetPipe()
defer server.Close() // nolint: errcheck defer server.Close() // nolint: errcheck
defer client.Close() // nolint: errcheck defer client.Close() // nolint: errcheck
mconn := createTestMConnection(client) mconn := createTestMConnection(client)
err := mconn.Start() err := mconn.Start()
require.Nil(err) require.Nil(t, err)
defer mconn.Stop() defer mconn.Stop()
msg := "Ant-Man" msg := []byte("Ant-Man")
assert.True(mconn.Send(0x01, msg)) assert.True(t, mconn.Send(0x01, msg))
// Note: subsequent Send/TrySend calls could pass because we are reading from // Note: subsequent Send/TrySend calls could pass because we are reading from
// the send queue in a separate goroutine. // the send queue in a separate goroutine.
_, err = server.Read(make([]byte, len(msg))) _, err = server.Read(make([]byte, len(msg)))
if err != nil { if err != nil {
t.Error(err) t.Error(err)
} }
assert.True(mconn.CanSend(0x01)) assert.True(t, mconn.CanSend(0x01))
msg = "Spider-Man" msg = []byte("Spider-Man")
assert.True(mconn.TrySend(0x01, msg)) assert.True(t, mconn.TrySend(0x01, msg))
_, err = server.Read(make([]byte, len(msg))) _, err = server.Read(make([]byte, len(msg)))
if err != nil { if err != nil {
t.Error(err) t.Error(err)
} }
assert.False(mconn.CanSend(0x05), "CanSend should return false because channel is unknown") assert.False(t, mconn.CanSend(0x05), "CanSend should return false because channel is unknown")
assert.False(mconn.Send(0x05, "Absorbing Man"), "Send should return false because channel is unknown") assert.False(t, mconn.Send(0x05, []byte("Absorbing Man")), "Send should return false because channel is unknown")
} }
func TestMConnectionReceive(t *testing.T) { func TestMConnectionReceive(t *testing.T) {
assert, require := assert.New(t), require.New(t)
server, client := NetPipe() server, client := NetPipe()
defer server.Close() // nolint: errcheck defer server.Close() // nolint: errcheck
defer client.Close() // nolint: errcheck defer client.Close() // nolint: errcheck
@@ -81,20 +78,20 @@ func TestMConnectionReceive(t *testing.T) {
} }
mconn1 := createMConnectionWithCallbacks(client, onReceive, onError) mconn1 := createMConnectionWithCallbacks(client, onReceive, onError)
err := mconn1.Start() err := mconn1.Start()
require.Nil(err) require.Nil(t, err)
defer mconn1.Stop() defer mconn1.Stop()
mconn2 := createTestMConnection(server) mconn2 := createTestMConnection(server)
err = mconn2.Start() err = mconn2.Start()
require.Nil(err) require.Nil(t, err)
defer mconn2.Stop() defer mconn2.Stop()
msg := "Cyclops" msg := []byte("Cyclops")
assert.True(mconn2.Send(0x01, msg)) assert.True(t, mconn2.Send(0x01, msg))
select { select {
case receivedBytes := <-receivedCh: case receivedBytes := <-receivedCh:
assert.Equal([]byte(msg), receivedBytes[2:]) // first 3 bytes are internal assert.Equal(t, []byte(msg), receivedBytes)
case err := <-errorsCh: case err := <-errorsCh:
t.Fatalf("Expected %s, got %+v", msg, err) t.Fatalf("Expected %s, got %+v", msg, err)
case <-time.After(500 * time.Millisecond): case <-time.After(500 * time.Millisecond):
@@ -103,20 +100,18 @@ func TestMConnectionReceive(t *testing.T) {
} }
func TestMConnectionStatus(t *testing.T) { func TestMConnectionStatus(t *testing.T) {
assert, require := assert.New(t), require.New(t)
server, client := NetPipe() server, client := NetPipe()
defer server.Close() // nolint: errcheck defer server.Close() // nolint: errcheck
defer client.Close() // nolint: errcheck defer client.Close() // nolint: errcheck
mconn := createTestMConnection(client) mconn := createTestMConnection(client)
err := mconn.Start() err := mconn.Start()
require.Nil(err) require.Nil(t, err)
defer mconn.Stop() defer mconn.Stop()
status := mconn.Status() status := mconn.Status()
assert.NotNil(status) assert.NotNil(t, status)
assert.Zero(status.Channels[0].SendQueueSize) assert.Zero(t, status.Channels[0].SendQueueSize)
} }
func TestMConnectionPongTimeoutResultsInError(t *testing.T) { func TestMConnectionPongTimeoutResultsInError(t *testing.T) {
@@ -140,7 +135,10 @@ func TestMConnectionPongTimeoutResultsInError(t *testing.T) {
serverGotPing := make(chan struct{}) serverGotPing := make(chan struct{})
go func() { go func() {
// read ping // read ping
server.Read(make([]byte, 1)) var pkt PacketPing
const maxPacketPingSize = 1024
_, err = cdc.UnmarshalBinaryReader(server, &pkt, maxPacketPingSize)
assert.Nil(t, err)
serverGotPing <- struct{}{} serverGotPing <- struct{}{}
}() }()
<-serverGotPing <-serverGotPing
@@ -175,21 +173,22 @@ func TestMConnectionMultiplePongsInTheBeginning(t *testing.T) {
defer mconn.Stop() defer mconn.Stop()
// sending 3 pongs in a row (abuse) // sending 3 pongs in a row (abuse)
_, err = server.Write([]byte{packetTypePong}) _, err = server.Write(cdc.MustMarshalBinary(PacketPong{}))
require.Nil(t, err) require.Nil(t, err)
_, err = server.Write([]byte{packetTypePong}) _, err = server.Write(cdc.MustMarshalBinary(PacketPong{}))
require.Nil(t, err) require.Nil(t, err)
_, err = server.Write([]byte{packetTypePong}) _, err = server.Write(cdc.MustMarshalBinary(PacketPong{}))
require.Nil(t, err) require.Nil(t, err)
serverGotPing := make(chan struct{}) serverGotPing := make(chan struct{})
go func() { go func() {
// read ping (one byte) // read ping (one byte)
_, err = server.Read(make([]byte, 1)) var packet, err = Packet(nil), error(nil)
_, err = cdc.UnmarshalBinaryReader(server, &packet, 1024)
require.Nil(t, err) require.Nil(t, err)
serverGotPing <- struct{}{} serverGotPing <- struct{}{}
// respond with pong // respond with pong
_, err = server.Write([]byte{packetTypePong}) _, err = server.Write(cdc.MustMarshalBinary(PacketPong{}))
require.Nil(t, err) require.Nil(t, err)
}() }()
<-serverGotPing <-serverGotPing
@@ -225,17 +224,18 @@ func TestMConnectionMultiplePings(t *testing.T) {
// sending 3 pings in a row (abuse) // sending 3 pings in a row (abuse)
// see https://github.com/tendermint/tendermint/issues/1190 // see https://github.com/tendermint/tendermint/issues/1190
_, err = server.Write([]byte{packetTypePing}) _, err = server.Write(cdc.MustMarshalBinary(PacketPing{}))
require.Nil(t, err) require.Nil(t, err)
_, err = server.Read(make([]byte, 1)) var pkt PacketPong
_, err = cdc.UnmarshalBinaryReader(server, &pkt, 1024)
require.Nil(t, err) require.Nil(t, err)
_, err = server.Write([]byte{packetTypePing}) _, err = server.Write(cdc.MustMarshalBinary(PacketPing{}))
require.Nil(t, err) require.Nil(t, err)
_, err = server.Read(make([]byte, 1)) _, err = cdc.UnmarshalBinaryReader(server, &pkt, 1024)
require.Nil(t, err) require.Nil(t, err)
_, err = server.Write([]byte{packetTypePing}) _, err = server.Write(cdc.MustMarshalBinary(PacketPing{}))
require.Nil(t, err) require.Nil(t, err)
_, err = server.Read(make([]byte, 1)) _, err = cdc.UnmarshalBinaryReader(server, &pkt, 1024)
require.Nil(t, err) require.Nil(t, err)
assert.True(t, mconn.IsRunning()) assert.True(t, mconn.IsRunning())
@@ -262,18 +262,21 @@ func TestMConnectionPingPongs(t *testing.T) {
serverGotPing := make(chan struct{}) serverGotPing := make(chan struct{})
go func() { go func() {
// read ping // read ping
server.Read(make([]byte, 1)) var pkt PacketPing
_, err = cdc.UnmarshalBinaryReader(server, &pkt, 1024)
require.Nil(t, err)
serverGotPing <- struct{}{} serverGotPing <- struct{}{}
// respond with pong // respond with pong
_, err = server.Write([]byte{packetTypePong}) _, err = server.Write(cdc.MustMarshalBinary(PacketPong{}))
require.Nil(t, err) require.Nil(t, err)
time.Sleep(mconn.config.PingInterval) time.Sleep(mconn.config.PingInterval)
// read ping // read ping
server.Read(make([]byte, 1)) _, err = cdc.UnmarshalBinaryReader(server, &pkt, 1024)
require.Nil(t, err)
// respond with pong // respond with pong
_, err = server.Write([]byte{packetTypePong}) _, err = server.Write(cdc.MustMarshalBinary(PacketPong{}))
require.Nil(t, err) require.Nil(t, err)
}() }()
<-serverGotPing <-serverGotPing
@@ -290,8 +293,6 @@ func TestMConnectionPingPongs(t *testing.T) {
} }
func TestMConnectionStopsAndReturnsError(t *testing.T) { func TestMConnectionStopsAndReturnsError(t *testing.T) {
assert, require := assert.New(t), require.New(t)
server, client := NetPipe() server, client := NetPipe()
defer server.Close() // nolint: errcheck defer server.Close() // nolint: errcheck
defer client.Close() // nolint: errcheck defer client.Close() // nolint: errcheck
@@ -306,7 +307,7 @@ func TestMConnectionStopsAndReturnsError(t *testing.T) {
} }
mconn := createMConnectionWithCallbacks(client, onReceive, onError) mconn := createMConnectionWithCallbacks(client, onReceive, onError)
err := mconn.Start() err := mconn.Start()
require.Nil(err) require.Nil(t, err)
defer mconn.Stop() defer mconn.Stop()
if err := client.Close(); err != nil { if err := client.Close(); err != nil {
@@ -317,14 +318,14 @@ func TestMConnectionStopsAndReturnsError(t *testing.T) {
case receivedBytes := <-receivedCh: case receivedBytes := <-receivedCh:
t.Fatalf("Expected error, got %v", receivedBytes) t.Fatalf("Expected error, got %v", receivedBytes)
case err := <-errorsCh: case err := <-errorsCh:
assert.NotNil(err) assert.NotNil(t, err)
assert.False(mconn.IsRunning()) assert.False(t, mconn.IsRunning())
case <-time.After(500 * time.Millisecond): case <-time.After(500 * time.Millisecond):
t.Fatal("Did not receive error in 500ms") t.Fatal("Did not receive error in 500ms")
} }
} }
func newClientAndServerConnsForReadErrors(require *require.Assertions, chOnErr chan struct{}) (*MConnection, *MConnection) { func newClientAndServerConnsForReadErrors(t *testing.T, chOnErr chan struct{}) (*MConnection, *MConnection) {
server, client := NetPipe() server, client := NetPipe()
onReceive := func(chID byte, msgBytes []byte) {} onReceive := func(chID byte, msgBytes []byte) {}
@@ -338,7 +339,7 @@ func newClientAndServerConnsForReadErrors(require *require.Assertions, chOnErr c
mconnClient := NewMConnection(client, chDescs, onReceive, onError) mconnClient := NewMConnection(client, chDescs, onReceive, onError)
mconnClient.SetLogger(log.TestingLogger().With("module", "client")) mconnClient.SetLogger(log.TestingLogger().With("module", "client"))
err := mconnClient.Start() err := mconnClient.Start()
require.Nil(err) require.Nil(t, err)
// create server conn with 1 channel // create server conn with 1 channel
// it fires on chOnErr when there's an error // it fires on chOnErr when there's an error
@@ -349,7 +350,7 @@ func newClientAndServerConnsForReadErrors(require *require.Assertions, chOnErr c
mconnServer := createMConnectionWithCallbacks(server, onReceive, onError) mconnServer := createMConnectionWithCallbacks(server, onReceive, onError)
mconnServer.SetLogger(serverLogger) mconnServer.SetLogger(serverLogger)
err = mconnServer.Start() err = mconnServer.Start()
require.Nil(err) require.Nil(t, err)
return mconnClient, mconnServer return mconnClient, mconnServer
} }
@@ -364,50 +365,45 @@ func expectSend(ch chan struct{}) bool {
} }
func TestMConnectionReadErrorBadEncoding(t *testing.T) { func TestMConnectionReadErrorBadEncoding(t *testing.T) {
assert, require := assert.New(t), require.New(t)
chOnErr := make(chan struct{}) chOnErr := make(chan struct{})
mconnClient, mconnServer := newClientAndServerConnsForReadErrors(require, chOnErr) mconnClient, mconnServer := newClientAndServerConnsForReadErrors(t, chOnErr)
defer mconnClient.Stop() defer mconnClient.Stop()
defer mconnServer.Stop() defer mconnServer.Stop()
client := mconnClient.conn client := mconnClient.conn
msg := "Ant-Man"
// send badly encoded msgPacket // send badly encoded msgPacket
var n int bz := cdc.MustMarshalBinary(PacketMsg{})
var err error bz[4] += 0x01 // Invalid prefix bytes.
wire.WriteByte(packetTypeMsg, client, &n, &err)
wire.WriteByteSlice([]byte(msg), client, &n, &err) // Write it.
assert.True(expectSend(chOnErr), "badly encoded msgPacket") _, err := client.Write(bz)
assert.Nil(t, err)
assert.True(t, expectSend(chOnErr), "badly encoded msgPacket")
} }
func TestMConnectionReadErrorUnknownChannel(t *testing.T) { func TestMConnectionReadErrorUnknownChannel(t *testing.T) {
assert, require := assert.New(t), require.New(t)
chOnErr := make(chan struct{}) chOnErr := make(chan struct{})
mconnClient, mconnServer := newClientAndServerConnsForReadErrors(require, chOnErr) mconnClient, mconnServer := newClientAndServerConnsForReadErrors(t, chOnErr)
defer mconnClient.Stop() defer mconnClient.Stop()
defer mconnServer.Stop() defer mconnServer.Stop()
msg := "Ant-Man" msg := []byte("Ant-Man")
// fail to send msg on channel unknown by client // fail to send msg on channel unknown by client
assert.False(mconnClient.Send(0x03, msg)) assert.False(t, mconnClient.Send(0x03, msg))
// send msg on channel unknown by the server. // send msg on channel unknown by the server.
// should cause an error // should cause an error
assert.True(mconnClient.Send(0x02, msg)) assert.True(t, mconnClient.Send(0x02, msg))
assert.True(expectSend(chOnErr), "unknown channel") assert.True(t, expectSend(chOnErr), "unknown channel")
} }
func TestMConnectionReadErrorLongMessage(t *testing.T) { func TestMConnectionReadErrorLongMessage(t *testing.T) {
assert, require := assert.New(t), require.New(t)
chOnErr := make(chan struct{}) chOnErr := make(chan struct{})
chOnRcv := make(chan struct{}) chOnRcv := make(chan struct{})
mconnClient, mconnServer := newClientAndServerConnsForReadErrors(require, chOnErr) mconnClient, mconnServer := newClientAndServerConnsForReadErrors(t, chOnErr)
defer mconnClient.Stop() defer mconnClient.Stop()
defer mconnServer.Stop() defer mconnServer.Stop()
@@ -418,65 +414,81 @@ func TestMConnectionReadErrorLongMessage(t *testing.T) {
client := mconnClient.conn client := mconnClient.conn
// send msg that's just right // send msg that's just right
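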
var n int
var err error var err error
packet := msgPacket{ var buf = new(bytes.Buffer)
// - Uvarint length of MustMarshalBinary(packet) = 1 or 2 bytes
// (as long as it's less than 16,384 bytes)
// - Prefix bytes = 4 bytes
// - ChannelID field key + byte = 2 bytes
// - EOF field key + byte = 2 bytes
// - Bytes field key = 1 bytes
// - Uvarint length of MustMarshalBinary(bytes) = 1 or 2 bytes
// - Struct terminator = 1 byte
// = up to 14 bytes overhead for the packet.
var packet = PacketMsg{
ChannelID: 0x01, ChannelID: 0x01,
Bytes: make([]byte, mconnClient.config.maxMsgPacketTotalSize()-5),
EOF: 1, EOF: 1,
Bytes: make([]byte, mconnClient.config.MaxPacketMsgPayloadSize),
} }
writeMsgPacketTo(packet, client, &n, &err) _, err = cdc.MarshalBinaryWriter(buf, packet)
assert.True(expectSend(chOnRcv), "msg just right") assert.Nil(t, err)
_, err = client.Write(buf.Bytes())
assert.Nil(t, err)
assert.True(t, expectSend(chOnRcv), "msg just right")
assert.False(t, expectSend(chOnErr), "msg just right")
// send msg that's too long // send msg that's too long
packet = msgPacket{ buf = new(bytes.Buffer)
packet = PacketMsg{
ChannelID: 0x01, ChannelID: 0x01,
Bytes: make([]byte, mconnClient.config.maxMsgPacketTotalSize()-4),
EOF: 1, EOF: 1,
Bytes: make([]byte, mconnClient.config.MaxPacketMsgPayloadSize+1),
} }
writeMsgPacketTo(packet, client, &n, &err) _, err = cdc.MarshalBinaryWriter(buf, packet)
assert.True(expectSend(chOnErr), "msg too long") assert.Nil(t, err)
_, err = client.Write(buf.Bytes())
assert.NotNil(t, err)
assert.False(t, expectSend(chOnRcv), "msg too long")
assert.True(t, expectSend(chOnErr), "msg too long")
} }
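Spelling out the overhead accounting from the comment above: up to 2 bytes for the outer uvarint length, 4 bytes of prefix, 2 bytes each for the ChannelID and EOF fields, 1 byte for the Bytes field key, up to 2 bytes for the payload's own uvarint length, and 1 byte for the struct terminator, i.e. at most 2+4+2+2+1+2+1 = 14 bytes of overhead. With the default 1024-byte MaxPacketMsgPayloadSize, a full PacketMsg therefore encodes to at most 1024 + 14 = 1038 bytes on the wire, and the second packet above, whose payload is one byte over the limit, is the one expected to trip the error path.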
func TestMConnectionReadErrorUnknownMsgType(t *testing.T) { func TestMConnectionReadErrorUnknownMsgType(t *testing.T) {
assert, require := assert.New(t), require.New(t)
chOnErr := make(chan struct{}) chOnErr := make(chan struct{})
mconnClient, mconnServer := newClientAndServerConnsForReadErrors(require, chOnErr) mconnClient, mconnServer := newClientAndServerConnsForReadErrors(t, chOnErr)
defer mconnClient.Stop() defer mconnClient.Stop()
defer mconnServer.Stop() defer mconnServer.Stop()
// send msg with unknown msg type // send msg with unknown msg type
var n int err := error(nil)
var err error err = amino.EncodeUvarint(mconnClient.conn, 4)
wire.WriteByte(0x04, mconnClient.conn, &n, &err) assert.Nil(t, err)
assert.True(expectSend(chOnErr), "unknown msg type") _, err = mconnClient.conn.Write([]byte{0xFF, 0xFF, 0xFF, 0xFF})
assert.Nil(t, err)
assert.True(t, expectSend(chOnErr), "unknown msg type")
} }
func TestMConnectionTrySend(t *testing.T) { func TestMConnectionTrySend(t *testing.T) {
assert, require := assert.New(t), require.New(t)
server, client := NetPipe() server, client := NetPipe()
defer server.Close() defer server.Close()
defer client.Close() defer client.Close()
mconn := createTestMConnection(client) mconn := createTestMConnection(client)
err := mconn.Start() err := mconn.Start()
require.Nil(err) require.Nil(t, err)
defer mconn.Stop() defer mconn.Stop()
msg := "Semicolon-Woman" msg := []byte("Semicolon-Woman")
resultCh := make(chan string, 2) resultCh := make(chan string, 2)
assert.True(mconn.TrySend(0x01, msg)) assert.True(t, mconn.TrySend(0x01, msg))
server.Read(make([]byte, len(msg))) server.Read(make([]byte, len(msg)))
assert.True(mconn.CanSend(0x01)) assert.True(t, mconn.CanSend(0x01))
assert.True(mconn.TrySend(0x01, msg)) assert.True(t, mconn.TrySend(0x01, msg))
assert.False(mconn.CanSend(0x01)) assert.False(t, mconn.CanSend(0x01))
go func() { go func() {
mconn.TrySend(0x01, msg) mconn.TrySend(0x01, msg)
resultCh <- "TrySend" resultCh <- "TrySend"
}() }()
assert.False(mconn.CanSend(0x01)) assert.False(t, mconn.CanSend(0x01))
assert.False(mconn.TrySend(0x01, msg)) assert.False(t, mconn.TrySend(0x01, msg))
assert.Equal("TrySend", <-resultCh) assert.Equal(t, "TrySend", <-resultCh)
} }


@@ -21,16 +21,14 @@ import (
"golang.org/x/crypto/ripemd160" "golang.org/x/crypto/ripemd160"
"github.com/tendermint/go-crypto" "github.com/tendermint/go-crypto"
"github.com/tendermint/go-wire"
cmn "github.com/tendermint/tmlibs/common" cmn "github.com/tendermint/tmlibs/common"
) )
// 2 + 1024 == 1026 total frame size // 4 + 1024 == 1028 total frame size
const dataLenSize = 2 // uint16 to describe the length, is <= dataMaxSize const dataLenSize = 4
const dataMaxSize = 1024 const dataMaxSize = 1024
const totalFrameSize = dataMaxSize + dataLenSize const totalFrameSize = dataMaxSize + dataLenSize
const sealedFrameSize = totalFrameSize + secretbox.Overhead const sealedFrameSize = totalFrameSize + secretbox.Overhead
const authSigMsgSize = (32 + 1) + (64 + 1) // fixed size (length prefixed) byte arrays
// Implements net.Conn // Implements net.Conn
type SecretConnection struct { type SecretConnection struct {
@@ -113,7 +111,7 @@ func (sc *SecretConnection) RemotePubKey() crypto.PubKey {
// CONTRACT: data smaller than dataMaxSize is read atomically. // CONTRACT: data smaller than dataMaxSize is read atomically.
func (sc *SecretConnection) Write(data []byte) (n int, err error) { func (sc *SecretConnection) Write(data []byte) (n int, err error) {
for 0 < len(data) { for 0 < len(data) {
var frame []byte = make([]byte, totalFrameSize) var frame = make([]byte, totalFrameSize)
var chunk []byte var chunk []byte
if dataMaxSize < len(data) { if dataMaxSize < len(data) {
chunk = data[:dataMaxSize] chunk = data[:dataMaxSize]
@@ -123,7 +121,7 @@ func (sc *SecretConnection) Write(data []byte) (n int, err error) {
data = nil data = nil
} }
chunkLength := len(chunk) chunkLength := len(chunk)
binary.BigEndian.PutUint16(frame, uint16(chunkLength)) binary.BigEndian.PutUint32(frame, uint32(chunkLength))
copy(frame[dataLenSize:], chunk) copy(frame[dataLenSize:], chunk)
// encrypt the frame // encrypt the frame
@@ -136,9 +134,8 @@ func (sc *SecretConnection) Write(data []byte) (n int, err error) {
_, err := sc.conn.Write(sealedFrame) _, err := sc.conn.Write(sealedFrame)
if err != nil { if err != nil {
return n, err return n, err
} else {
n += len(chunk)
} }
n += len(chunk)
} }
return return
} }
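For reference, a minimal sketch of the new plaintext frame layout before sealing (constants taken from the diff; buildFrame is a hypothetical helper, not the actual implementation): a chunk of at most dataMaxSize bytes is written after a 4-byte big-endian length prefix into a fixed totalFrameSize buffer, and it is this frame that secretbox seals, adding secretbox.Overhead bytes.

package main

import (
    "encoding/binary"
    "fmt"
)

const (
    dataLenSize    = 4    // uint32 length prefix (was 2 bytes / uint16)
    dataMaxSize    = 1024 // maximum chunk size per frame
    totalFrameSize = dataMaxSize + dataLenSize
)

// buildFrame lays out a single plaintext frame: 4-byte big-endian chunk length
// followed by the chunk, zero-padded to totalFrameSize before sealing.
func buildFrame(chunk []byte) ([]byte, error) {
    if len(chunk) > dataMaxSize {
        return nil, fmt.Errorf("chunk too large: %d > %d", len(chunk), dataMaxSize)
    }
    frame := make([]byte, totalFrameSize)
    binary.BigEndian.PutUint32(frame, uint32(len(chunk)))
    copy(frame[dataLenSize:], chunk)
    return frame, nil
}

func main() {
    frame, _ := buildFrame([]byte("hello"))
    fmt.Println(binary.BigEndian.Uint32(frame), len(frame)) // 5 1028
}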
@@ -146,8 +143,8 @@ func (sc *SecretConnection) Write(data []byte) (n int, err error) {
// CONTRACT: data smaller than dataMaxSize is read atomically. // CONTRACT: data smaller than dataMaxSize is read atomically.
func (sc *SecretConnection) Read(data []byte) (n int, err error) { func (sc *SecretConnection) Read(data []byte) (n int, err error) {
if 0 < len(sc.recvBuffer) { if 0 < len(sc.recvBuffer) {
n_ := copy(data, sc.recvBuffer) n = copy(data, sc.recvBuffer)
sc.recvBuffer = sc.recvBuffer[n_:] sc.recvBuffer = sc.recvBuffer[n:]
return return
} }
@@ -167,7 +164,7 @@ func (sc *SecretConnection) Read(data []byte) (n int, err error) {
incr2Nonce(sc.recvNonce) incr2Nonce(sc.recvNonce)
// end decryption // end decryption
var chunkLength = binary.BigEndian.Uint16(frame) // read the first two bytes var chunkLength = binary.BigEndian.Uint32(frame) // read the first four bytes
if chunkLength > dataMaxSize { if chunkLength > dataMaxSize {
return 0, errors.New("chunkLength is greater than dataMaxSize") return 0, errors.New("chunkLength is greater than dataMaxSize")
} }
@@ -194,32 +191,43 @@ func genEphKeys() (ephPub, ephPriv *[32]byte) {
var err error var err error
ephPub, ephPriv, err = box.GenerateKey(crand.Reader) ephPub, ephPriv, err = box.GenerateKey(crand.Reader)
if err != nil { if err != nil {
cmn.PanicCrisis("Could not generate ephemeral keypairs") panic("Could not generate ephemeral keypairs")
} }
return return
} }
func shareEphPubKey(conn io.ReadWriteCloser, locEphPub *[32]byte) (remEphPub *[32]byte, err error) { func shareEphPubKey(conn io.ReadWriteCloser, locEphPub *[32]byte) (remEphPub *[32]byte, err error) {
var err1, err2 error
cmn.Parallel( // Send our pubkey and receive theirs in tandem.
func() { var trs, _ = cmn.Parallel(
_, err1 = conn.Write(locEphPub[:]) func(_ int) (val interface{}, err error, abort bool) {
var _, err1 = cdc.MarshalBinaryWriter(conn, locEphPub)
if err1 != nil {
return nil, err1, true // abort
} else {
return nil, nil, false
}
}, },
func() { func(_ int) (val interface{}, err error, abort bool) {
remEphPub = new([32]byte) var _remEphPub [32]byte
_, err2 = io.ReadFull(conn, remEphPub[:]) var _, err2 = cdc.UnmarshalBinaryReader(conn, &_remEphPub, 1024*1024) // TODO
if err2 != nil {
return nil, err2, true // abort
} else {
return _remEphPub, nil, false
}
}, },
) )
if err1 != nil { // If error:
return nil, err1 if trs.FirstError() != nil {
} err = trs.FirstError()
if err2 != nil { return
return nil, err2
} }
return remEphPub, nil // Otherwise:
var _remEphPub = trs.FirstValue().([32]byte)
return &_remEphPub, nil
} }
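The rewritten handshake helpers lean on the reworked cmn.Parallel, where each task is a func(int) (value, error, abort) and the caller inspects the aggregated result set. A rough usage sketch under that assumption (illustrative values only):

package main

import (
    "fmt"

    cmn "github.com/tendermint/tmlibs/common"
)

func main() {
    // Two tasks run concurrently; returning abort == true cancels the batch,
    // which is surfaced to the caller as ok == false.
    trs, ok := cmn.Parallel(
        func(_ int) (interface{}, error, bool) {
            return "our half of the handshake", nil, false
        },
        func(_ int) (interface{}, error, bool) {
            return nil, nil, false
        },
    )
    if !ok || trs.FirstError() != nil {
        fmt.Println("exchange failed:", trs.FirstError())
        return
    }
    // FirstValue is used in the diff above to pull out the first non-nil result.
    fmt.Println(trs.FirstValue())
}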
func computeSharedSecret(remPubKey, locPrivKey *[32]byte) (shrSecret *[32]byte) { func computeSharedSecret(remPubKey, locPrivKey *[32]byte) (shrSecret *[32]byte) {
@@ -268,33 +276,37 @@ type authSigMessage struct {
Sig crypto.Signature Sig crypto.Signature
} }
func shareAuthSignature(sc *SecretConnection, pubKey crypto.PubKey, signature crypto.Signature) (*authSigMessage, error) { func shareAuthSignature(sc *SecretConnection, pubKey crypto.PubKey, signature crypto.Signature) (recvMsg authSigMessage, err error) {
var recvMsg authSigMessage
var err1, err2 error
cmn.Parallel( // Send our info and receive theirs in tandem.
func() { var trs, _ = cmn.Parallel(
msgBytes := wire.BinaryBytes(authSigMessage{pubKey.Wrap(), signature.Wrap()}) func(_ int) (val interface{}, err error, abort bool) {
_, err1 = sc.Write(msgBytes) var _, err1 = cdc.MarshalBinaryWriter(sc, authSigMessage{pubKey, signature})
if err1 != nil {
return nil, err1, true // abort
} else {
return nil, nil, false
}
}, },
func() { func(_ int) (val interface{}, err error, abort bool) {
readBuffer := make([]byte, authSigMsgSize) var _recvMsg authSigMessage
_, err2 = io.ReadFull(sc, readBuffer) var _, err2 = cdc.UnmarshalBinaryReader(sc, &_recvMsg, 1024*1024) // TODO
if err2 != nil { if err2 != nil {
return nil, err2, true // abort
} else {
return _recvMsg, nil, false
}
},
)
// If error:
if trs.FirstError() != nil {
err = trs.FirstError()
return return
} }
n := int(0) // not used.
recvMsg = wire.ReadBinary(authSigMessage{}, bytes.NewBuffer(readBuffer), authSigMsgSize, &n, &err2).(authSigMessage)
})
if err1 != nil { var _recvMsg = trs.FirstValue().(authSigMessage)
return nil, err1 return _recvMsg, nil
}
if err2 != nil {
return nil, err2
}
return &recvMsg, nil
} }
//-------------------------------------------------------------------------------- //--------------------------------------------------------------------------------
@@ -328,7 +340,7 @@ func incr2Nonce(nonce *[24]byte) {
// increment nonce big-endian by 1 with wraparound. // increment nonce big-endian by 1 with wraparound.
func incrNonce(nonce *[24]byte) { func incrNonce(nonce *[24]byte) {
for i := 23; 0 <= i; i-- { for i := 23; 0 <= i; i-- {
nonce[i] += 1 nonce[i]++
if nonce[i] != 0 { if nonce[i] != 0 {
return return
} }
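A quick self-contained check of the big-endian increment above (the loop is reproduced verbatim so the carry behaviour can be seen in isolation):

package main

import "fmt"

// Same loop as incrNonce above: big-endian increment by 1 with wraparound.
func incrNonce(nonce *[24]byte) {
    for i := 23; 0 <= i; i-- {
        nonce[i]++
        if nonce[i] != 0 {
            return
        }
    }
}

func main() {
    var n [24]byte
    n[22], n[23] = 0x01, 0xFF
    incrNonce(&n)
    fmt.Printf("% x\n", n[20:]) // 00 00 02 00 — the carry propagates into byte 22
}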


@@ -1,9 +1,12 @@
package conn package conn
import ( import (
"fmt"
"io" "io"
"testing" "testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
crypto "github.com/tendermint/go-crypto" crypto "github.com/tendermint/go-crypto"
cmn "github.com/tendermint/tmlibs/common" cmn "github.com/tendermint/tmlibs/common"
) )
@@ -30,39 +33,49 @@ func makeKVStoreConnPair() (fooConn, barConn kvstoreConn) {
} }
func makeSecretConnPair(tb testing.TB) (fooSecConn, barSecConn *SecretConnection) { func makeSecretConnPair(tb testing.TB) (fooSecConn, barSecConn *SecretConnection) {
fooConn, barConn := makeKVStoreConnPair()
fooPrvKey := crypto.GenPrivKeyEd25519().Wrap()
fooPubKey := fooPrvKey.PubKey()
barPrvKey := crypto.GenPrivKeyEd25519().Wrap()
barPubKey := barPrvKey.PubKey()
cmn.Parallel( var fooConn, barConn = makeKVStoreConnPair()
func() { var fooPrvKey = crypto.GenPrivKeyEd25519()
var err error var fooPubKey = fooPrvKey.PubKey()
var barPrvKey = crypto.GenPrivKeyEd25519()
var barPubKey = barPrvKey.PubKey()
// Make connections from both sides in parallel.
var trs, ok = cmn.Parallel(
func(_ int) (val interface{}, err error, abort bool) {
fooSecConn, err = MakeSecretConnection(fooConn, fooPrvKey) fooSecConn, err = MakeSecretConnection(fooConn, fooPrvKey)
if err != nil { if err != nil {
tb.Errorf("Failed to establish SecretConnection for foo: %v", err) tb.Errorf("Failed to establish SecretConnection for foo: %v", err)
return return nil, err, true
} }
remotePubBytes := fooSecConn.RemotePubKey() remotePubBytes := fooSecConn.RemotePubKey()
if !remotePubBytes.Equals(barPubKey) { if !remotePubBytes.Equals(barPubKey) {
tb.Errorf("Unexpected fooSecConn.RemotePubKey. Expected %v, got %v", err = fmt.Errorf("Unexpected fooSecConn.RemotePubKey. Expected %v, got %v",
barPubKey, fooSecConn.RemotePubKey()) barPubKey, fooSecConn.RemotePubKey())
tb.Error(err)
return nil, err, false
} }
return nil, nil, false
}, },
func() { func(_ int) (val interface{}, err error, abort bool) {
var err error
barSecConn, err = MakeSecretConnection(barConn, barPrvKey) barSecConn, err = MakeSecretConnection(barConn, barPrvKey)
if barSecConn == nil { if barSecConn == nil {
tb.Errorf("Failed to establish SecretConnection for bar: %v", err) tb.Errorf("Failed to establish SecretConnection for bar: %v", err)
return return nil, err, true
} }
remotePubBytes := barSecConn.RemotePubKey() remotePubBytes := barSecConn.RemotePubKey()
if !remotePubBytes.Equals(fooPubKey) { if !remotePubBytes.Equals(fooPubKey) {
tb.Errorf("Unexpected barSecConn.RemotePubKey. Expected %v, got %v", err = fmt.Errorf("Unexpected barSecConn.RemotePubKey. Expected %v, got %v",
fooPubKey, barSecConn.RemotePubKey()) fooPubKey, barSecConn.RemotePubKey())
tb.Error(err)
return nil, nil, false
} }
}) return nil, nil, false
},
)
require.Nil(tb, trs.FirstError())
require.True(tb, ok, "Unexpected task abortion")
return return
} }
@@ -89,59 +102,76 @@ func TestSecretConnectionReadWrite(t *testing.T) {
} }
// A helper that will run with (fooConn, fooWrites, fooReads) and vice versa // A helper that will run with (fooConn, fooWrites, fooReads) and vice versa
genNodeRunner := func(nodeConn kvstoreConn, nodeWrites []string, nodeReads *[]string) func() { genNodeRunner := func(id string, nodeConn kvstoreConn, nodeWrites []string, nodeReads *[]string) cmn.Task {
return func() { return func(_ int) (interface{}, error, bool) {
// Node handshake // Initialize the node's private key and establish a secret connection through nodeConn.
nodePrvKey := crypto.GenPrivKeyEd25519().Wrap() nodePrvKey := crypto.GenPrivKeyEd25519()
nodeSecretConn, err := MakeSecretConnection(nodeConn, nodePrvKey) nodeSecretConn, err := MakeSecretConnection(nodeConn, nodePrvKey)
if err != nil { if err != nil {
t.Errorf("Failed to establish SecretConnection for node: %v", err) t.Errorf("Failed to establish SecretConnection for node: %v", err)
return return nil, err, true
} }
// In parallel, handle reads and writes // In parallel, handle some reads and writes.
cmn.Parallel( var trs, ok = cmn.Parallel(
func() { func(_ int) (interface{}, error, bool) {
// Node writes // Node writes:
for _, nodeWrite := range nodeWrites { for _, nodeWrite := range nodeWrites {
n, err := nodeSecretConn.Write([]byte(nodeWrite)) n, err := nodeSecretConn.Write([]byte(nodeWrite))
if err != nil { if err != nil {
t.Errorf("Failed to write to nodeSecretConn: %v", err) t.Errorf("Failed to write to nodeSecretConn: %v", err)
return return nil, err, true
} }
if n != len(nodeWrite) { if n != len(nodeWrite) {
t.Errorf("Failed to write all bytes. Expected %v, wrote %v", len(nodeWrite), n) err = fmt.Errorf("Failed to write all bytes. Expected %v, wrote %v", len(nodeWrite), n)
return t.Error(err)
return nil, err, true
} }
} }
if err := nodeConn.PipeWriter.Close(); err != nil { if err := nodeConn.PipeWriter.Close(); err != nil {
t.Error(err) t.Error(err)
return nil, err, true
} }
return nil, nil, false
}, },
func() { func(_ int) (interface{}, error, bool) {
// Node reads // Node reads:
readBuffer := make([]byte, dataMaxSize) readBuffer := make([]byte, dataMaxSize)
for { for {
n, err := nodeSecretConn.Read(readBuffer) n, err := nodeSecretConn.Read(readBuffer)
if err == io.EOF { if err == io.EOF {
return return nil, nil, false
} else if err != nil { } else if err != nil {
t.Errorf("Failed to read from nodeSecretConn: %v", err) t.Errorf("Failed to read from nodeSecretConn: %v", err)
return return nil, err, true
} }
*nodeReads = append(*nodeReads, string(readBuffer[:n])) *nodeReads = append(*nodeReads, string(readBuffer[:n]))
} }
if err := nodeConn.PipeReader.Close(); err != nil { if err := nodeConn.PipeReader.Close(); err != nil {
t.Error(err) t.Error(err)
return nil, err, true
} }
}) return nil, nil, false
},
)
assert.True(t, ok, "Unexpected task abortion")
// If error:
if trs.FirstError() != nil {
return nil, trs.FirstError(), true
}
// Otherwise:
return nil, nil, false
} }
} }
// Run foo & bar in parallel // Run foo & bar in parallel
cmn.Parallel( var trs, ok = cmn.Parallel(
genNodeRunner(fooConn, fooWrites, &fooReads), genNodeRunner("foo", fooConn, fooWrites, &fooReads),
genNodeRunner(barConn, barWrites, &barReads), genNodeRunner("bar", barConn, barWrites, &barReads),
) )
require.Nil(t, trs.FirstError())
require.True(t, ok, "unexpected task abortion")
// A helper to ensure that the writes and reads match. // A helper to ensure that the writes and reads match.
// Additionally, small writes (<= dataMaxSize) must be atomically read. // Additionally, small writes (<= dataMaxSize) must be atomically read.
@@ -152,7 +182,7 @@ func TestSecretConnectionReadWrite(t *testing.T) {
var readCount = 0 var readCount = 0
for _, readChunk := range reads { for _, readChunk := range reads {
read += readChunk read += readChunk
readCount += 1 readCount++
if len(write) <= len(read) { if len(write) <= len(read) {
break break
} }
@@ -209,3 +239,12 @@ func BenchmarkSecretConnection(b *testing.B) {
} }
//barSecConn.Close() race condition //barSecConn.Close() race condition
} }
func fingerprint(bz []byte) []byte {
const fbsize = 40
if len(bz) < fbsize {
return bz
} else {
return bz[:fbsize]
}
}

p2p/conn/wire.go (new file, +13 lines)

@@ -0,0 +1,13 @@
package conn
import (
amino "github.com/tendermint/go-amino"
"github.com/tendermint/go-crypto"
)
var cdc *amino.Codec = amino.NewCodec()
func init() {
crypto.RegisterAmino(cdc)
RegisterPacket(cdc)
}
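As a rough illustration (a hypothetical helper, assuming it sits in this same conn package), the codec registered here lets concrete packets be marshaled and then decoded back through the Packet interface, which is how the tests above exercise PacketPing and PacketPong:

package conn

import "bytes"

// pingRoundTrip is a hypothetical helper: marshal a concrete PacketPing with
// the package codec, then decode it back as the Packet interface.
func pingRoundTrip() (Packet, error) {
    bz := cdc.MustMarshalBinary(PacketPing{})

    var packet Packet
    _, err := cdc.UnmarshalBinaryReader(bytes.NewReader(bz), &packet, 1024)
    return packet, err
}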


@@ -48,12 +48,12 @@ func (p *peer) Status() tmconn.ConnectionStatus {
} }
// Send does not do anything and just returns true. // Send does not do anything and just returns true.
func (p *peer) Send(byte, interface{}) bool { func (p *peer) Send(byte, []byte) bool {
return true return true
} }
// TrySend does not do anything and just returns true. // TrySend does not do anything and just returns true.
func (p *peer) TrySend(byte, interface{}) bool { func (p *peer) TrySend(byte, []byte) bool {
return true return true
} }


@@ -1,10 +1,11 @@
package p2p package p2p
import ( import (
"math/rand"
"net" "net"
"sync" "sync"
"time" "time"
cmn "github.com/tendermint/tmlibs/common"
) )
const ( const (
@@ -124,7 +125,7 @@ func (fc *FuzzedConnection) SetWriteDeadline(t time.Time) error {
func (fc *FuzzedConnection) randomDuration() time.Duration { func (fc *FuzzedConnection) randomDuration() time.Duration {
maxDelayMillis := int(fc.config.MaxDelay.Nanoseconds() / 1000) maxDelayMillis := int(fc.config.MaxDelay.Nanoseconds() / 1000)
return time.Millisecond * time.Duration(rand.Int()%maxDelayMillis) // nolint: gas return time.Millisecond * time.Duration(cmn.RandInt()%maxDelayMillis) // nolint: gas
} }
// implements the fuzz (delay, kill conn) // implements the fuzz (delay, kill conn)
@@ -137,7 +138,7 @@ func (fc *FuzzedConnection) fuzz() bool {
switch fc.config.Mode { switch fc.config.Mode {
case FuzzModeDrop: case FuzzModeDrop:
// randomly drop the r/w, drop the conn, or sleep // randomly drop the r/w, drop the conn, or sleep
r := rand.Float64() r := cmn.RandFloat64()
if r <= fc.config.ProbDropRW { if r <= fc.config.ProbDropRW {
return true return true
} else if r < fc.config.ProbDropRW+fc.config.ProbDropConn { } else if r < fc.config.ProbDropRW+fc.config.ProbDropConn {


@@ -3,7 +3,6 @@ package p2p
import ( import (
"bytes" "bytes"
"encoding/hex" "encoding/hex"
"encoding/json"
"fmt" "fmt"
"io/ioutil" "io/ioutil"
@@ -48,36 +47,35 @@ func PubKeyToID(pubKey crypto.PubKey) ID {
// If the file does not exist, it generates and saves a new NodeKey. // If the file does not exist, it generates and saves a new NodeKey.
func LoadOrGenNodeKey(filePath string) (*NodeKey, error) { func LoadOrGenNodeKey(filePath string) (*NodeKey, error) {
if cmn.FileExists(filePath) { if cmn.FileExists(filePath) {
nodeKey, err := loadNodeKey(filePath) nodeKey, err := LoadNodeKey(filePath)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return nodeKey, nil return nodeKey, nil
} else {
return genNodeKey(filePath)
} }
return genNodeKey(filePath)
} }
func loadNodeKey(filePath string) (*NodeKey, error) { func LoadNodeKey(filePath string) (*NodeKey, error) {
jsonBytes, err := ioutil.ReadFile(filePath) jsonBytes, err := ioutil.ReadFile(filePath)
if err != nil { if err != nil {
return nil, err return nil, err
} }
nodeKey := new(NodeKey) nodeKey := new(NodeKey)
err = json.Unmarshal(jsonBytes, nodeKey) err = cdc.UnmarshalJSON(jsonBytes, nodeKey)
if err != nil { if err != nil {
return nil, fmt.Errorf("Error reading NodeKey from %v: %v\n", filePath, err) return nil, fmt.Errorf("Error reading NodeKey from %v: %v", filePath, err)
} }
return nodeKey, nil return nodeKey, nil
} }
func genNodeKey(filePath string) (*NodeKey, error) { func genNodeKey(filePath string) (*NodeKey, error) {
privKey := crypto.GenPrivKeyEd25519().Wrap() privKey := crypto.GenPrivKeyEd25519()
nodeKey := &NodeKey{ nodeKey := &NodeKey{
PrivKey: privKey, PrivKey: privKey,
} }
jsonBytes, err := json.Marshal(nodeKey) jsonBytes, err := cdc.MarshalJSON(nodeKey)
if err != nil { if err != nil {
return nil, err return nil, err
} }
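A short usage sketch for the key helpers above (the path is hypothetical, and deriving the node ID via PubKeyToID is an assumption based on the exported helpers shown in this diff; treat it as illustrative rather than the canonical call site):

package main

import (
    "fmt"

    "github.com/tendermint/tendermint/p2p"
)

func main() {
    // Loads the key file if it exists, otherwise generates and saves a new one.
    nodeKey, err := p2p.LoadOrGenNodeKey("config/node_key.json") // hypothetical path
    if err != nil {
        panic(err)
    }
    fmt.Println("node ID:", p2p.PubKeyToID(nodeKey.PrivKey.PubKey()))
}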

Some files were not shown because too many files have changed in this diff.