mirror of
https://github.com/fluencelabs/tendermint
synced 2025-07-16 04:41:59 +00:00
Compare commits
246 Commits
Author | SHA1 | Date | |
---|---|---|---|
|
7682ad9a60 | ||
|
3e92d295e4 | ||
|
661d336dd5 | ||
|
d1a00c684e | ||
|
db034e079a | ||
|
7d983a548b | ||
|
8311f5c611 | ||
|
628791e5a5 | ||
|
df857266b6 | ||
|
ddb3d8945d | ||
|
7f5908b622 | ||
|
24f7b9387a | ||
|
756818f940 | ||
|
2131f8d330 | ||
|
8ae2ffda89 | ||
|
75b97a5a65 | ||
|
7b99039c34 | ||
|
3ca7b10ad4 | ||
|
779c2a22d0 | ||
|
147a18b34a | ||
|
4382c8d28b | ||
|
944ebccfe9 | ||
|
fd1b0b997a | ||
|
abe912c610 | ||
|
66fcdf7c7a | ||
|
4e13a19339 | ||
|
7dd3c007c7 | ||
|
0d392a0442 | ||
|
7e4a704bd1 | ||
|
bf5e956087 | ||
|
2ccc3326ec | ||
|
83f7d5c95a | ||
|
2c129447fd | ||
|
b50339e8e7 | ||
|
8ff5b365dd | ||
|
1f0985689d | ||
|
3089bbf2b8 | ||
|
5feeb65cf0 | ||
|
715e74186c | ||
|
3a03fe5a15 | ||
|
d343560108 | ||
|
2b6db268cf | ||
|
14abdd57f3 | ||
|
1f3e4d2d9a | ||
|
29bfcb0a31 | ||
|
301845943c | ||
|
b0017c5460 | ||
|
0b61d22652 | ||
|
70b95135e6 | ||
|
a3d925ac1d | ||
|
cf9a03f698 | ||
|
7f8240dfde | ||
|
f8b152972f | ||
|
95875c55fc | ||
|
7fadde0b37 | ||
|
e36c79f713 | ||
|
2252071866 | ||
|
b700ed8e31 | ||
|
e1fd587ddd | ||
|
f74de4cb86 | ||
|
6c1572c9b8 | ||
|
60a1f49a5c | ||
|
740167202f | ||
|
e0017c8a97 | ||
|
4a78c1bb28 | ||
|
1d3f723ccc | ||
|
77408b7bde | ||
|
0cd1bd6d8b | ||
|
881d2ce31e | ||
|
ad79ead93d | ||
|
17a748c796 | ||
|
583599b19f | ||
|
044fe56b43 | ||
|
b46da19b74 | ||
|
d635783d07 | ||
|
f79e13af38 | ||
|
34e6474ad9 | ||
|
5c96e0c812 | ||
|
921a2b41f0 | ||
|
a8cec967ac | ||
|
044c500cc0 | ||
|
a917ec9ea2 | ||
|
d4ccc88676 | ||
|
12c85c4e60 | ||
|
90c0267bc1 | ||
|
8963bf08aa | ||
|
ede4f818fd | ||
|
126f63c0a2 | ||
|
aea8629272 | ||
|
cc50dc076a | ||
|
bf576f0097 | ||
|
377747b061 | ||
|
870a98ccc3 | ||
|
8eda3efa28 | ||
|
e2c3cc9685 | ||
|
2a6e71a753 | ||
|
340d273f83 | ||
|
54c63726b0 | ||
|
cb80ab2965 | ||
|
c48e772115 | ||
|
aa78fc14b5 | ||
|
399fb9aa70 | ||
|
ec3c91ac14 | ||
|
fae0603413 | ||
|
9af837c24d | ||
|
cc2b418f7f | ||
|
6ddc30ffc7 | ||
|
aa7d63f2ff | ||
|
50f2ef548c | ||
|
b9637f7185 | ||
|
88138c38cf | ||
|
5ad0da1750 | ||
|
9deb647303 | ||
|
b856c45b9c | ||
|
f0f1ebe013 | ||
|
34beff117a | ||
|
e2e8746044 | ||
|
78446fd99c | ||
|
daa258ea6d | ||
|
54c85ef8b9 | ||
|
de512c5923 | ||
|
8fd133ff19 | ||
|
f5ba931115 | ||
|
3370e40fe1 | ||
|
e509e8f354 | ||
|
aaef2f7c84 | ||
|
369aa5854f | ||
|
9d12e485e5 | ||
|
dc54fc707e | ||
|
212a5ebc2e | ||
|
2d2e769e96 | ||
|
bba35c1486 | ||
|
0a902ebf61 | ||
|
70c0f4b936 | ||
|
e19fa59b63 | ||
|
30d351b0c1 | ||
|
818314c5db | ||
|
700e4bb29d | ||
|
8ad4a1341f | ||
|
d87acc2d1b | ||
|
f2349b1092 | ||
|
7824b66f5b | ||
|
83048fb2fe | ||
|
9dde1a0bd4 | ||
|
bc9092d7db | ||
|
56b959b9f9 | ||
|
b3796e0aaa | ||
|
d24083b257 | ||
|
83ec9f773a | ||
|
9ceccbe9a4 | ||
|
35a4912449 | ||
|
01e008cedd | ||
|
d8af26e75a | ||
|
03ddb109ec | ||
|
2fd8496bc1 | ||
|
8d7640894d | ||
|
c5a657f540 | ||
|
1ea43e513d | ||
|
c9e11de2a7 | ||
|
f644fc5725 | ||
|
49278a7f9c | ||
|
b0728260e9 | ||
|
92ada55e5a | ||
|
8840ae6ae2 | ||
|
21dab22f17 | ||
|
10a849c27e | ||
|
236489aecf | ||
|
7108c66e3b | ||
|
797acbe911 | ||
|
0bf66deb3c | ||
|
d2f3e9faf4 | ||
|
37f1390473 | ||
|
9b5f21a650 | ||
|
8267920749 | ||
|
6c85e4be4f | ||
|
23a87304cc | ||
|
c14b39da5f | ||
|
57eee2466b | ||
|
5d66d1c28c | ||
|
fb47ca6d35 | ||
|
0013053fae | ||
|
1abbb11b44 | ||
|
54903adeff | ||
|
c08618f7e9 | ||
|
d578f7f81e | ||
|
043c6018b4 | ||
|
d0965cca05 | ||
|
b8ac67e240 | ||
|
350d584af8 | ||
|
e3e75376ec | ||
|
ab753abfa0 | ||
|
bab7877fa1 | ||
|
10f8101314 | ||
|
530626dab7 | ||
|
b96d28a42b | ||
|
3444bee47f | ||
|
cf3abe5096 | ||
|
ecdda69fab | ||
|
fc3fe9292f | ||
|
d396053872 | ||
|
e9a2389300 | ||
|
678a9a2e42 | ||
|
124032e3e9 | ||
|
4beac54bd9 | ||
|
39493bcd9a | ||
|
f96771e753 | ||
|
62f94a7948 | ||
|
e9b7221292 | ||
|
5fea1d2675 | ||
|
7a492e3612 | ||
|
b893df3348 | ||
|
742b5b705f | ||
|
0153d21f3b | ||
|
695ad5fe2d | ||
|
d8ca0580a8 | ||
|
525fc0ae5b | ||
|
311f18bebf | ||
|
d49a5166ac | ||
|
e560dd839f | ||
|
6f8d385dfa | ||
|
086544e367 | ||
|
eed0297ed5 | ||
|
b467515719 | ||
|
75df0d91ba | ||
|
05c0dfac12 | ||
|
bcde80fc4f | ||
|
5f6b996d22 | ||
|
74a3a2b56a | ||
|
b07d01f102 | ||
|
eed3959749 | ||
|
278b2344fe | ||
|
05095954c9 | ||
|
6fef4b080e | ||
|
259111d0e8 | ||
|
a707748893 | ||
|
130ae133c1 | ||
|
1d387d7452 | ||
|
612726d9f6 | ||
|
5888ddaab1 | ||
|
e6cecb9595 | ||
|
e36e507463 | ||
|
3c10f7a122 | ||
|
ca8c34f966 | ||
|
8355fee16a | ||
|
850da13622 | ||
|
a676b2aa10 |
@@ -13,3 +13,7 @@ indent_style = tab
|
||||
|
||||
[*.sh]
|
||||
indent_style = tab
|
||||
|
||||
[*.proto]
|
||||
indent_style = space
|
||||
indent_size = 2
|
||||
|
124
CHANGELOG.md
124
CHANGELOG.md
@@ -1,10 +1,100 @@
|
||||
# Changelog
|
||||
|
||||
## Roadmap
|
||||
|
||||
BREAKING CHANGES:
|
||||
- Upgrade the header to support better proofs on validators, results, evidence, and possibly more
|
||||
- Better support for injecting randomness
|
||||
- Pass evidence/voteInfo through ABCI
|
||||
- Upgrade consensus for more real-time use of evidence
|
||||
|
||||
FEATURES:
|
||||
- Peer reputation management
|
||||
- Use the chain as its own CA for nodes and validators
|
||||
- Tooling to run multiple blockchains/apps, possibly in a single process
|
||||
- State syncing (without transaction replay)
|
||||
- Improved support for querying history and state
|
||||
- Add authentication and rate-limiting to the RPC
|
||||
|
||||
IMPROVEMENTS:
|
||||
- Improve subtleties around mempool caching and logic
|
||||
- Consensus optimizations:
|
||||
- cache block parts for faster agreement after round changes
|
||||
- propagate block parts rarest first
|
||||
- Better testing of the consensus state machine (ie. use a DSL)
|
||||
- Auto compiled serialization/deserialization code instead of go-wire reflection
|
||||
|
||||
BUG FIXES:
|
||||
- Graceful handling/recovery for apps that have non-determinism or fail to halt
|
||||
- Graceful handling/recovery for violations of safety, or liveness
|
||||
|
||||
## 0.11.0 (September 22, 2017)
|
||||
|
||||
BREAKING:
|
||||
- state: every validator set change is persisted to disk, which required some changes to the `State` structure.
|
||||
- cmd: if there is no genesis, exit immediately instead of waiting around for one to show.
|
||||
- p2p: new `p2p.Peer` interface used for all reactor methods (instead of `*p2p.Peer` struct).
|
||||
- types: `Signer.Sign` returns an error.
|
||||
- rpc: various changes to match JSONRPC spec (http://www.jsonrpc.org/specification), including breaking ones:
|
||||
- requests that previously returned HTTP code 4XX now return 200 with an error code in the JSONRPC.
|
||||
- `rpctypes.RPCResponse` uses new `RPCError` type instead of `string`.
|
||||
- abci: Info, BeginBlock, InitChain all take structs
|
||||
|
||||
FEATURES:
|
||||
- rpc: `/validators?height=X` allows querying of validators at previous heights.
|
||||
- rpc: Leaving the `height` param empty for `/block`, `/validators`, and `/commit` will return the value for the latest height.
|
||||
|
||||
IMPROVEMENTS:
|
||||
- docs: Moved all docs from the website and tools repo in, converted to `.rst`, and cleaned up for presentation on `tendermint.readthedocs.io`
|
||||
|
||||
BUG FIXES:
|
||||
- fix WAL opening issue on Windows
|
||||
|
||||
## 0.10.4 (September 5, 2017)
|
||||
|
||||
IMPROVEMENTS:
|
||||
- docs: Added Slate docs to each rpc function (see rpc/core)
|
||||
- docs: Ported all website docs to Read The Docs
|
||||
- config: expose some p2p params to tweak performance: RecvRate, SendRate, and MaxMsgPacketPayloadSize
|
||||
- rpc: Upgrade the websocket client and server, including improved auto reconnect, and proper ping/pong
|
||||
|
||||
BUG FIXES:
|
||||
- consensus: fix panic on getVoteBitArray
|
||||
- consensus: hang instead of panicking on byzantine consensus failures
|
||||
- cmd: dont load config for version command
|
||||
|
||||
## 0.10.3 (August 10, 2017)
|
||||
|
||||
FEATURES:
|
||||
- control over empty block production:
|
||||
- new flag, `--consensus.create_empty_blocks`; when set to false, blocks are only created when there are txs or when the AppHash changes.
|
||||
- new config option, `consensus.create_empty_blocks_interval`; an empty block is created after this many seconds.
|
||||
- in normal operation, `create_empty_blocks = true` and `create_empty_blocks_interval = 0`, so blocks are being created all the time (as in all previous versions of tendermint). The number of empty blocks can be reduced by increasing `create_empty_blocks_interval` or by setting `create_empty_blocks = false`.
|
||||
- new `TxsAvailable()` method added to Mempool that returns a channel which fires when txs are available.
|
||||
- new heartbeat message added to consensus reactor to notify peers that a node is waiting for txs before entering propose step.
|
||||
- rpc: Add `syncing` field to response returned by `/status`. Is `true` while in fast-sync mode.
|
||||
|
||||
IMPROVEMENTS:
|
||||
- various improvements to documentation and code comments
|
||||
|
||||
BUG FIXES:
|
||||
- mempool: pass height into constructor so it doesn't always start at 0
|
||||
|
||||
## 0.10.2 (July 10, 2017)
|
||||
|
||||
FEATURES:
|
||||
- Enable lower latency block commits by adding consensus reactor sleep durations and p2p flush throttle timeout to the config
|
||||
|
||||
IMPROVEMENTS:
|
||||
- More detailed logging in the consensus reactor and state machine
|
||||
- More in-code documentation for many exposed functions, especially in consensus/reactor.go and p2p/switch.go
|
||||
- Improved readability for some function definitions and code blocks with long lines
|
||||
|
||||
## 0.10.1 (June 28, 2017)
|
||||
|
||||
FEATURES:
|
||||
- Use `--trace` to get stack traces for logged errors
|
||||
- types: GenesisDoc.ValidatorHash returns the hash of the genesis validator set
|
||||
- types: GenesisDoc.ValidatorHash returns the hash of the genesis validator set
|
||||
- types: GenesisDocFromFile parses a GenesisDoc from a JSON file
|
||||
|
||||
IMPROVEMENTS:
|
||||
@@ -28,7 +118,7 @@ Also includes the Grand Repo-Merge of 2017.
|
||||
BREAKING CHANGES:
|
||||
|
||||
- Config and Flags:
|
||||
- The `config` map is replaced with a [`Config` struct](https://github.com/tendermint/tendermint/blob/master/config/config.go#L11),
|
||||
- The `config` map is replaced with a [`Config` struct](https://github.com/tendermint/tendermint/blob/master/config/config.go#L11),
|
||||
containing substructs: `BaseConfig`, `P2PConfig`, `MempoolConfig`, `ConsensusConfig`, `RPCConfig`
|
||||
- This affects the following flags:
|
||||
- `--seeds` is now `--p2p.seeds`
|
||||
@@ -41,16 +131,16 @@ containing substructs: `BaseConfig`, `P2PConfig`, `MempoolConfig`, `ConsensusCon
|
||||
```
|
||||
[p2p]
|
||||
laddr="tcp://1.2.3.4:46656"
|
||||
|
||||
|
||||
[consensus]
|
||||
timeout_propose=1000
|
||||
```
|
||||
- Use viper and `DefaultConfig() / TestConfig()` functions to handle defaults, and remove `config/tendermint` and `config/tendermint_test`
|
||||
- Change some function and method signatures to
|
||||
- Change some function and method signatures to
|
||||
- Change some [function and method signatures](https://gist.github.com/ebuchman/640d5fc6c2605f73497992fe107ebe0b) to accommodate new config
|
||||
|
||||
- Logger
|
||||
- Replace static `log15` logger with a simple interface, and provide a new implementation using `go-kit`.
|
||||
- Replace static `log15` logger with a simple interface, and provide a new implementation using `go-kit`.
|
||||
See our new [logging library](https://github.com/tendermint/tmlibs/log) and [blog post](https://tendermint.com/blog/abstracting-the-logger-interface-in-go) for more details
|
||||
- Levels `warn` and `notice` are removed (you may need to change them in your `config.toml`!)
|
||||
- Change some [function and method signatures](https://gist.github.com/ebuchman/640d5fc6c2605f73497992fe107ebe0b) to accept a logger
|
||||
@@ -93,7 +183,7 @@ IMPROVEMENTS:
|
||||
- Limit `/blockchain_info` call to return a maximum of 20 blocks
|
||||
- Use `.Wrap()` and `.Unwrap()` instead of eg. `PubKeyS` for `go-crypto` types
|
||||
- RPC JSON responses use pretty printing (via `json.MarshalIndent`)
|
||||
- Color code different instances of the consensus for tests
|
||||
- Color code different instances of the consensus for tests
|
||||
- Isolate viper to `cmd/tendermint/commands` and do not read config from file for tests
|
||||
|
||||
|
||||
@@ -121,7 +211,7 @@ IMPROVEMENTS:
|
||||
- WAL uses #ENDHEIGHT instead of #HEIGHT (#HEIGHT will stop working in 0.10.0)
|
||||
- Peers included via `--seeds`, under `seeds` in the config, or in `/dial_seeds` are now persistent, and will be reconnected to if the connection breaks
|
||||
|
||||
BUG FIXES:
|
||||
BUG FIXES:
|
||||
|
||||
- Fix bug in fast-sync where we stop syncing after a peer is removed, even if they're re-added later
|
||||
- Fix handshake replay to handle validator set changes and results of DeliverTx when we crash after app.Commit but before state.Save()
|
||||
@@ -137,7 +227,7 @@ message RequestQuery{
|
||||
bytes data = 1;
|
||||
string path = 2;
|
||||
uint64 height = 3;
|
||||
bool prove = 4;
|
||||
bool prove = 4;
|
||||
}
|
||||
|
||||
message ResponseQuery{
|
||||
@@ -161,7 +251,7 @@ type BlockMeta struct {
|
||||
}
|
||||
```
|
||||
|
||||
- `ValidatorSet.Proposer` is exposed as a field and persisted with the `State`. Use `GetProposer()` to initialize or update after validator-set changes.
|
||||
- `ValidatorSet.Proposer` is exposed as a field and persisted with the `State`. Use `GetProposer()` to initialize or update after validator-set changes.
|
||||
|
||||
- `tendermint gen_validator` command output is now pure JSON
|
||||
|
||||
@@ -204,7 +294,7 @@ type BlockID struct {
|
||||
}
|
||||
```
|
||||
|
||||
- `Vote` data type now includes validator address and index:
|
||||
- `Vote` data type now includes validator address and index:
|
||||
|
||||
```
|
||||
type Vote struct {
|
||||
@@ -224,7 +314,7 @@ type Vote struct {
|
||||
|
||||
FEATURES:
|
||||
|
||||
- New message type on the ConsensusReactor, `Maj23Msg`, for peers to alert others they've seen a Maj23,
|
||||
- New message type on the ConsensusReactor, `Maj23Msg`, for peers to alert others they've seen a Maj23,
|
||||
in order to track and handle conflicting votes intelligently to prevent Byzantine faults from causing halts:
|
||||
|
||||
```
|
||||
@@ -247,7 +337,7 @@ IMPROVEMENTS:
|
||||
- Less verbose logging
|
||||
- Better test coverage (37% -> 49%)
|
||||
- Canonical SignBytes for signable types
|
||||
- Write-Ahead Log for Mempool and Consensus via tmlibs/autofile
|
||||
- Write-Ahead Log for Mempool and Consensus via tmlibs/autofile
|
||||
- Better in-process testing for the consensus reactor and byzantine faults
|
||||
- Better crash/restart testing for individual nodes at preset failure points, and of networks at arbitrary points
|
||||
- Better abstraction over timeout mechanics
|
||||
@@ -327,7 +417,7 @@ FEATURES:
|
||||
- TMSP and RPC support TCP and UNIX sockets
|
||||
- Addition config options including block size and consensus parameters
|
||||
- New WAL mode `cswal_light`; logs only the validator's own votes
|
||||
- New RPC endpoints:
|
||||
- New RPC endpoints:
|
||||
- for starting/stopping profilers, and for updating config
|
||||
- `/broadcast_tx_commit`, returns when tx is included in a block, else an error
|
||||
- `/unsafe_flush_mempool`, empties the mempool
|
||||
@@ -348,14 +438,14 @@ BUG FIXES:
|
||||
|
||||
Strict versioning only began with the release of v0.7.0, in late summer 2016.
|
||||
The project itself began in early summer 2014 and was workable decentralized cryptocurrency software by the end of that year.
|
||||
Through the course of 2015, in collaboration with Eris Industries (now Monax Industries),
|
||||
Through the course of 2015, in collaboration with Eris Industries (now Monax Industries),
|
||||
many additional features were integrated, including an implementation from scratch of the Ethereum Virtual Machine.
|
||||
That implementation now forms the heart of [ErisDB](https://github.com/eris-ltd/eris-db).
|
||||
That implementation now forms the heart of [Burrow](https://github.com/hyperledger/burrow).
|
||||
In the later half of 2015, the consensus algorithm was upgraded with a more asynchronous design and a more deterministic and robust implementation.
|
||||
|
||||
By late 2015, frustration with the difficulty of forking a large monolithic stack to create alternative cryptocurrency designs led to the
|
||||
By late 2015, frustration with the difficulty of forking a large monolithic stack to create alternative cryptocurrency designs led to the
|
||||
invention of the Application Blockchain Interface (ABCI), then called the Tendermint Socket Protocol (TMSP).
|
||||
The Ethereum Virtual Machine and various other transaction features were removed, and Tendermint was whittled down to a core consensus engine
|
||||
driving an application running in another process.
|
||||
driving an application running in another process.
|
||||
The ABCI interface and implementation were iterated on and improved over the course of 2016,
|
||||
until versioned history kicked in with v0.7.0.
|
||||
|
@@ -1,16 +1,77 @@
|
||||
# Contributing guidelines
|
||||
# Contributing
|
||||
|
||||
**Thanks for considering making contributions to Tendermint!**
|
||||
Thank you for considering making contributions to Tendermint and related repositories! Start by taking a look at the [coding repo](https://github.com/tendermint/coding) for overall information on repository workflow and standards.
|
||||
|
||||
Please follow standard github best practices: fork the repo, **branch from the
|
||||
tip of develop**, make some commits, test your code changes with `make test`,
|
||||
and submit a pull request to develop.
|
||||
Please follow standard github best practices: fork the repo, branch from the tip of develop, make some commits, and submit a pull request to develop. See the [open issues](https://github.com/tendermint/tendermint/issues) for things we need help with!
|
||||
|
||||
See the [open issues](https://github.com/tendermint/tendermint/issues) for
|
||||
things we need help with!
|
||||
Please make sure to use `gofmt` before every commit - the easiest way to do this is have your editor run it for you upon saving a file.
|
||||
|
||||
Please make sure to use `gofmt` before every commit - the easiest way to do
|
||||
this is have your editor run it for you upon saving a file.
|
||||
## Forking
|
||||
|
||||
You can read the full guide [on our
|
||||
site](https://tendermint.com/docs/guides/contributing).
|
||||
Please note that Go requires code to live under absolute paths, which complicates forking.
|
||||
While my fork lives at `https://github.com/ebuchman/tendermint`,
|
||||
the code should never exist at `$GOPATH/src/github.com/ebuchman/tendermint`.
|
||||
Instead, we use `git remote` to add the fork as a new remote for the original repo,
|
||||
`$GOPATH/src/github.com/tendermint/tendermint `, and do all the work there.
|
||||
|
||||
For instance, to create a fork and work on a branch of it, I would:
|
||||
|
||||
* Create the fork on github, using the fork button.
|
||||
* Go to the original repo checked out locally (ie. `$GOPATH/src/github.com/tendermint/tendermint`)
|
||||
* `git remote rename origin upstream`
|
||||
* `git remote add origin git@github.com:ebuchman/basecoin.git`
|
||||
|
||||
Now `origin` refers to my fork and `upstream` refers to the tendermint version.
|
||||
So I can `git push -u origin master` to update my fork, and make pull requests to tendermint from there.
|
||||
Of course, replace `ebuchman` with your git handle.
|
||||
|
||||
To pull in updates from the origin repo, run
|
||||
|
||||
* `git fetch upstream`
|
||||
* `git rebase upstream/master` (or whatever branch you want)
|
||||
|
||||
Please don't make Pull Requests to `master`.
|
||||
|
||||
## Dependencies
|
||||
|
||||
We use [glide](https://github.com/masterminds/glide) to manage dependencies.
|
||||
That said, the master branch of every Tendermint repository should just build with `go get`, which means they should be kept up-to-date with their dependencies so we can get away with telling people they can just `go get` our software.
|
||||
Since some dependencies are not under our control, a third party may break our build, in which case we can fall back on `glide install`. Even for dependencies under our control, glide helps us keeps multiple repos in sync as they evolve. Anything with an executable, such as apps, tools, and the core, should use glide.
|
||||
|
||||
Run `bash scripts/glide/status.sh` to get a list of vendored dependencies that may not be up-to-date.
|
||||
|
||||
## Testing
|
||||
|
||||
All repos should be hooked up to circle.
|
||||
If they have `.go` files in the root directory, they will be automatically tested by circle using `go test -v -race ./...`. If not, they will need a `circle.yml`. Ideally, every repo has a `Makefile` that defines `make test` and includes its continuous integration status using a badge in the `README.md`.
|
||||
|
||||
## Branching Model and Release
|
||||
|
||||
User-facing repos should adhere to the branching model: http://nvie.com/posts/a-successful-git-branching-model/.
|
||||
That is, these repos should be well versioned, and any merge to master requires a version bump and tagged release.
|
||||
|
||||
Libraries need not follow the model strictly, but would be wise to,
|
||||
especially `go-p2p` and `go-rpc`, as their versions are referenced in tendermint core.
|
||||
|
||||
### Development Procedure:
|
||||
- the latest state of development is on `develop`
|
||||
- `develop` must never fail `make test`
|
||||
- no --force onto `develop` (except when reverting a broken commit, which should seldom happen)
|
||||
- create a development branch either on github.com/tendermint/tendermint, or your fork (using `git add origin`)
|
||||
- before submitting a pull request, begin `git rebase` on top of `develop`
|
||||
|
||||
### Pull Merge Procedure:
|
||||
- ensure pull branch is rebased on develop
|
||||
- run `make test` to ensure that all tests pass
|
||||
- merge pull request
|
||||
- the `unstable` branch may be used to aggregate pull merges before testing once
|
||||
- push master may request that pull requests be rebased on top of `unstable`
|
||||
|
||||
### Release Procedure:
|
||||
- start on `develop`
|
||||
- run integration tests (see `test_integrations` in Makefile)
|
||||
- prepare changelog/release issue
|
||||
- bump versions
|
||||
- push to release-vX.X.X to run the extended integration tests on the CI
|
||||
- merge to master
|
||||
- merge master back to develop
|
||||
|
58
INSTALL.md
58
INSTALL.md
@@ -1,57 +1 @@
|
||||
# Install Go
|
||||
|
||||
[Install Go, set the `GOPATH`, and put `GOPATH/bin` on your `PATH`](https://github.com/tendermint/tendermint/wiki/Setting-GOPATH).
|
||||
|
||||
# Install Tendermint
|
||||
|
||||
You should be able to install the latest with a simple `go get -u github.com/tendermint/tendermint/cmd/tendermint`.
|
||||
The `-u` makes sure all dependencies are updated as well.
|
||||
|
||||
Run `tendermint version` and `tendermint --help`.
|
||||
|
||||
If the install failed, see [vendored dependencies below](#vendored-dependencies).
|
||||
|
||||
To start a one-node blockchain with a simple in-process application:
|
||||
|
||||
```
|
||||
tendermint init
|
||||
tendermint node --proxy_app=dummy
|
||||
```
|
||||
|
||||
See the [application developers guide](https://github.com/tendermint/tendermint/wiki/Application-Developers) for more details on building and running applications.
|
||||
|
||||
|
||||
## Vendored dependencies
|
||||
|
||||
If the `go get` failed, updated dependencies may have broken the build.
|
||||
Install the correct version of each dependency using `glide`.
|
||||
|
||||
First, install `glide`:
|
||||
|
||||
```
|
||||
go get github.com/Masterminds/glide
|
||||
```
|
||||
|
||||
Now, fetch the dependencies and install them with `glide` and `go`:
|
||||
|
||||
```
|
||||
cd $GOPATH/src/github.com/tendermint/tendermint
|
||||
glide install
|
||||
go install ./cmd/tendermint
|
||||
```
|
||||
|
||||
Sometimes `glide install` is painfully slow. Hang in there champ.
|
||||
|
||||
The latest Tendermint Core version is now installed. Check by running `tendermint version`.
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
If `go get` failing bothers you, fetch the code using `git`:
|
||||
|
||||
```
|
||||
mkdir -p $GOPATH/src/github.com/tendermint
|
||||
git clone https://github.com/tendermint/tendermint $GOPATH/src/github.com/tendermint/tendermint
|
||||
cd $GOPATH/src/github.com/tendermint/tendermint
|
||||
glide install
|
||||
go install ./cmd/tendermint
|
||||
```
|
||||
The installation guide has moved to the [docs directory](docs/guides/install-from-source.md) in order to easily be rendered by the website. Please update your links accordingly.
|
||||
|
91
README.md
91
README.md
@@ -1,77 +1,104 @@
|
||||
# Tendermint
|
||||
|
||||
[Byzantine-Fault Tolerant](https://en.wikipedia.org/wiki/Byzantine_fault_tolerance)
|
||||
[State Machine Replication](https://en.wikipedia.org/wiki/State_machine_replication).
|
||||
[State Machine Replication](https://en.wikipedia.org/wiki/State_machine_replication).
|
||||
Or [Blockchain](https://en.wikipedia.org/wiki/Blockchain_(database)) for short.
|
||||
|
||||
[](https://github.com/tendermint/tendermint/releases/latest)
|
||||
[](https://godoc.org/github.com/tendermint/tendermint)
|
||||
[](http://forum.tendermint.com:3000/)
|
||||
[](https://cosmos.rocket.chat/)
|
||||
[](https://github.com/tendermint/tendermint/blob/master/LICENSE)
|
||||
[](https://github.com/tendermint/tendermint)
|
||||
|
||||
|
||||
Branch | Tests | Coverage | Report Card
|
||||
----------|-------|----------|-------------
|
||||
develop | [](https://circleci.com/gh/tendermint/tendermint/tree/develop) | [](https://codecov.io/gh/tendermint/tendermint) | [](https://goreportcard.com/report/github.com/tendermint/tendermint/tree/develop)
|
||||
master | [](https://circleci.com/gh/tendermint/tendermint/tree/master) | [](https://codecov.io/gh/tendermint/tendermint) | [](https://goreportcard.com/report/github.com/tendermint/tendermint/tree/master)
|
||||
Branch | Tests | Coverage
|
||||
----------|-------|----------
|
||||
master | [](https://circleci.com/gh/tendermint/tendermint/tree/master) | [](https://codecov.io/gh/tendermint/tendermint)
|
||||
develop | [](https://circleci.com/gh/tendermint/tendermint/tree/develop) | [](https://codecov.io/gh/tendermint/tendermint)
|
||||
|
||||
_NOTE: This is alpha software. Please contact us if you intend to run it in production._
|
||||
|
||||
Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine, written in any programming language,
|
||||
Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine - written in any programming language -
|
||||
and securely replicates it on many machines.
|
||||
|
||||
For more background, see the [introduction](https://tendermint.com/intro).
|
||||
|
||||
To get started developing applications, see the [application developers guide](https://tendermint.com/docs/guides/app-development).
|
||||
|
||||
### Code of Conduct
|
||||
Please read, understand and adhere to our [code of conduct](CODE_OF_CONDUCT.md).
|
||||
For more information, from introduction to install to application development, [Read The Docs](http://tendermint.readthedocs.io/projects/tools/en/master).
|
||||
|
||||
## Install
|
||||
|
||||
To download pre-built binaries, see our [downloads page](https://tendermint.com/intro/getting-started/download).
|
||||
To download pre-built binaries, see our [downloads page](https://tendermint.com/downloads).
|
||||
|
||||
To install from source, you should be able to:
|
||||
|
||||
`go get -u github.com/tendermint/tendermint/cmd/tendermint`
|
||||
|
||||
For more details (or if it fails), see the [install guide](https://tendermint.com/docs/guides/install).
|
||||
|
||||
## Contributing
|
||||
|
||||
Yay open source! Please see our [contributing guidelines](https://tendermint.com/docs/guides/contributing).
|
||||
For more details (or if it fails), [read the docs](http://tendermint.readthedocs.io/projects/tools/en/master/install.html).
|
||||
|
||||
## Resources
|
||||
|
||||
### Tendermint Core
|
||||
|
||||
- [Introduction](https://tendermint.com/intro)
|
||||
- [Docs](https://tendermint.com/docs)
|
||||
- [Software using Tendermint](https://tendermint.com/ecosystem)
|
||||
All resources involving the use of, building application on, or developing for, tendermint, can be found at [Read The Docs](http://tendermint.readthedocs.io/projects/tools/en/master). Additional information about some - and eventually all - of the sub-projects below, can be found at Read The Docs.
|
||||
|
||||
### Sub-projects
|
||||
|
||||
* [ABCI](http://github.com/tendermint/abci), the Application Blockchain Interface
|
||||
* [Go-Wire](http://github.com/tendermint/go-wire), a deterministic serialization library
|
||||
* [Go-Crypto](http://github.com/tendermint/go-crypto), an elliptic curve cryptography library
|
||||
* [TmLibs](http://github.com/tendermint/tmlibs), an assortment of Go libraries
|
||||
* [Merkleeyes](http://github.com/tendermint/merkleeyes), a balanced, binary Merkle tree for ABCI apps
|
||||
* [Go-Crypto](http://github.com/tendermint/go-crypto), an elliptic curve cryptography library
|
||||
* [TmLibs](http://github.com/tendermint/tmlibs), an assortment of Go libraries used internally
|
||||
* [IAVL](http://github.com/tendermint/iavl), Merkleized IAVL+ Tree implementation
|
||||
|
||||
### Tools
|
||||
* [Deployment, Benchmarking, and Monitoring](https://github.com/tendermint/tools)
|
||||
* [Deployment, Benchmarking, and Monitoring](http://tendermint.readthedocs.io/projects/tools/en/develop/index.html#tendermint-tools)
|
||||
|
||||
### Applications
|
||||
|
||||
* [Ethermint](http://github.com/tendermint/ethermint): Ethereum on Tendermint
|
||||
* [Basecoin](http://github.com/tendermint/basecoin), a cryptocurrency application framework
|
||||
* [Ethermint](http://github.com/tendermint/ethermint); Ethereum on Tendermint
|
||||
* [Cosmos SDK](http://github.com/cosmos/cosmos-sdk); a cryptocurrency application framework
|
||||
|
||||
### More
|
||||
### More
|
||||
|
||||
* [Tendermint Blog](https://tendermint.com/blog)
|
||||
* [Cosmos Blog](https://cosmos.network/blog)
|
||||
* [Original Whitepaper (out-of-date)](http://www.the-blockchain.com/docs/Tendermint%20Consensus%20without%20Mining.pdf)
|
||||
* [Master's Thesis on Tendermint](https://atrium.lib.uoguelph.ca/xmlui/handle/10214/9769)
|
||||
* [Original Whitepaper](https://tendermint.com/static/docs/tendermint.pdf)
|
||||
* [Tendermint Blog](https://blog.cosmos.network/tendermint/home)
|
||||
* [Cosmos Blog](https://blog.cosmos.network)
|
||||
|
||||
## Contributing
|
||||
|
||||
Yay open source! Please see our [contributing guidelines](CONTRIBUTING.md).
|
||||
|
||||
## Versioning
|
||||
|
||||
### SemVer
|
||||
|
||||
Tendermint uses [SemVer](http://semver.org/) to determine when and how the version changes.
|
||||
According to SemVer, anything in the public API can change at any time before version 1.0.0
|
||||
|
||||
To provide some stability to Tendermint users in these 0.X.X days, the MINOR version is used
|
||||
to signal breaking changes across a subset of the total public API. This subset includes all
|
||||
interfaces exposed to other processes (cli, rpc, p2p, etc.), as well as parts of the following packages:
|
||||
|
||||
- types
|
||||
- rpc/client
|
||||
- config
|
||||
- node
|
||||
|
||||
Exported objects in these packages that are not covered by the versioning scheme
|
||||
are explicitly marked by `// UNSTABLE` in their go doc comment and may change at any time.
|
||||
Functions, types, and values in any other package may also change at any time.
|
||||
|
||||
### Upgrades
|
||||
|
||||
In an effort to avoid accumulating technical debt prior to 1.0.0,
|
||||
we do not guarantee that breaking changes (ie. bumps in the MINOR version)
|
||||
will work with existing tendermint blockchains. In these cases you will
|
||||
have to start a new blockchain, or write something custom to get the old
|
||||
data into the new chain.
|
||||
|
||||
However, any bump in the PATCH version should be compatible with existing histories
|
||||
(if not please open an [issue](https://github.com/tendermint/tendermint/issues)).
|
||||
|
||||
## Code of Conduct
|
||||
|
||||
Please read, understand and adhere to our [code of conduct](CODE_OF_CONDUCT.md).
|
||||
|
@@ -1,30 +1,28 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"time"
|
||||
//"encoding/hex"
|
||||
"fmt"
|
||||
|
||||
"github.com/gorilla/websocket"
|
||||
"github.com/tendermint/go-wire"
|
||||
_ "github.com/tendermint/tendermint/rpc/core/types" // Register RPCResponse > Result types
|
||||
"github.com/tendermint/tendermint/rpc/lib/client"
|
||||
"github.com/tendermint/tendermint/rpc/lib/types"
|
||||
. "github.com/tendermint/tmlibs/common"
|
||||
rpcclient "github.com/tendermint/tendermint/rpc/lib/client"
|
||||
cmn "github.com/tendermint/tmlibs/common"
|
||||
)
|
||||
|
||||
func main() {
|
||||
ws := rpcclient.NewWSClient("127.0.0.1:46657", "/websocket")
|
||||
_, err := ws.Start()
|
||||
wsc := rpcclient.NewWSClient("127.0.0.1:46657", "/websocket")
|
||||
_, err := wsc.Start()
|
||||
if err != nil {
|
||||
Exit(err.Error())
|
||||
cmn.Exit(err.Error())
|
||||
}
|
||||
defer wsc.Stop()
|
||||
|
||||
// Read a bunch of responses
|
||||
go func() {
|
||||
for {
|
||||
_, ok := <-ws.ResultsCh
|
||||
_, ok := <-wsc.ResultsCh
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
@@ -37,24 +35,14 @@ func main() {
|
||||
for i := 0; ; i++ {
|
||||
binary.BigEndian.PutUint64(buf, uint64(i))
|
||||
//txBytes := hex.EncodeToString(buf[:n])
|
||||
request, err := rpctypes.MapToRequest("fakeid",
|
||||
"broadcast_tx",
|
||||
map[string]interface{}{"tx": buf[:8]})
|
||||
if err != nil {
|
||||
Exit(err.Error())
|
||||
}
|
||||
reqBytes := wire.JSONBytes(request)
|
||||
//fmt.Println("!!", string(reqBytes))
|
||||
fmt.Print(".")
|
||||
err = ws.WriteMessage(websocket.TextMessage, reqBytes)
|
||||
err = wsc.Call(context.TODO(), "broadcast_tx", map[string]interface{}{"tx": buf[:8]})
|
||||
if err != nil {
|
||||
Exit(err.Error())
|
||||
cmn.Exit(err.Error())
|
||||
}
|
||||
if i%1000 == 0 {
|
||||
fmt.Println(i)
|
||||
}
|
||||
time.Sleep(time.Microsecond * 1000)
|
||||
}
|
||||
|
||||
ws.Stop()
|
||||
}
|
||||
|
@@ -352,7 +352,7 @@ func (peer *bpPeer) setLogger(l log.Logger) {
|
||||
|
||||
func (peer *bpPeer) resetMonitor() {
|
||||
peer.recvMonitor = flow.New(time.Second, time.Second*40)
|
||||
var initialValue = float64(minRecvRate) * math.E
|
||||
initialValue := float64(minRecvRate) * math.E
|
||||
peer.recvMonitor.SetREMA(initialValue)
|
||||
}
|
||||
|
||||
|
@@ -28,7 +28,6 @@ const (
|
||||
statusUpdateIntervalSeconds = 10
|
||||
// check if we should switch to consensus reactor
|
||||
switchToConsensusIntervalSeconds = 1
|
||||
maxBlockchainResponseSize = types.MaxBlockSize + 2
|
||||
)
|
||||
|
||||
type consensusReactor interface {
|
||||
@@ -111,20 +110,20 @@ func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor {
|
||||
}
|
||||
|
||||
// AddPeer implements Reactor by sending our state to peer.
|
||||
func (bcR *BlockchainReactor) AddPeer(peer *p2p.Peer) {
|
||||
func (bcR *BlockchainReactor) AddPeer(peer p2p.Peer) {
|
||||
if !peer.Send(BlockchainChannel, struct{ BlockchainMessage }{&bcStatusResponseMessage{bcR.store.Height()}}) {
|
||||
// doing nothing, will try later in `poolRoutine`
|
||||
}
|
||||
}
|
||||
|
||||
// RemovePeer implements Reactor by removing peer from the pool.
|
||||
func (bcR *BlockchainReactor) RemovePeer(peer *p2p.Peer, reason interface{}) {
|
||||
bcR.pool.RemovePeer(peer.Key)
|
||||
func (bcR *BlockchainReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
|
||||
bcR.pool.RemovePeer(peer.Key())
|
||||
}
|
||||
|
||||
// Receive implements Reactor by handling 4 types of messages (look below).
|
||||
func (bcR *BlockchainReactor) Receive(chID byte, src *p2p.Peer, msgBytes []byte) {
|
||||
_, msg, err := DecodeMessage(msgBytes)
|
||||
func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
|
||||
_, msg, err := DecodeMessage(msgBytes, bcR.maxMsgSize())
|
||||
if err != nil {
|
||||
bcR.Logger.Error("Error decoding message", "err", err)
|
||||
return
|
||||
@@ -148,7 +147,7 @@ func (bcR *BlockchainReactor) Receive(chID byte, src *p2p.Peer, msgBytes []byte)
|
||||
}
|
||||
case *bcBlockResponseMessage:
|
||||
// Got a block.
|
||||
bcR.pool.AddBlock(src.Key, msg.Block, len(msgBytes))
|
||||
bcR.pool.AddBlock(src.Key(), msg.Block, len(msgBytes))
|
||||
case *bcStatusRequestMessage:
|
||||
// Send peer our state.
|
||||
queued := src.TrySend(BlockchainChannel, struct{ BlockchainMessage }{&bcStatusResponseMessage{bcR.store.Height()}})
|
||||
@@ -157,12 +156,18 @@ func (bcR *BlockchainReactor) Receive(chID byte, src *p2p.Peer, msgBytes []byte)
|
||||
}
|
||||
case *bcStatusResponseMessage:
|
||||
// Got a peer status. Unverified.
|
||||
bcR.pool.SetPeerHeight(src.Key, msg.Height)
|
||||
bcR.pool.SetPeerHeight(src.Key(), msg.Height)
|
||||
default:
|
||||
bcR.Logger.Error(cmn.Fmt("Unknown message type %v", reflect.TypeOf(msg)))
|
||||
}
|
||||
}
|
||||
|
||||
// maxMsgSize returns the maximum allowable size of a
|
||||
// message on the blockchain reactor.
|
||||
func (bcR *BlockchainReactor) maxMsgSize() int {
|
||||
return bcR.state.Params().BlockSizeParams.MaxBytes + 2
|
||||
}
|
||||
|
||||
// Handle messages from the poolReactor telling the reactor what to do.
|
||||
// NOTE: Don't sleep in the FOR_LOOP or otherwise slow it down!
|
||||
// (Except for the SYNC_LOOP, which is the primary purpose and must be synchronous.)
|
||||
@@ -221,7 +226,7 @@ FOR_LOOP:
|
||||
// We need both to sync the first block.
|
||||
break SYNC_LOOP
|
||||
}
|
||||
firstParts := first.MakePartSet(types.DefaultBlockPartSize)
|
||||
firstParts := first.MakePartSet(bcR.state.Params().BlockPartSizeBytes)
|
||||
firstPartsHeader := firstParts.Header()
|
||||
// Finally, verify the first block using the second's commit
|
||||
// NOTE: we can probably make this more efficient, but note that calling
|
||||
@@ -290,11 +295,11 @@ var _ = wire.RegisterInterface(
|
||||
|
||||
// DecodeMessage decodes BlockchainMessage.
|
||||
// TODO: ensure that bz is completely read.
|
||||
func DecodeMessage(bz []byte) (msgType byte, msg BlockchainMessage, err error) {
|
||||
func DecodeMessage(bz []byte, maxSize int) (msgType byte, msg BlockchainMessage, err error) {
|
||||
msgType = bz[0]
|
||||
n := int(0)
|
||||
r := bytes.NewReader(bz)
|
||||
msg = wire.ReadBinary(struct{ BlockchainMessage }{}, r, maxBlockchainResponseSize, &n, &err).(struct{ BlockchainMessage }).BlockchainMessage
|
||||
msg = wire.ReadBinary(struct{ BlockchainMessage }{}, r, maxSize, &n, &err).(struct{ BlockchainMessage }).BlockchainMessage
|
||||
if err != nil && n != len(bz) {
|
||||
err = errors.New("DecodeMessage() had bytes left over")
|
||||
}
|
||||
|
@@ -9,18 +9,16 @@ import (
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
var genValidatorCmd = &cobra.Command{
|
||||
// GenValidatorCmd allows the generation of a keypair for a
|
||||
// validator.
|
||||
var GenValidatorCmd = &cobra.Command{
|
||||
Use: "gen_validator",
|
||||
Short: "Generate new validator keypair",
|
||||
Run: genValidator,
|
||||
}
|
||||
|
||||
func init() {
|
||||
RootCmd.AddCommand(genValidatorCmd)
|
||||
}
|
||||
|
||||
func genValidator(cmd *cobra.Command, args []string) {
|
||||
privValidator := types.GenPrivValidator()
|
||||
privValidator := types.GenPrivValidatorFS("")
|
||||
privValidatorJSONBytes, _ := json.MarshalIndent(privValidator, "", "\t")
|
||||
fmt.Printf(`%v
|
||||
`, string(privValidatorJSONBytes))
|
||||
|
@@ -9,21 +9,17 @@ import (
|
||||
cmn "github.com/tendermint/tmlibs/common"
|
||||
)
|
||||
|
||||
var initFilesCmd = &cobra.Command{
|
||||
// InitFilesCmd initialises a fresh Tendermint Core instance.
|
||||
var InitFilesCmd = &cobra.Command{
|
||||
Use: "init",
|
||||
Short: "Initialize Tendermint",
|
||||
Run: initFiles,
|
||||
}
|
||||
|
||||
func init() {
|
||||
RootCmd.AddCommand(initFilesCmd)
|
||||
}
|
||||
|
||||
func initFiles(cmd *cobra.Command, args []string) {
|
||||
privValFile := config.PrivValidatorFile()
|
||||
if _, err := os.Stat(privValFile); os.IsNotExist(err) {
|
||||
privValidator := types.GenPrivValidator()
|
||||
privValidator.SetFile(privValFile)
|
||||
privValidator := types.GenPrivValidatorFS(privValFile)
|
||||
privValidator.Save()
|
||||
|
||||
genFile := config.GenesisFile()
|
||||
@@ -33,8 +29,8 @@ func initFiles(cmd *cobra.Command, args []string) {
|
||||
ChainID: cmn.Fmt("test-chain-%v", cmn.RandStr(6)),
|
||||
}
|
||||
genDoc.Validators = []types.GenesisValidator{types.GenesisValidator{
|
||||
PubKey: privValidator.PubKey,
|
||||
Amount: 10,
|
||||
PubKey: privValidator.GetPubKey(),
|
||||
Power: 10,
|
||||
}}
|
||||
|
||||
genDoc.SaveAs(genFile)
|
||||
|
@@ -9,16 +9,13 @@ import (
|
||||
"github.com/tendermint/tendermint/p2p/upnp"
|
||||
)
|
||||
|
||||
var probeUpnpCmd = &cobra.Command{
|
||||
// ProbeUpnpCmd adds capabilities to test the UPnP functionality.
|
||||
var ProbeUpnpCmd = &cobra.Command{
|
||||
Use: "probe_upnp",
|
||||
Short: "Test UPnP functionality",
|
||||
RunE: probeUpnp,
|
||||
}
|
||||
|
||||
func init() {
|
||||
RootCmd.AddCommand(probeUpnpCmd)
|
||||
}
|
||||
|
||||
func probeUpnp(cmd *cobra.Command, args []string) error {
|
||||
capabilities, err := upnp.Probe(logger)
|
||||
if err != nil {
|
||||
|
@@ -6,7 +6,8 @@ import (
|
||||
"github.com/tendermint/tendermint/consensus"
|
||||
)
|
||||
|
||||
var replayCmd = &cobra.Command{
|
||||
// ReplayCmd allows replaying of messages from the WAL.
|
||||
var ReplayCmd = &cobra.Command{
|
||||
Use: "replay",
|
||||
Short: "Replay messages from WAL",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
@@ -14,15 +15,12 @@ var replayCmd = &cobra.Command{
|
||||
},
|
||||
}
|
||||
|
||||
var replayConsoleCmd = &cobra.Command{
|
||||
// ReplayConsoleCmd allows replaying of messages from the WAL in a
|
||||
// console.
|
||||
var ReplayConsoleCmd = &cobra.Command{
|
||||
Use: "replay_console",
|
||||
Short: "Replay messages from WAL in a console",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
consensus.RunReplayFile(config.BaseConfig, config.Consensus, true)
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
RootCmd.AddCommand(replayCmd)
|
||||
RootCmd.AddCommand(replayConsoleCmd)
|
||||
}
|
||||
|
@@ -9,21 +9,27 @@ import (
|
||||
"github.com/tendermint/tmlibs/log"
|
||||
)
|
||||
|
||||
var resetAllCmd = &cobra.Command{
|
||||
// ResetAllCmd removes the database of this Tendermint core
|
||||
// instance.
|
||||
var ResetAllCmd = &cobra.Command{
|
||||
Use: "unsafe_reset_all",
|
||||
Short: "(unsafe) Remove all the data and WAL, reset this node's validator",
|
||||
Run: resetAll,
|
||||
}
|
||||
|
||||
var resetPrivValidatorCmd = &cobra.Command{
|
||||
// ResetPrivValidatorCmd resets the private validator files.
|
||||
var ResetPrivValidatorCmd = &cobra.Command{
|
||||
Use: "unsafe_reset_priv_validator",
|
||||
Short: "(unsafe) Reset this node's validator",
|
||||
Run: resetPrivValidator,
|
||||
}
|
||||
|
||||
func init() {
|
||||
RootCmd.AddCommand(resetAllCmd)
|
||||
RootCmd.AddCommand(resetPrivValidatorCmd)
|
||||
// ResetAll removes the privValidator files.
|
||||
// Exported so other CLI tools can use it
|
||||
func ResetAll(dbDir, privValFile string, logger log.Logger) {
|
||||
resetPrivValidatorFS(privValFile, logger)
|
||||
os.RemoveAll(dbDir)
|
||||
logger.Info("Removed all data", "dir", dbDir)
|
||||
}
|
||||
|
||||
// XXX: this is totally unsafe.
|
||||
@@ -35,26 +41,17 @@ func resetAll(cmd *cobra.Command, args []string) {
|
||||
// XXX: this is totally unsafe.
|
||||
// it's only suitable for testnets.
|
||||
func resetPrivValidator(cmd *cobra.Command, args []string) {
|
||||
resetPrivValidatorLocal(config.PrivValidatorFile(), logger)
|
||||
resetPrivValidatorFS(config.PrivValidatorFile(), logger)
|
||||
}
|
||||
|
||||
// Exported so other CLI tools can use it
|
||||
func ResetAll(dbDir, privValFile string, logger log.Logger) {
|
||||
resetPrivValidatorLocal(privValFile, logger)
|
||||
os.RemoveAll(dbDir)
|
||||
logger.Info("Removed all data", "dir", dbDir)
|
||||
}
|
||||
|
||||
func resetPrivValidatorLocal(privValFile string, logger log.Logger) {
|
||||
func resetPrivValidatorFS(privValFile string, logger log.Logger) {
|
||||
// Get PrivValidator
|
||||
var privValidator *types.PrivValidator
|
||||
if _, err := os.Stat(privValFile); err == nil {
|
||||
privValidator = types.LoadPrivValidator(privValFile)
|
||||
privValidator := types.LoadPrivValidatorFS(privValFile)
|
||||
privValidator.Reset()
|
||||
logger.Info("Reset PrivValidator", "file", privValFile)
|
||||
} else {
|
||||
privValidator = types.GenPrivValidator()
|
||||
privValidator.SetFile(privValFile)
|
||||
privValidator := types.GenPrivValidatorFS(privValFile)
|
||||
privValidator.Save()
|
||||
logger.Info("Generated PrivValidator", "file", privValFile)
|
||||
}
|
||||
|
@@ -21,7 +21,8 @@ func init() {
|
||||
RootCmd.PersistentFlags().String("log_level", config.LogLevel, "Log level")
|
||||
}
|
||||
|
||||
// ParseConfig will setup the tendermint configuration properly
|
||||
// ParseConfig retrieves the default environment configuration,
|
||||
// sets up the Tendermint root and ensures that the root exists
|
||||
func ParseConfig() (*cfg.Config, error) {
|
||||
conf := cfg.DefaultConfig()
|
||||
err := viper.Unmarshal(conf)
|
||||
@@ -33,10 +34,14 @@ func ParseConfig() (*cfg.Config, error) {
|
||||
return conf, err
|
||||
}
|
||||
|
||||
// RootCmd is the root command for Tendermint core.
|
||||
var RootCmd = &cobra.Command{
|
||||
Use: "tendermint",
|
||||
Short: "Tendermint Core (BFT Consensus) in Go",
|
||||
PersistentPreRunE: func(cmd *cobra.Command, args []string) (err error) {
|
||||
if cmd.Name() == VersionCmd.Name() {
|
||||
return nil
|
||||
}
|
||||
config, err = ParseConfig()
|
||||
if err != nil {
|
||||
return err
|
||||
|
@@ -23,8 +23,14 @@ const (
|
||||
)
|
||||
|
||||
// isolate provides a clean setup and returns a copy of RootCmd you can
|
||||
// modify in the test cases
|
||||
// modify in the test cases.
|
||||
// NOTE: it unsets all TM* env variables.
|
||||
func isolate(cmds ...*cobra.Command) cli.Executable {
|
||||
os.Unsetenv("TMHOME")
|
||||
os.Unsetenv("TM_HOME")
|
||||
os.Unsetenv("TMROOT")
|
||||
os.Unsetenv("TM_ROOT")
|
||||
|
||||
viper.Reset()
|
||||
config = cfg.DefaultConfig()
|
||||
r := &cobra.Command{
|
||||
|
@@ -2,26 +2,12 @@ package commands
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"github.com/tendermint/tendermint/node"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
cmn "github.com/tendermint/tmlibs/common"
|
||||
nm "github.com/tendermint/tendermint/node"
|
||||
)
|
||||
|
||||
var runNodeCmd = &cobra.Command{
|
||||
Use: "node",
|
||||
Short: "Run the tendermint node",
|
||||
RunE: runNode,
|
||||
}
|
||||
|
||||
func init() {
|
||||
AddNodeFlags(runNodeCmd)
|
||||
RootCmd.AddCommand(runNodeCmd)
|
||||
}
|
||||
|
||||
// AddNodeFlags exposes some common configuration options on the command-line
|
||||
// These are exposed for convenience of commands embedding a tendermint node
|
||||
func AddNodeFlags(cmd *cobra.Command) {
|
||||
@@ -45,42 +31,37 @@ func AddNodeFlags(cmd *cobra.Command) {
|
||||
cmd.Flags().String("p2p.seeds", config.P2P.Seeds, "Comma delimited host:port seed nodes")
|
||||
cmd.Flags().Bool("p2p.skip_upnp", config.P2P.SkipUPNP, "Skip UPNP configuration")
|
||||
cmd.Flags().Bool("p2p.pex", config.P2P.PexReactor, "Enable Peer-Exchange (dev feature)")
|
||||
|
||||
// consensus flags
|
||||
cmd.Flags().Bool("consensus.create_empty_blocks", config.Consensus.CreateEmptyBlocks, "Set this to false to only produce blocks when there are txs or when the AppHash changes")
|
||||
}
|
||||
|
||||
// Users wishing to:
|
||||
// * Use an external signer for their validators
|
||||
// * Supply an in-proc abci app
|
||||
// should import github.com/tendermint/tendermint/node and implement
|
||||
// their own run_node to call node.NewNode (instead of node.NewNodeDefault)
|
||||
// with their custom priv validator and/or custom proxy.ClientCreator
|
||||
func runNode(cmd *cobra.Command, args []string) error {
|
||||
// NewRunNodeCmd returns the command that allows the CLI to start a
|
||||
// node. It can be used with a custom PrivValidator and in-process ABCI application.
|
||||
func NewRunNodeCmd(nodeProvider nm.NodeProvider) *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "node",
|
||||
Short: "Run the tendermint node",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
// Create & start node
|
||||
n, err := nodeProvider(config, logger)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to create node: %v", err)
|
||||
}
|
||||
|
||||
// Wait until the genesis doc becomes available
|
||||
// This is for Mintnet compatibility.
|
||||
// TODO: If Mintnet gets deprecated or genesis_file is
|
||||
// always available, remove.
|
||||
genDocFile := config.GenesisFile()
|
||||
for !cmn.FileExists(genDocFile) {
|
||||
logger.Info(cmn.Fmt("Waiting for genesis file %v...", genDocFile))
|
||||
time.Sleep(time.Second)
|
||||
if _, err := n.Start(); err != nil {
|
||||
return fmt.Errorf("Failed to start node: %v", err)
|
||||
} else {
|
||||
logger.Info("Started node", "nodeInfo", n.Switch().NodeInfo())
|
||||
}
|
||||
|
||||
// Trap signal, run forever.
|
||||
n.RunForever()
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
genDoc, err := types.GenesisDocFromFile(genDocFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
config.ChainID = genDoc.ChainID
|
||||
|
||||
// Create & start node
|
||||
n := node.NewNodeDefault(config, logger.With("module", "node"))
|
||||
if _, err := n.Start(); err != nil {
|
||||
return fmt.Errorf("Failed to start node: %v", err)
|
||||
} else {
|
||||
logger.Info("Started node", "nodeInfo", n.Switch().NodeInfo())
|
||||
}
|
||||
|
||||
// Trap signal, run forever.
|
||||
n.RunForever()
|
||||
|
||||
return nil
|
||||
AddNodeFlags(cmd)
|
||||
return cmd
|
||||
}
|
||||
|
@@ -9,18 +9,15 @@ import (
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
var showValidatorCmd = &cobra.Command{
|
||||
// ShowValidatorCmd adds capabilities for showing the validator info.
|
||||
var ShowValidatorCmd = &cobra.Command{
|
||||
Use: "show_validator",
|
||||
Short: "Show this node's validator info",
|
||||
Run: showValidator,
|
||||
}
|
||||
|
||||
func init() {
|
||||
RootCmd.AddCommand(showValidatorCmd)
|
||||
}
|
||||
|
||||
func showValidator(cmd *cobra.Command, args []string) {
|
||||
privValidator := types.LoadOrGenPrivValidator(config.PrivValidatorFile(), logger)
|
||||
privValidator := types.LoadOrGenPrivValidatorFS(config.PrivValidatorFile())
|
||||
pubKeyJSONBytes, _ := data.ToJSON(privValidator.PubKey)
|
||||
fmt.Println(string(pubKeyJSONBytes))
|
||||
}
|
||||
|
@@ -7,16 +7,10 @@ import (
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
cmn "github.com/tendermint/tmlibs/common"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
cmn "github.com/tendermint/tmlibs/common"
|
||||
)
|
||||
|
||||
var testnetFilesCmd = &cobra.Command{
|
||||
Use: "testnet",
|
||||
Short: "Initialize files for a Tendermint testnet",
|
||||
Run: testnetFiles,
|
||||
}
|
||||
|
||||
//flags
|
||||
var (
|
||||
nValidators int
|
||||
@@ -24,12 +18,18 @@ var (
|
||||
)
|
||||
|
||||
func init() {
|
||||
testnetFilesCmd.Flags().IntVar(&nValidators, "n", 4,
|
||||
TestnetFilesCmd.Flags().IntVar(&nValidators, "n", 4,
|
||||
"Number of validators to initialize the testnet with")
|
||||
testnetFilesCmd.Flags().StringVar(&dataDir, "dir", "mytestnet",
|
||||
TestnetFilesCmd.Flags().StringVar(&dataDir, "dir", "mytestnet",
|
||||
"Directory to store initialization data for the testnet")
|
||||
}
|
||||
|
||||
RootCmd.AddCommand(testnetFilesCmd)
|
||||
// TestnetFilesCmd allows initialisation of files for a
|
||||
// Tendermint testnet.
|
||||
var TestnetFilesCmd = &cobra.Command{
|
||||
Use: "testnet",
|
||||
Short: "Initialize files for a Tendermint testnet",
|
||||
Run: testnetFiles,
|
||||
}
|
||||
|
||||
func testnetFiles(cmd *cobra.Command, args []string) {
|
||||
@@ -45,10 +45,10 @@ func testnetFiles(cmd *cobra.Command, args []string) {
|
||||
}
|
||||
// Read priv_validator.json to populate vals
|
||||
privValFile := path.Join(dataDir, mach, "priv_validator.json")
|
||||
privVal := types.LoadPrivValidator(privValFile)
|
||||
privVal := types.LoadPrivValidatorFS(privValFile)
|
||||
genVals[i] = types.GenesisValidator{
|
||||
PubKey: privVal.PubKey,
|
||||
Amount: 1,
|
||||
PubKey: privVal.GetPubKey(),
|
||||
Power: 1,
|
||||
Name: mach,
|
||||
}
|
||||
}
|
||||
@@ -87,7 +87,6 @@ func ensurePrivValidator(file string) {
|
||||
if cmn.FileExists(file) {
|
||||
return
|
||||
}
|
||||
privValidator := types.GenPrivValidator()
|
||||
privValidator.SetFile(file)
|
||||
privValidator := types.GenPrivValidatorFS(file)
|
||||
privValidator.Save()
|
||||
}
|
||||
|
@@ -8,14 +8,11 @@ import (
|
||||
"github.com/tendermint/tendermint/version"
|
||||
)
|
||||
|
||||
var versionCmd = &cobra.Command{
|
||||
// VersionCmd ...
|
||||
var VersionCmd = &cobra.Command{
|
||||
Use: "version",
|
||||
Short: "Show version info",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
fmt.Println(version.Version)
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
RootCmd.AddCommand(versionCmd)
|
||||
}
|
||||
|
@@ -3,11 +3,39 @@ package main
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/tendermint/tendermint/cmd/tendermint/commands"
|
||||
"github.com/tendermint/tmlibs/cli"
|
||||
|
||||
cmd "github.com/tendermint/tendermint/cmd/tendermint/commands"
|
||||
nm "github.com/tendermint/tendermint/node"
|
||||
)
|
||||
|
||||
func main() {
|
||||
cmd := cli.PrepareBaseCmd(commands.RootCmd, "TM", os.ExpandEnv("$HOME/.tendermint"))
|
||||
rootCmd := cmd.RootCmd
|
||||
rootCmd.AddCommand(
|
||||
cmd.GenValidatorCmd,
|
||||
cmd.InitFilesCmd,
|
||||
cmd.ProbeUpnpCmd,
|
||||
cmd.ReplayCmd,
|
||||
cmd.ReplayConsoleCmd,
|
||||
cmd.ResetAllCmd,
|
||||
cmd.ResetPrivValidatorCmd,
|
||||
cmd.ShowValidatorCmd,
|
||||
cmd.TestnetFilesCmd,
|
||||
cmd.VersionCmd)
|
||||
|
||||
// NOTE:
|
||||
// Users wishing to:
|
||||
// * Use an external signer for their validators
|
||||
// * Supply an in-proc abci app
|
||||
// * Supply a genesis doc file from another source
|
||||
// * Provide their own DB implementation
|
||||
// can copy this file and use something other than the
|
||||
// DefaultNewNode function
|
||||
nodeFunc := nm.DefaultNewNode
|
||||
|
||||
// Create & start node
|
||||
rootCmd.AddCommand(cmd.NewRunNodeCmd(nodeFunc))
|
||||
|
||||
cmd := cli.PrepareBaseCmd(rootCmd, "TM", os.ExpandEnv("$HOME/.tendermint"))
|
||||
cmd.Execute()
|
||||
}
|
||||
|
155
config/config.go
155
config/config.go
@@ -4,10 +4,9 @@ import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
// Config defines the top level configuration for a Tendermint node
|
||||
type Config struct {
|
||||
// Top level options use an anonymous struct
|
||||
BaseConfig `mapstructure:",squash"`
|
||||
@@ -19,6 +18,7 @@ type Config struct {
|
||||
Consensus *ConsensusConfig `mapstructure:"consensus"`
|
||||
}
|
||||
|
||||
// DefaultConfig returns a default configuration for a Tendermint node
|
||||
func DefaultConfig() *Config {
|
||||
return &Config{
|
||||
BaseConfig: DefaultBaseConfig(),
|
||||
@@ -29,6 +29,7 @@ func DefaultConfig() *Config {
|
||||
}
|
||||
}
|
||||
|
||||
// TestConfig returns a configuration that can be used for testing
|
||||
func TestConfig() *Config {
|
||||
return &Config{
|
||||
BaseConfig: TestBaseConfig(),
|
||||
@@ -39,7 +40,7 @@ func TestConfig() *Config {
|
||||
}
|
||||
}
|
||||
|
||||
// Set the RootDir for all Config structs
|
||||
// SetRoot sets the RootDir for all Config structs
|
||||
func (cfg *Config) SetRoot(root string) *Config {
|
||||
cfg.BaseConfig.RootDir = root
|
||||
cfg.RPC.RootDir = root
|
||||
@@ -52,7 +53,7 @@ func (cfg *Config) SetRoot(root string) *Config {
|
||||
//-----------------------------------------------------------------------------
|
||||
// BaseConfig
|
||||
|
||||
// BaseConfig struct for a Tendermint node
|
||||
// BaseConfig defines the base configuration for a Tendermint node
|
||||
type BaseConfig struct {
|
||||
// The root directory for all data.
|
||||
// This should be set in viper so it can unmarshal into this struct
|
||||
@@ -102,6 +103,7 @@ type BaseConfig struct {
|
||||
DBPath string `mapstructure:"db_dir"`
|
||||
}
|
||||
|
||||
// DefaultBaseConfig returns a default base configuration for a Tendermint node
|
||||
func DefaultBaseConfig() BaseConfig {
|
||||
return BaseConfig{
|
||||
Genesis: "genesis.json",
|
||||
@@ -119,6 +121,7 @@ func DefaultBaseConfig() BaseConfig {
|
||||
}
|
||||
}
|
||||
|
||||
// TestBaseConfig returns a base configuration for testing a Tendermint node
|
||||
func TestBaseConfig() BaseConfig {
|
||||
conf := DefaultBaseConfig()
|
||||
conf.ChainID = "tendermint_test"
|
||||
@@ -128,22 +131,27 @@ func TestBaseConfig() BaseConfig {
|
||||
return conf
|
||||
}
|
||||
|
||||
// GenesisFile returns the full path to the genesis.json file
|
||||
func (b BaseConfig) GenesisFile() string {
|
||||
return rootify(b.Genesis, b.RootDir)
|
||||
}
|
||||
|
||||
// PrivValidatorFile returns the full path to the priv_validator.json file
|
||||
func (b BaseConfig) PrivValidatorFile() string {
|
||||
return rootify(b.PrivValidator, b.RootDir)
|
||||
}
|
||||
|
||||
// DBDir returns the full path to the database directory
|
||||
func (b BaseConfig) DBDir() string {
|
||||
return rootify(b.DBPath, b.RootDir)
|
||||
}
|
||||
|
||||
// DefaultLogLevel returns a default log level of "error"
|
||||
func DefaultLogLevel() string {
|
||||
return "error"
|
||||
}
|
||||
|
||||
// DefaultPackageLogLevels returns a default log level setting so all packages log at "error", while the `state` package logs at "info"
|
||||
func DefaultPackageLogLevels() string {
|
||||
return fmt.Sprintf("state:info,*:%s", DefaultLogLevel())
|
||||
}
|
||||
@@ -151,6 +159,7 @@ func DefaultPackageLogLevels() string {
|
||||
//-----------------------------------------------------------------------------
|
||||
// RPCConfig
|
||||
|
||||
// RPCConfig defines the configuration options for the Tendermint RPC server
|
||||
type RPCConfig struct {
|
||||
RootDir string `mapstructure:"home"`
|
||||
|
||||
@@ -165,6 +174,7 @@ type RPCConfig struct {
|
||||
Unsafe bool `mapstructure:"unsafe"`
|
||||
}
|
||||
|
||||
// DefaultRPCConfig returns a default configuration for the RPC server
|
||||
func DefaultRPCConfig() *RPCConfig {
|
||||
return &RPCConfig{
|
||||
ListenAddress: "tcp://0.0.0.0:46657",
|
||||
@@ -173,6 +183,7 @@ func DefaultRPCConfig() *RPCConfig {
|
||||
}
|
||||
}
|
||||
|
||||
// TestRPCConfig returns a configuration for testing the RPC server
|
||||
func TestRPCConfig() *RPCConfig {
|
||||
conf := DefaultRPCConfig()
|
||||
conf.ListenAddress = "tcp://0.0.0.0:36657"
|
||||
@@ -184,26 +195,59 @@ func TestRPCConfig() *RPCConfig {
|
||||
//-----------------------------------------------------------------------------
|
||||
// P2PConfig
|
||||
|
||||
// P2PConfig defines the configuration options for the Tendermint peer-to-peer networking layer
|
||||
type P2PConfig struct {
|
||||
RootDir string `mapstructure:"home"`
|
||||
ListenAddress string `mapstructure:"laddr"`
|
||||
Seeds string `mapstructure:"seeds"`
|
||||
SkipUPNP bool `mapstructure:"skip_upnp"`
|
||||
AddrBook string `mapstructure:"addr_book_file"`
|
||||
AddrBookStrict bool `mapstructure:"addr_book_strict"`
|
||||
PexReactor bool `mapstructure:"pex"`
|
||||
MaxNumPeers int `mapstructure:"max_num_peers"`
|
||||
RootDir string `mapstructure:"home"`
|
||||
|
||||
// Address to listen for incoming connections
|
||||
ListenAddress string `mapstructure:"laddr"`
|
||||
|
||||
// Comma separated list of seed nodes to connect to
|
||||
Seeds string `mapstructure:"seeds"`
|
||||
|
||||
// Skip UPNP port forwarding
|
||||
SkipUPNP bool `mapstructure:"skip_upnp"`
|
||||
|
||||
// Path to address book
|
||||
AddrBook string `mapstructure:"addr_book_file"`
|
||||
|
||||
// Set true for strict address routability rules
|
||||
AddrBookStrict bool `mapstructure:"addr_book_strict"`
|
||||
|
||||
// Set true to enable the peer-exchange reactor
|
||||
PexReactor bool `mapstructure:"pex"`
|
||||
|
||||
// Maximum number of peers to connect to
|
||||
MaxNumPeers int `mapstructure:"max_num_peers"`
|
||||
|
||||
// Time to wait before flushing messages out on the connection, in ms
|
||||
FlushThrottleTimeout int `mapstructure:"flush_throttle_timeout"`
|
||||
|
||||
// Maximum size of a message packet payload, in bytes
|
||||
MaxMsgPacketPayloadSize int `mapstructure:"max_msg_packet_payload_size"`
|
||||
|
||||
// Rate at which packets can be sent, in bytes/second
|
||||
SendRate int64 `mapstructure:"send_rate"`
|
||||
|
||||
// Rate at which packets can be received, in bytes/second
|
||||
RecvRate int64 `mapstructure:"recv_rate"`
|
||||
}
|
||||
|
||||
// DefaultP2PConfig returns a default configuration for the peer-to-peer layer
|
||||
func DefaultP2PConfig() *P2PConfig {
|
||||
return &P2PConfig{
|
||||
ListenAddress: "tcp://0.0.0.0:46656",
|
||||
AddrBook: "addrbook.json",
|
||||
AddrBookStrict: true,
|
||||
MaxNumPeers: 50,
|
||||
ListenAddress: "tcp://0.0.0.0:46656",
|
||||
AddrBook: "addrbook.json",
|
||||
AddrBookStrict: true,
|
||||
MaxNumPeers: 50,
|
||||
FlushThrottleTimeout: 100,
|
||||
MaxMsgPacketPayloadSize: 1024, // 1 kB
|
||||
SendRate: 512000, // 500 kB/s
|
||||
RecvRate: 512000, // 500 kB/s
|
||||
}
|
||||
}
|
||||
|
||||
// TestP2PConfig returns a configuration for testing the peer-to-peer layer
|
||||
func TestP2PConfig() *P2PConfig {
|
||||
conf := DefaultP2PConfig()
|
||||
conf.ListenAddress = "tcp://0.0.0.0:36656"
|
||||
@@ -211,6 +255,7 @@ func TestP2PConfig() *P2PConfig {
|
||||
return conf
|
||||
}
|
||||
|
||||
// AddrBookFile returns the full path to the address bool
|
||||
func (p *P2PConfig) AddrBookFile() string {
|
||||
return rootify(p.AddrBook, p.RootDir)
|
||||
}
|
||||
@@ -218,6 +263,7 @@ func (p *P2PConfig) AddrBookFile() string {
|
||||
//-----------------------------------------------------------------------------
|
||||
// MempoolConfig
|
||||
|
||||
// MempoolConfig defines the configuration options for the Tendermint mempool
|
||||
type MempoolConfig struct {
|
||||
RootDir string `mapstructure:"home"`
|
||||
Recheck bool `mapstructure:"recheck"`
|
||||
@@ -226,6 +272,7 @@ type MempoolConfig struct {
|
||||
WalPath string `mapstructure:"wal_dir"`
|
||||
}
|
||||
|
||||
// DefaultMempoolConfig returns a default configuration for the Tendermint mempool
|
||||
func DefaultMempoolConfig() *MempoolConfig {
|
||||
return &MempoolConfig{
|
||||
Recheck: true,
|
||||
@@ -235,6 +282,7 @@ func DefaultMempoolConfig() *MempoolConfig {
|
||||
}
|
||||
}
|
||||
|
||||
// WalDir returns the full path to the mempool's write-ahead log
|
||||
func (m *MempoolConfig) WalDir() string {
|
||||
return rootify(m.WalPath, m.RootDir)
|
||||
}
|
||||
@@ -242,8 +290,8 @@ func (m *MempoolConfig) WalDir() string {
|
||||
//-----------------------------------------------------------------------------
|
||||
// ConsensusConfig
|
||||
|
||||
// ConsensusConfig holds timeouts and details about the WAL, the block structure,
|
||||
// and timeouts in the consensus protocol.
|
||||
// ConsensusConfig defines the confuguration for the Tendermint consensus service,
|
||||
// including timeouts and details about the WAL and the block structure.
|
||||
type ConsensusConfig struct {
|
||||
RootDir string `mapstructure:"home"`
|
||||
WalPath string `mapstructure:"wal_file"`
|
||||
@@ -266,49 +314,78 @@ type ConsensusConfig struct {
|
||||
MaxBlockSizeTxs int `mapstructure:"max_block_size_txs"`
|
||||
MaxBlockSizeBytes int `mapstructure:"max_block_size_bytes"`
|
||||
|
||||
// TODO: This probably shouldn't be exposed but it makes it
|
||||
// easy to write tests for the wal/replay
|
||||
BlockPartSize int `mapstructure:"block_part_size"`
|
||||
// EmptyBlocks mode and possible interval between empty blocks in seconds
|
||||
CreateEmptyBlocks bool `mapstructure:"create_empty_blocks"`
|
||||
CreateEmptyBlocksInterval int `mapstructure:"create_empty_blocks_interval"`
|
||||
|
||||
// Reactor sleep duration parameters are in ms
|
||||
PeerGossipSleepDuration int `mapstructure:"peer_gossip_sleep_duration"`
|
||||
PeerQueryMaj23SleepDuration int `mapstructure:"peer_query_maj23_sleep_duration"`
|
||||
}
|
||||
|
||||
// Wait this long for a proposal
|
||||
// WaitForTxs returns true if the consensus should wait for transactions before entering the propose step
|
||||
func (cfg *ConsensusConfig) WaitForTxs() bool {
|
||||
return !cfg.CreateEmptyBlocks || cfg.CreateEmptyBlocksInterval > 0
|
||||
}
|
||||
|
||||
// EmptyBlocks returns the amount of time to wait before proposing an empty block or starting the propose timer if there are no txs available
|
||||
func (cfg *ConsensusConfig) EmptyBlocksInterval() time.Duration {
|
||||
return time.Duration(cfg.CreateEmptyBlocksInterval) * time.Second
|
||||
}
|
||||
|
||||
// Propose returns the amount of time to wait for a proposal
|
||||
func (cfg *ConsensusConfig) Propose(round int) time.Duration {
|
||||
return time.Duration(cfg.TimeoutPropose+cfg.TimeoutProposeDelta*round) * time.Millisecond
|
||||
}
|
||||
|
||||
// After receiving any +2/3 prevote, wait this long for stragglers
|
||||
// Prevote returns the amount of time to wait for straggler votes after receiving any +2/3 prevotes
|
||||
func (cfg *ConsensusConfig) Prevote(round int) time.Duration {
|
||||
return time.Duration(cfg.TimeoutPrevote+cfg.TimeoutPrevoteDelta*round) * time.Millisecond
|
||||
}
|
||||
|
||||
// After receiving any +2/3 precommits, wait this long for stragglers
|
||||
// Precommit returns the amount of time to wait for straggler votes after receiving any +2/3 precommits
|
||||
func (cfg *ConsensusConfig) Precommit(round int) time.Duration {
|
||||
return time.Duration(cfg.TimeoutPrecommit+cfg.TimeoutPrecommitDelta*round) * time.Millisecond
|
||||
}
|
||||
|
||||
// After receiving +2/3 precommits for a single block (a commit), wait this long for stragglers in the next height's RoundStepNewHeight
|
||||
// Commit returns the amount of time to wait for straggler votes after receiving +2/3 precommits for a single block (ie. a commit).
|
||||
func (cfg *ConsensusConfig) Commit(t time.Time) time.Time {
|
||||
return t.Add(time.Duration(cfg.TimeoutCommit) * time.Millisecond)
|
||||
}
|
||||
|
||||
// PeerGossipSleep returns the amount of time to sleep if there is nothing to send from the ConsensusReactor
|
||||
func (cfg *ConsensusConfig) PeerGossipSleep() time.Duration {
|
||||
return time.Duration(cfg.PeerGossipSleepDuration) * time.Millisecond
|
||||
}
|
||||
|
||||
// PeerQueryMaj23Sleep returns the amount of time to sleep after each VoteSetMaj23Message is sent in the ConsensusReactor
|
||||
func (cfg *ConsensusConfig) PeerQueryMaj23Sleep() time.Duration {
|
||||
return time.Duration(cfg.PeerQueryMaj23SleepDuration) * time.Millisecond
|
||||
}
|
||||
|
||||
// DefaultConsensusConfig returns a default configuration for the consensus service
|
||||
func DefaultConsensusConfig() *ConsensusConfig {
|
||||
return &ConsensusConfig{
|
||||
WalPath: "data/cs.wal/wal",
|
||||
WalLight: false,
|
||||
TimeoutPropose: 3000,
|
||||
TimeoutProposeDelta: 500,
|
||||
TimeoutPrevote: 1000,
|
||||
TimeoutPrevoteDelta: 500,
|
||||
TimeoutPrecommit: 1000,
|
||||
TimeoutPrecommitDelta: 500,
|
||||
TimeoutCommit: 1000,
|
||||
SkipTimeoutCommit: false,
|
||||
MaxBlockSizeTxs: 10000,
|
||||
MaxBlockSizeBytes: 1, // TODO
|
||||
BlockPartSize: types.DefaultBlockPartSize, // TODO: we shouldnt be importing types
|
||||
WalPath: "data/cs.wal/wal",
|
||||
WalLight: false,
|
||||
TimeoutPropose: 3000,
|
||||
TimeoutProposeDelta: 500,
|
||||
TimeoutPrevote: 1000,
|
||||
TimeoutPrevoteDelta: 500,
|
||||
TimeoutPrecommit: 1000,
|
||||
TimeoutPrecommitDelta: 500,
|
||||
TimeoutCommit: 1000,
|
||||
SkipTimeoutCommit: false,
|
||||
MaxBlockSizeTxs: 10000,
|
||||
MaxBlockSizeBytes: 1, // TODO
|
||||
CreateEmptyBlocks: true,
|
||||
CreateEmptyBlocksInterval: 0,
|
||||
PeerGossipSleepDuration: 100,
|
||||
PeerQueryMaj23SleepDuration: 2000,
|
||||
}
|
||||
}
|
||||
|
||||
// TestConsensusConfig returns a configuration for testing the consensus service
|
||||
func TestConsensusConfig() *ConsensusConfig {
|
||||
config := DefaultConsensusConfig()
|
||||
config.TimeoutPropose = 2000
|
||||
@@ -322,6 +399,7 @@ func TestConsensusConfig() *ConsensusConfig {
|
||||
return config
|
||||
}
|
||||
|
||||
// WalFile returns the full path to the write-ahead log file
|
||||
func (c *ConsensusConfig) WalFile() string {
|
||||
if c.walFile != "" {
|
||||
return c.walFile
|
||||
@@ -329,6 +407,7 @@ func (c *ConsensusConfig) WalFile() string {
|
||||
return rootify(c.WalPath, c.RootDir)
|
||||
}
|
||||
|
||||
// SetWalFile sets the path to the write-ahead log file
|
||||
func (c *ConsensusConfig) SetWalFile(walFile string) {
|
||||
c.walFile = walFile
|
||||
}
|
||||
|
@@ -119,7 +119,7 @@ var testGenesis = `{
|
||||
"type": "ed25519",
|
||||
"data":"3B3069C422E19688B45CBFAE7BB009FC0FA1B1EA86593519318B7214853803C8"
|
||||
},
|
||||
"amount": 10,
|
||||
"power": 10,
|
||||
"name": ""
|
||||
}
|
||||
],
|
||||
|
@@ -5,6 +5,8 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
crypto "github.com/tendermint/go-crypto"
|
||||
data "github.com/tendermint/go-wire/data"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
. "github.com/tendermint/tmlibs/common"
|
||||
@@ -53,7 +55,7 @@ func TestByzantine(t *testing.T) {
|
||||
eventLogger := logger.With("module", "events")
|
||||
for i := 0; i < N; i++ {
|
||||
if i == 0 {
|
||||
css[i].privValidator = NewByzantinePrivValidator(css[i].privValidator.(*types.PrivValidator))
|
||||
css[i].privValidator = NewByzantinePrivValidator(css[i].privValidator)
|
||||
// make byzantine
|
||||
css[i].decideProposal = func(j int) func(int, int) {
|
||||
return func(height, round int) {
|
||||
@@ -188,7 +190,7 @@ func byzantineDecideProposalFunc(t *testing.T, height, round int, cs *ConsensusS
|
||||
}
|
||||
}
|
||||
|
||||
func sendProposalAndParts(height, round int, cs *ConsensusState, peer *p2p.Peer, proposal *types.Proposal, blockHash []byte, parts *types.PartSet) {
|
||||
func sendProposalAndParts(height, round int, cs *ConsensusState, peer p2p.Peer, proposal *types.Proposal, blockHash []byte, parts *types.PartSet) {
|
||||
// proposal
|
||||
msg := &ProposalMessage{Proposal: proposal}
|
||||
peer.Send(DataChannel, struct{ ConsensusMessage }{msg})
|
||||
@@ -231,14 +233,14 @@ func NewByzantineReactor(conR *ConsensusReactor) *ByzantineReactor {
|
||||
|
||||
func (br *ByzantineReactor) SetSwitch(s *p2p.Switch) { br.reactor.SetSwitch(s) }
|
||||
func (br *ByzantineReactor) GetChannels() []*p2p.ChannelDescriptor { return br.reactor.GetChannels() }
|
||||
func (br *ByzantineReactor) AddPeer(peer *p2p.Peer) {
|
||||
func (br *ByzantineReactor) AddPeer(peer p2p.Peer) {
|
||||
if !br.reactor.IsRunning() {
|
||||
return
|
||||
}
|
||||
|
||||
// Create peerState for peer
|
||||
peerState := NewPeerState(peer)
|
||||
peer.Data.Set(types.PeerStateKey, peerState)
|
||||
peerState := NewPeerState(peer).SetLogger(br.reactor.Logger)
|
||||
peer.Set(types.PeerStateKey, peerState)
|
||||
|
||||
// Send our state to peer.
|
||||
// If we're fast_syncing, broadcast a RoundStepMessage later upon SwitchToConsensus().
|
||||
@@ -246,10 +248,10 @@ func (br *ByzantineReactor) AddPeer(peer *p2p.Peer) {
|
||||
br.reactor.sendNewRoundStepMessages(peer)
|
||||
}
|
||||
}
|
||||
func (br *ByzantineReactor) RemovePeer(peer *p2p.Peer, reason interface{}) {
|
||||
func (br *ByzantineReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
|
||||
br.reactor.RemovePeer(peer, reason)
|
||||
}
|
||||
func (br *ByzantineReactor) Receive(chID byte, peer *p2p.Peer, msgBytes []byte) {
|
||||
func (br *ByzantineReactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) {
|
||||
br.reactor.Receive(chID, peer, msgBytes)
|
||||
}
|
||||
|
||||
@@ -257,42 +259,42 @@ func (br *ByzantineReactor) Receive(chID byte, peer *p2p.Peer, msgBytes []byte)
|
||||
// byzantine privValidator
|
||||
|
||||
type ByzantinePrivValidator struct {
|
||||
Address []byte `json:"address"`
|
||||
types.Signer `json:"-"`
|
||||
types.Signer
|
||||
|
||||
mtx sync.Mutex
|
||||
pv types.PrivValidator
|
||||
}
|
||||
|
||||
// Return a priv validator that will sign anything
|
||||
func NewByzantinePrivValidator(pv *types.PrivValidator) *ByzantinePrivValidator {
|
||||
func NewByzantinePrivValidator(pv types.PrivValidator) *ByzantinePrivValidator {
|
||||
return &ByzantinePrivValidator{
|
||||
Address: pv.Address,
|
||||
Signer: pv.Signer,
|
||||
Signer: pv.(*types.PrivValidatorFS).Signer,
|
||||
pv: pv,
|
||||
}
|
||||
}
|
||||
|
||||
func (privVal *ByzantinePrivValidator) GetAddress() []byte {
|
||||
return privVal.Address
|
||||
func (privVal *ByzantinePrivValidator) GetAddress() data.Bytes {
|
||||
return privVal.pv.GetAddress()
|
||||
}
|
||||
|
||||
func (privVal *ByzantinePrivValidator) SignVote(chainID string, vote *types.Vote) error {
|
||||
privVal.mtx.Lock()
|
||||
defer privVal.mtx.Unlock()
|
||||
func (privVal *ByzantinePrivValidator) GetPubKey() crypto.PubKey {
|
||||
return privVal.pv.GetPubKey()
|
||||
}
|
||||
|
||||
// Sign
|
||||
vote.Signature = privVal.Sign(types.SignBytes(chainID, vote))
|
||||
func (privVal *ByzantinePrivValidator) SignVote(chainID string, vote *types.Vote) (err error) {
|
||||
vote.Signature, err = privVal.Sign(types.SignBytes(chainID, vote))
|
||||
return err
|
||||
}
|
||||
|
||||
func (privVal *ByzantinePrivValidator) SignProposal(chainID string, proposal *types.Proposal) (err error) {
|
||||
proposal.Signature, err = privVal.Sign(types.SignBytes(chainID, proposal))
|
||||
return nil
|
||||
}
|
||||
|
||||
func (privVal *ByzantinePrivValidator) SignProposal(chainID string, proposal *types.Proposal) error {
|
||||
privVal.mtx.Lock()
|
||||
defer privVal.mtx.Unlock()
|
||||
|
||||
// Sign
|
||||
proposal.Signature = privVal.Sign(types.SignBytes(chainID, proposal))
|
||||
func (privVal *ByzantinePrivValidator) SignHeartbeat(chainID string, heartbeat *types.Heartbeat) (err error) {
|
||||
heartbeat.Signature, err = privVal.Sign(types.SignBytes(chainID, heartbeat))
|
||||
return nil
|
||||
}
|
||||
|
||||
func (privVal *ByzantinePrivValidator) String() string {
|
||||
return Fmt("PrivValidator{%X}", privVal.Address)
|
||||
return Fmt("PrivValidator{%X}", privVal.GetAddress())
|
||||
}
|
||||
|
@@ -31,7 +31,7 @@ import (
|
||||
|
||||
// genesis, chain_id, priv_val
|
||||
var config *cfg.Config // NOTE: must be reset for each _test.go file
|
||||
var ensureTimeout = time.Duration(2)
|
||||
var ensureTimeout = time.Second * 2
|
||||
|
||||
func ensureDir(dir string, mode os.FileMode) {
|
||||
if err := EnsureDir(dir, mode); err != nil {
|
||||
@@ -50,12 +50,12 @@ type validatorStub struct {
|
||||
Index int // Validator index. NOTE: we don't assume validator set changes.
|
||||
Height int
|
||||
Round int
|
||||
*types.PrivValidator
|
||||
types.PrivValidator
|
||||
}
|
||||
|
||||
var testMinPower = 10
|
||||
|
||||
func NewValidatorStub(privValidator *types.PrivValidator, valIndex int) *validatorStub {
|
||||
func NewValidatorStub(privValidator types.PrivValidator, valIndex int) *validatorStub {
|
||||
return &validatorStub{
|
||||
Index: valIndex,
|
||||
PrivValidator: privValidator,
|
||||
@@ -65,7 +65,7 @@ func NewValidatorStub(privValidator *types.PrivValidator, valIndex int) *validat
|
||||
func (vs *validatorStub) signVote(voteType byte, hash []byte, header types.PartSetHeader) (*types.Vote, error) {
|
||||
vote := &types.Vote{
|
||||
ValidatorIndex: vs.Index,
|
||||
ValidatorAddress: vs.PrivValidator.Address,
|
||||
ValidatorAddress: vs.PrivValidator.GetAddress(),
|
||||
Height: vs.Height,
|
||||
Round: vs.Round,
|
||||
Type: voteType,
|
||||
@@ -142,7 +142,7 @@ func signAddVotes(to *ConsensusState, voteType byte, hash []byte, header types.P
|
||||
func validatePrevote(t *testing.T, cs *ConsensusState, round int, privVal *validatorStub, blockHash []byte) {
|
||||
prevotes := cs.Votes.Prevotes(round)
|
||||
var vote *types.Vote
|
||||
if vote = prevotes.GetByAddress(privVal.Address); vote == nil {
|
||||
if vote = prevotes.GetByAddress(privVal.GetAddress()); vote == nil {
|
||||
panic("Failed to find prevote from validator")
|
||||
}
|
||||
if blockHash == nil {
|
||||
@@ -159,7 +159,7 @@ func validatePrevote(t *testing.T, cs *ConsensusState, round int, privVal *valid
|
||||
func validateLastPrecommit(t *testing.T, cs *ConsensusState, privVal *validatorStub, blockHash []byte) {
|
||||
votes := cs.LastCommit
|
||||
var vote *types.Vote
|
||||
if vote = votes.GetByAddress(privVal.Address); vote == nil {
|
||||
if vote = votes.GetByAddress(privVal.GetAddress()); vote == nil {
|
||||
panic("Failed to find precommit from validator")
|
||||
}
|
||||
if !bytes.Equal(vote.BlockID.Hash, blockHash) {
|
||||
@@ -170,7 +170,7 @@ func validateLastPrecommit(t *testing.T, cs *ConsensusState, privVal *validatorS
|
||||
func validatePrecommit(t *testing.T, cs *ConsensusState, thisRound, lockRound int, privVal *validatorStub, votedBlockHash, lockedBlockHash []byte) {
|
||||
precommits := cs.Votes.Precommits(thisRound)
|
||||
var vote *types.Vote
|
||||
if vote = precommits.GetByAddress(privVal.Address); vote == nil {
|
||||
if vote = precommits.GetByAddress(privVal.GetAddress()); vote == nil {
|
||||
panic("Failed to find precommit from validator")
|
||||
}
|
||||
|
||||
@@ -225,11 +225,11 @@ func subscribeToVoter(cs *ConsensusState, addr []byte) chan interface{} {
|
||||
//-------------------------------------------------------------------------------
|
||||
// consensus states
|
||||
|
||||
func newConsensusState(state *sm.State, pv *types.PrivValidator, app abci.Application) *ConsensusState {
|
||||
func newConsensusState(state *sm.State, pv types.PrivValidator, app abci.Application) *ConsensusState {
|
||||
return newConsensusStateWithConfig(config, state, pv, app)
|
||||
}
|
||||
|
||||
func newConsensusStateWithConfig(thisConfig *cfg.Config, state *sm.State, pv *types.PrivValidator, app abci.Application) *ConsensusState {
|
||||
func newConsensusStateWithConfig(thisConfig *cfg.Config, state *sm.State, pv types.PrivValidator, app abci.Application) *ConsensusState {
|
||||
// Get BlockStore
|
||||
blockDB := dbm.NewMemDB()
|
||||
blockStore := bc.NewBlockStore(blockDB)
|
||||
@@ -240,8 +240,11 @@ func newConsensusStateWithConfig(thisConfig *cfg.Config, state *sm.State, pv *ty
|
||||
proxyAppConnCon := abcicli.NewLocalClient(mtx, app)
|
||||
|
||||
// Make Mempool
|
||||
mempool := mempl.NewMempool(thisConfig.Mempool, proxyAppConnMem)
|
||||
mempool := mempl.NewMempool(thisConfig.Mempool, proxyAppConnMem, 0)
|
||||
mempool.SetLogger(log.TestingLogger().With("module", "mempool"))
|
||||
if thisConfig.Consensus.WaitForTxs() {
|
||||
mempool.EnableTxsAvailable()
|
||||
}
|
||||
|
||||
// Make ConsensusReactor
|
||||
cs := NewConsensusState(thisConfig.Consensus, state, proxyAppConnCon, blockStore, mempool)
|
||||
@@ -255,17 +258,17 @@ func newConsensusStateWithConfig(thisConfig *cfg.Config, state *sm.State, pv *ty
|
||||
return cs
|
||||
}
|
||||
|
||||
func loadPrivValidator(config *cfg.Config) *types.PrivValidator {
|
||||
func loadPrivValidator(config *cfg.Config) *types.PrivValidatorFS {
|
||||
privValidatorFile := config.PrivValidatorFile()
|
||||
ensureDir(path.Dir(privValidatorFile), 0700)
|
||||
privValidator := types.LoadOrGenPrivValidator(privValidatorFile, log.TestingLogger())
|
||||
privValidator := types.LoadOrGenPrivValidatorFS(privValidatorFile)
|
||||
privValidator.Reset()
|
||||
return privValidator
|
||||
}
|
||||
|
||||
func fixedConsensusStateDummy() *ConsensusState {
|
||||
stateDB := dbm.NewMemDB()
|
||||
state := sm.MakeGenesisStateFromFile(stateDB, config.GenesisFile())
|
||||
state, _ := sm.MakeGenesisStateFromFile(stateDB, config.GenesisFile())
|
||||
state.SetLogger(log.TestingLogger().With("module", "state"))
|
||||
privValidator := loadPrivValidator(config)
|
||||
cs := newConsensusState(state, privValidator, dummy.NewDummyApplication())
|
||||
@@ -294,12 +297,22 @@ func randConsensusState(nValidators int) (*ConsensusState, []*validatorStub) {
|
||||
//-------------------------------------------------------------------------------
|
||||
|
||||
func ensureNoNewStep(stepCh chan interface{}) {
|
||||
timeout := time.NewTicker(ensureTimeout * time.Second)
|
||||
timer := time.NewTimer(ensureTimeout)
|
||||
select {
|
||||
case <-timeout.C:
|
||||
case <-timer.C:
|
||||
break
|
||||
case <-stepCh:
|
||||
panic("We should be stuck waiting for more votes, not moving to the next step")
|
||||
panic("We should be stuck waiting, not moving to the next step")
|
||||
}
|
||||
}
|
||||
|
||||
func ensureNewStep(stepCh chan interface{}) {
|
||||
timer := time.NewTimer(ensureTimeout)
|
||||
select {
|
||||
case <-timer.C:
|
||||
panic("We shouldnt be stuck waiting")
|
||||
case <-stepCh:
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
@@ -319,16 +332,19 @@ func consensusLogger() log.Logger {
|
||||
})
|
||||
}
|
||||
|
||||
func randConsensusNet(nValidators int, testName string, tickerFunc func() TimeoutTicker, appFunc func() abci.Application) []*ConsensusState {
|
||||
func randConsensusNet(nValidators int, testName string, tickerFunc func() TimeoutTicker, appFunc func() abci.Application, configOpts ...func(*cfg.Config)) []*ConsensusState {
|
||||
genDoc, privVals := randGenesisDoc(nValidators, false, 10)
|
||||
css := make([]*ConsensusState, nValidators)
|
||||
logger := consensusLogger()
|
||||
for i := 0; i < nValidators; i++ {
|
||||
db := dbm.NewMemDB() // each state needs its own db
|
||||
state := sm.MakeGenesisState(db, genDoc)
|
||||
state, _ := sm.MakeGenesisState(db, genDoc)
|
||||
state.SetLogger(logger.With("module", "state", "validator", i))
|
||||
state.Save()
|
||||
thisConfig := ResetConfig(Fmt("%s_%d", testName, i))
|
||||
for _, opt := range configOpts {
|
||||
opt(thisConfig)
|
||||
}
|
||||
ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
|
||||
css[i] = newConsensusStateWithConfig(thisConfig, state, privVals[i], appFunc())
|
||||
css[i].SetLogger(logger.With("validator", i))
|
||||
@@ -343,18 +359,17 @@ func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerF
|
||||
css := make([]*ConsensusState, nPeers)
|
||||
for i := 0; i < nPeers; i++ {
|
||||
db := dbm.NewMemDB() // each state needs its own db
|
||||
state := sm.MakeGenesisState(db, genDoc)
|
||||
state, _ := sm.MakeGenesisState(db, genDoc)
|
||||
state.SetLogger(log.TestingLogger().With("module", "state"))
|
||||
state.Save()
|
||||
thisConfig := ResetConfig(Fmt("%s_%d", testName, i))
|
||||
ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
|
||||
var privVal *types.PrivValidator
|
||||
var privVal types.PrivValidator
|
||||
if i < nValidators {
|
||||
privVal = privVals[i]
|
||||
} else {
|
||||
privVal = types.GenPrivValidator()
|
||||
_, tempFilePath := Tempfile("priv_validator_")
|
||||
privVal.SetFile(tempFilePath)
|
||||
privVal = types.GenPrivValidatorFS(tempFilePath)
|
||||
}
|
||||
|
||||
css[i] = newConsensusStateWithConfig(thisConfig, state, privVal, appFunc())
|
||||
@@ -364,9 +379,9 @@ func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerF
|
||||
return css
|
||||
}
|
||||
|
||||
func getSwitchIndex(switches []*p2p.Switch, peer *p2p.Peer) int {
|
||||
func getSwitchIndex(switches []*p2p.Switch, peer p2p.Peer) int {
|
||||
for i, s := range switches {
|
||||
if bytes.Equal(peer.NodeInfo.PubKey.Address(), s.NodeInfo().PubKey.Address()) {
|
||||
if bytes.Equal(peer.NodeInfo().PubKey.Address(), s.NodeInfo().PubKey.Address()) {
|
||||
return i
|
||||
}
|
||||
}
|
||||
@@ -377,14 +392,14 @@ func getSwitchIndex(switches []*p2p.Switch, peer *p2p.Peer) int {
|
||||
//-------------------------------------------------------------------------------
|
||||
// genesis
|
||||
|
||||
func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.GenesisDoc, []*types.PrivValidator) {
|
||||
func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.GenesisDoc, []*types.PrivValidatorFS) {
|
||||
validators := make([]types.GenesisValidator, numValidators)
|
||||
privValidators := make([]*types.PrivValidator, numValidators)
|
||||
privValidators := make([]*types.PrivValidatorFS, numValidators)
|
||||
for i := 0; i < numValidators; i++ {
|
||||
val, privVal := types.RandValidator(randPower, minPower)
|
||||
validators[i] = types.GenesisValidator{
|
||||
PubKey: val.PubKey,
|
||||
Amount: val.VotingPower,
|
||||
Power: val.VotingPower,
|
||||
}
|
||||
privValidators[i] = privVal
|
||||
}
|
||||
@@ -396,10 +411,10 @@ func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.G
|
||||
}, privValidators
|
||||
}
|
||||
|
||||
func randGenesisState(numValidators int, randPower bool, minPower int64) (*sm.State, []*types.PrivValidator) {
|
||||
func randGenesisState(numValidators int, randPower bool, minPower int64) (*sm.State, []*types.PrivValidatorFS) {
|
||||
genDoc, privValidators := randGenesisDoc(numValidators, randPower, minPower)
|
||||
db := dbm.NewMemDB()
|
||||
s0 := sm.MakeGenesisState(db, genDoc)
|
||||
s0, _ := sm.MakeGenesisState(db, genDoc)
|
||||
s0.SetLogger(log.TestingLogger().With("module", "state"))
|
||||
s0.Save()
|
||||
return s0, privValidators
|
||||
|
@@ -44,10 +44,10 @@ func TestPeerCatchupRounds(t *testing.T) {
|
||||
|
||||
}
|
||||
|
||||
func makeVoteHR(t *testing.T, height, round int, privVals []*types.PrivValidator, valIndex int) *types.Vote {
|
||||
func makeVoteHR(t *testing.T, height, round int, privVals []*types.PrivValidatorFS, valIndex int) *types.Vote {
|
||||
privVal := privVals[valIndex]
|
||||
vote := &types.Vote{
|
||||
ValidatorAddress: privVal.Address,
|
||||
ValidatorAddress: privVal.GetAddress(),
|
||||
ValidatorIndex: valIndex,
|
||||
Height: height,
|
||||
Round: round,
|
||||
|
@@ -15,6 +15,82 @@ func init() {
|
||||
config = ResetConfig("consensus_mempool_test")
|
||||
}
|
||||
|
||||
func TestNoProgressUntilTxsAvailable(t *testing.T) {
|
||||
config := ResetConfig("consensus_mempool_txs_available_test")
|
||||
config.Consensus.CreateEmptyBlocks = false
|
||||
state, privVals := randGenesisState(1, false, 10)
|
||||
cs := newConsensusStateWithConfig(config, state, privVals[0], NewCounterApplication())
|
||||
cs.mempool.EnableTxsAvailable()
|
||||
height, round := cs.Height, cs.Round
|
||||
newBlockCh := subscribeToEvent(cs.evsw, "tester", types.EventStringNewBlock(), 1)
|
||||
startTestRound(cs, height, round)
|
||||
|
||||
ensureNewStep(newBlockCh) // first block gets committed
|
||||
ensureNoNewStep(newBlockCh)
|
||||
deliverTxsRange(cs, 0, 2)
|
||||
ensureNewStep(newBlockCh) // commit txs
|
||||
ensureNewStep(newBlockCh) // commit updated app hash
|
||||
ensureNoNewStep(newBlockCh)
|
||||
|
||||
}
|
||||
|
||||
func TestProgressAfterCreateEmptyBlocksInterval(t *testing.T) {
|
||||
config := ResetConfig("consensus_mempool_txs_available_test")
|
||||
config.Consensus.CreateEmptyBlocksInterval = int(ensureTimeout.Seconds())
|
||||
state, privVals := randGenesisState(1, false, 10)
|
||||
cs := newConsensusStateWithConfig(config, state, privVals[0], NewCounterApplication())
|
||||
cs.mempool.EnableTxsAvailable()
|
||||
height, round := cs.Height, cs.Round
|
||||
newBlockCh := subscribeToEvent(cs.evsw, "tester", types.EventStringNewBlock(), 1)
|
||||
startTestRound(cs, height, round)
|
||||
|
||||
ensureNewStep(newBlockCh) // first block gets committed
|
||||
ensureNoNewStep(newBlockCh) // then we dont make a block ...
|
||||
ensureNewStep(newBlockCh) // until the CreateEmptyBlocksInterval has passed
|
||||
}
|
||||
|
||||
func TestProgressInHigherRound(t *testing.T) {
|
||||
config := ResetConfig("consensus_mempool_txs_available_test")
|
||||
config.Consensus.CreateEmptyBlocks = false
|
||||
state, privVals := randGenesisState(1, false, 10)
|
||||
cs := newConsensusStateWithConfig(config, state, privVals[0], NewCounterApplication())
|
||||
cs.mempool.EnableTxsAvailable()
|
||||
height, round := cs.Height, cs.Round
|
||||
newBlockCh := subscribeToEvent(cs.evsw, "tester", types.EventStringNewBlock(), 1)
|
||||
newRoundCh := subscribeToEvent(cs.evsw, "tester", types.EventStringNewRound(), 1)
|
||||
timeoutCh := subscribeToEvent(cs.evsw, "tester", types.EventStringTimeoutPropose(), 1)
|
||||
cs.setProposal = func(proposal *types.Proposal) error {
|
||||
if cs.Height == 2 && cs.Round == 0 {
|
||||
// dont set the proposal in round 0 so we timeout and
|
||||
// go to next round
|
||||
cs.Logger.Info("Ignoring set proposal at height 2, round 0")
|
||||
return nil
|
||||
}
|
||||
return cs.defaultSetProposal(proposal)
|
||||
}
|
||||
startTestRound(cs, height, round)
|
||||
|
||||
ensureNewStep(newRoundCh) // first round at first height
|
||||
ensureNewStep(newBlockCh) // first block gets committed
|
||||
ensureNewStep(newRoundCh) // first round at next height
|
||||
deliverTxsRange(cs, 0, 2) // we deliver txs, but dont set a proposal so we get the next round
|
||||
<-timeoutCh
|
||||
ensureNewStep(newRoundCh) // wait for the next round
|
||||
ensureNewStep(newBlockCh) // now we can commit the block
|
||||
}
|
||||
|
||||
func deliverTxsRange(cs *ConsensusState, start, end int) {
|
||||
// Deliver some txs.
|
||||
for i := start; i < end; i++ {
|
||||
txBytes := make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(txBytes, uint64(i))
|
||||
err := cs.mempool.CheckTx(txBytes, nil)
|
||||
if err != nil {
|
||||
panic(Fmt("Error after CheckTx: %v", err))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestTxConcurrentWithCommit(t *testing.T) {
|
||||
|
||||
state, privVals := randGenesisState(1, false, 10)
|
||||
@@ -22,21 +98,8 @@ func TestTxConcurrentWithCommit(t *testing.T) {
|
||||
height, round := cs.Height, cs.Round
|
||||
newBlockCh := subscribeToEvent(cs.evsw, "tester", types.EventStringNewBlock(), 1)
|
||||
|
||||
deliverTxsRange := func(start, end int) {
|
||||
// Deliver some txs.
|
||||
for i := start; i < end; i++ {
|
||||
txBytes := make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(txBytes, uint64(i))
|
||||
err := cs.mempool.CheckTx(txBytes, nil)
|
||||
if err != nil {
|
||||
panic(Fmt("Error after CheckTx: %v", err))
|
||||
}
|
||||
// time.Sleep(time.Microsecond * time.Duration(rand.Int63n(3000)))
|
||||
}
|
||||
}
|
||||
|
||||
NTxs := 10000
|
||||
go deliverTxsRange(0, NTxs)
|
||||
go deliverTxsRange(cs, 0, NTxs)
|
||||
|
||||
startTestRound(cs, height, round)
|
||||
ticker := time.NewTicker(time.Second * 20)
|
||||
@@ -120,7 +183,7 @@ func NewCounterApplication() *CounterApplication {
|
||||
return &CounterApplication{}
|
||||
}
|
||||
|
||||
func (app *CounterApplication) Info() abci.ResponseInfo {
|
||||
func (app *CounterApplication) Info(req abci.RequestInfo) abci.ResponseInfo {
|
||||
return abci.ResponseInfo{Data: Fmt("txs:%v", app.txCount)}
|
||||
}
|
||||
|
||||
|
File diff suppressed because it is too large
Load Diff
@@ -7,9 +7,11 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/tendermint/abci/example/dummy"
|
||||
"github.com/tendermint/tmlibs/events"
|
||||
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
"github.com/tendermint/tmlibs/events"
|
||||
)
|
||||
|
||||
func init() {
|
||||
@@ -76,6 +78,35 @@ func TestReactor(t *testing.T) {
|
||||
}, css)
|
||||
}
|
||||
|
||||
// Ensure a testnet sends proposal heartbeats and makes blocks when there are txs
|
||||
func TestReactorProposalHeartbeats(t *testing.T) {
|
||||
N := 4
|
||||
css := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter,
|
||||
func(c *cfg.Config) {
|
||||
c.Consensus.CreateEmptyBlocks = false
|
||||
})
|
||||
reactors, eventChans := startConsensusNet(t, css, N, false)
|
||||
defer stopConsensusNet(reactors)
|
||||
heartbeatChans := make([]chan interface{}, N)
|
||||
for i := 0; i < N; i++ {
|
||||
heartbeatChans[i] = subscribeToEvent(css[i].evsw, "tester", types.EventStringProposalHeartbeat(), 1)
|
||||
}
|
||||
// wait till everyone sends a proposal heartbeat
|
||||
timeoutWaitGroup(t, N, func(wg *sync.WaitGroup, j int) {
|
||||
<-heartbeatChans[j]
|
||||
wg.Done()
|
||||
}, css)
|
||||
|
||||
// send a tx
|
||||
css[3].mempool.CheckTx([]byte{1, 2, 3}, nil)
|
||||
|
||||
// wait till everyone makes the first new block
|
||||
timeoutWaitGroup(t, N, func(wg *sync.WaitGroup, j int) {
|
||||
<-eventChans[j]
|
||||
wg.Done()
|
||||
}, css)
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------
|
||||
// ensure we can make blocks despite cycling a validator set
|
||||
|
||||
@@ -101,7 +132,7 @@ func TestVotingPowerChange(t *testing.T) {
|
||||
//---------------------------------------------------------------------------
|
||||
t.Log("---------------------------- Testing changing the voting power of one validator a few times")
|
||||
|
||||
val1PubKey := css[0].privValidator.(*types.PrivValidator).PubKey
|
||||
val1PubKey := css[0].privValidator.GetPubKey()
|
||||
updateValidatorTx := dummy.MakeValSetChangeTx(val1PubKey.Bytes(), 25)
|
||||
previousTotalVotingPower := css[0].GetRoundState().LastValidators.TotalVotingPower()
|
||||
|
||||
@@ -162,7 +193,7 @@ func TestValidatorSetChanges(t *testing.T) {
|
||||
//---------------------------------------------------------------------------
|
||||
t.Log("---------------------------- Testing adding one validator")
|
||||
|
||||
newValidatorPubKey1 := css[nVals].privValidator.(*types.PrivValidator).PubKey
|
||||
newValidatorPubKey1 := css[nVals].privValidator.GetPubKey()
|
||||
newValidatorTx1 := dummy.MakeValSetChangeTx(newValidatorPubKey1.Bytes(), uint64(testMinPower))
|
||||
|
||||
// wait till everyone makes block 2
|
||||
@@ -188,7 +219,7 @@ func TestValidatorSetChanges(t *testing.T) {
|
||||
//---------------------------------------------------------------------------
|
||||
t.Log("---------------------------- Testing changing the voting power of one validator")
|
||||
|
||||
updateValidatorPubKey1 := css[nVals].privValidator.(*types.PrivValidator).PubKey
|
||||
updateValidatorPubKey1 := css[nVals].privValidator.GetPubKey()
|
||||
updateValidatorTx1 := dummy.MakeValSetChangeTx(updateValidatorPubKey1.Bytes(), 25)
|
||||
previousTotalVotingPower := css[nVals].GetRoundState().LastValidators.TotalVotingPower()
|
||||
|
||||
@@ -204,10 +235,10 @@ func TestValidatorSetChanges(t *testing.T) {
|
||||
//---------------------------------------------------------------------------
|
||||
t.Log("---------------------------- Testing adding two validators at once")
|
||||
|
||||
newValidatorPubKey2 := css[nVals+1].privValidator.(*types.PrivValidator).PubKey
|
||||
newValidatorPubKey2 := css[nVals+1].privValidator.GetPubKey()
|
||||
newValidatorTx2 := dummy.MakeValSetChangeTx(newValidatorPubKey2.Bytes(), uint64(testMinPower))
|
||||
|
||||
newValidatorPubKey3 := css[nVals+2].privValidator.(*types.PrivValidator).PubKey
|
||||
newValidatorPubKey3 := css[nVals+2].privValidator.GetPubKey()
|
||||
newValidatorTx3 := dummy.MakeValSetChangeTx(newValidatorPubKey3.Bytes(), uint64(testMinPower))
|
||||
|
||||
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css, newValidatorTx2, newValidatorTx3)
|
||||
|
@@ -19,6 +19,7 @@ import (
|
||||
"github.com/tendermint/tendermint/proxy"
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
"github.com/tendermint/tendermint/version"
|
||||
)
|
||||
|
||||
// Functionality to replay blocks and messages on recovery from a crash.
|
||||
@@ -82,7 +83,7 @@ func (cs *ConsensusState) readReplayMessage(msgBytes []byte, newStepCh chan inte
|
||||
"blockID", v.BlockID, "peer", peerKey)
|
||||
}
|
||||
|
||||
cs.handleMsg(m, cs.RoundState)
|
||||
cs.handleMsg(m)
|
||||
case timeoutInfo:
|
||||
cs.Logger.Info("Replay: Timeout", "height", m.Height, "round", m.Round, "step", m.Step, "dur", m.Duration)
|
||||
cs.handleTimeout(m, cs.RoundState)
|
||||
@@ -115,35 +116,13 @@ func (cs *ConsensusState) catchupReplay(csHeight int) error {
|
||||
gr, found, err = cs.wal.group.Search("#ENDHEIGHT: ", makeHeightSearchFunc(csHeight-1))
|
||||
if err == io.EOF {
|
||||
cs.Logger.Error("Replay: wal.group.Search returned EOF", "#ENDHEIGHT", csHeight-1)
|
||||
// if we upgraded from 0.9 to 0.9.1, we may have #HEIGHT instead
|
||||
// TODO (0.10.0): remove this
|
||||
gr, found, err = cs.wal.group.Search("#HEIGHT: ", makeHeightSearchFunc(csHeight))
|
||||
if err == io.EOF {
|
||||
cs.Logger.Error("Replay: wal.group.Search returned EOF", "#HEIGHT", csHeight)
|
||||
return nil
|
||||
} else if err != nil {
|
||||
return err
|
||||
}
|
||||
} else if err != nil {
|
||||
return err
|
||||
} else {
|
||||
defer gr.Close()
|
||||
}
|
||||
if !found {
|
||||
// if we upgraded from 0.9 to 0.9.1, we may have #HEIGHT instead
|
||||
// TODO (0.10.0): remove this
|
||||
gr, _, err = cs.wal.group.Search("#HEIGHT: ", makeHeightSearchFunc(csHeight))
|
||||
if err == io.EOF {
|
||||
cs.Logger.Error("Replay: wal.group.Search returned EOF", "#HEIGHT", csHeight)
|
||||
return nil
|
||||
} else if err != nil {
|
||||
return err
|
||||
} else {
|
||||
defer gr.Close()
|
||||
}
|
||||
|
||||
// TODO (0.10.0): uncomment
|
||||
// return errors.New(cmn.Fmt("Cannot replay height %d. WAL does not contain #ENDHEIGHT for %d.", csHeight, csHeight-1))
|
||||
return errors.New(cmn.Fmt("Cannot replay height %d. WAL does not contain #ENDHEIGHT for %d.", csHeight, csHeight-1))
|
||||
}
|
||||
|
||||
cs.Logger.Info("Catchup by replaying consensus messages", "height", csHeight)
|
||||
@@ -221,7 +200,7 @@ func (h *Handshaker) NBlocks() int {
|
||||
// TODO: retry the handshake/replay if it fails ?
|
||||
func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error {
|
||||
// handshake is done via info request on the query conn
|
||||
res, err := proxyApp.Query().InfoSync()
|
||||
res, err := proxyApp.Query().InfoSync(abci.RequestInfo{version.Version})
|
||||
if err != nil {
|
||||
return errors.New(cmn.Fmt("Error calling Info: %v", err))
|
||||
}
|
||||
@@ -257,7 +236,7 @@ func (h *Handshaker) ReplayBlocks(appHash []byte, appBlockHeight int, proxyApp p
|
||||
// If appBlockHeight == 0 it means that we are at genesis and hence should send InitChain
|
||||
if appBlockHeight == 0 {
|
||||
validators := types.TM2PB.Validators(h.state.Validators)
|
||||
proxyApp.Consensus().InitChainSync(validators)
|
||||
proxyApp.Consensus().InitChainSync(abci.RequestInitChain{validators})
|
||||
}
|
||||
|
||||
// First handle edge cases and constraints on the storeBlockHeight
|
||||
@@ -324,8 +303,11 @@ func (h *Handshaker) ReplayBlocks(appHash []byte, appBlockHeight int, proxyApp p
|
||||
func (h *Handshaker) replayBlocks(proxyApp proxy.AppConns, appBlockHeight, storeBlockHeight int, mutateState bool) ([]byte, error) {
|
||||
// App is further behind than it should be, so we need to replay blocks.
|
||||
// We replay all blocks from appBlockHeight+1.
|
||||
//
|
||||
// Note that we don't have an old version of the state,
|
||||
// so we by-pass state validation/mutation using sm.ExecCommitBlock.
|
||||
// This also means we won't be saving validator sets if they change during this period.
|
||||
//
|
||||
// If mutateState == true, the final block is replayed with h.replayBlock()
|
||||
|
||||
var appHash []byte
|
||||
|
@@ -241,12 +241,15 @@ func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusCo
|
||||
|
||||
// Get State
|
||||
stateDB := dbm.NewDB("state", config.DBBackend, config.DBDir())
|
||||
state := sm.MakeGenesisStateFromFile(stateDB, config.GenesisFile())
|
||||
state, err := sm.MakeGenesisStateFromFile(stateDB, config.GenesisFile())
|
||||
if err != nil {
|
||||
cmn.Exit(err.Error())
|
||||
}
|
||||
|
||||
// Create proxyAppConn connection (consensus, mempool, query)
|
||||
clientCreator := proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir())
|
||||
proxyApp := proxy.NewAppConns(clientCreator, NewHandshaker(state, blockStore))
|
||||
_, err := proxyApp.Start()
|
||||
_, err = proxyApp.Start()
|
||||
if err != nil {
|
||||
cmn.Exit(cmn.Fmt("Error starting proxy app conns: %v", err))
|
||||
}
|
||||
|
@@ -13,6 +13,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/tendermint/abci/example/dummy"
|
||||
abci "github.com/tendermint/abci/types"
|
||||
crypto "github.com/tendermint/go-crypto"
|
||||
wire "github.com/tendermint/go-wire"
|
||||
cmn "github.com/tendermint/tmlibs/common"
|
||||
@@ -162,8 +163,8 @@ LOOP:
|
||||
cs.Wait()
|
||||
}
|
||||
|
||||
func toPV(pv PrivValidator) *types.PrivValidator {
|
||||
return pv.(*types.PrivValidator)
|
||||
func toPV(pv types.PrivValidator) *types.PrivValidatorFS {
|
||||
return pv.(*types.PrivValidatorFS)
|
||||
}
|
||||
|
||||
func setupReplayTest(t *testing.T, thisCase *testCase, nLines int, crashAfter bool) (*ConsensusState, chan interface{}, string, string) {
|
||||
@@ -267,8 +268,6 @@ func testReplayCrashBeforeWriteVote(t *testing.T, thisCase *testCase, lineNum in
|
||||
var (
|
||||
NUM_BLOCKS = 6 // number of blocks in the test_data/many_blocks.cswal
|
||||
mempool = types.MockMempool{}
|
||||
|
||||
testPartSize int
|
||||
)
|
||||
|
||||
//---------------------------------------
|
||||
@@ -319,8 +318,7 @@ func testHandshakeReplay(t *testing.T, nBlocks int, mode uint) {
|
||||
walFile := writeWAL(string(walBody))
|
||||
config.Consensus.SetWalFile(walFile)
|
||||
|
||||
privVal := types.LoadPrivValidator(config.PrivValidatorFile())
|
||||
testPartSize = config.Consensus.BlockPartSize
|
||||
privVal := types.LoadPrivValidatorFS(config.PrivValidatorFile())
|
||||
|
||||
wal, err := NewWAL(walFile, false)
|
||||
if err != nil {
|
||||
@@ -335,7 +333,7 @@ func testHandshakeReplay(t *testing.T, nBlocks int, mode uint) {
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
|
||||
state, store := stateAndStore(config, privVal.PubKey)
|
||||
state, store := stateAndStore(config, privVal.GetPubKey())
|
||||
store.chain = chain
|
||||
store.commits = commits
|
||||
|
||||
@@ -349,7 +347,7 @@ func testHandshakeReplay(t *testing.T, nBlocks int, mode uint) {
|
||||
// run nBlocks against a new client to build up the app state.
|
||||
// use a throwaway tendermint state
|
||||
proxyApp := proxy.NewAppConns(clientCreator2, nil)
|
||||
state, _ := stateAndStore(config, privVal.PubKey)
|
||||
state, _ := stateAndStore(config, privVal.GetPubKey())
|
||||
buildAppStateFromChain(proxyApp, state, chain, nBlocks, mode)
|
||||
}
|
||||
|
||||
@@ -361,7 +359,7 @@ func testHandshakeReplay(t *testing.T, nBlocks int, mode uint) {
|
||||
}
|
||||
|
||||
// get the latest app hash from the app
|
||||
res, err := proxyApp.Query().InfoSync()
|
||||
res, err := proxyApp.Query().InfoSync(abci.RequestInfo{""})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -384,6 +382,7 @@ func testHandshakeReplay(t *testing.T, nBlocks int, mode uint) {
|
||||
}
|
||||
|
||||
func applyBlock(st *sm.State, blk *types.Block, proxyApp proxy.AppConns) {
|
||||
testPartSize := st.Params().BlockPartSizeBytes
|
||||
err := st.ApplyBlock(nil, proxyApp.Consensus(), blk, blk.MakePartSet(testPartSize).Header(), mempool)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
@@ -398,7 +397,7 @@ func buildAppStateFromChain(proxyApp proxy.AppConns,
|
||||
}
|
||||
|
||||
validators := types.TM2PB.Validators(state.Validators)
|
||||
proxyApp.Consensus().InitChainSync(validators)
|
||||
proxyApp.Consensus().InitChainSync(abci.RequestInitChain{validators})
|
||||
|
||||
defer proxyApp.Stop()
|
||||
switch mode {
|
||||
@@ -432,7 +431,7 @@ func buildTMStateFromChain(config *cfg.Config, state *sm.State, chain []*types.B
|
||||
defer proxyApp.Stop()
|
||||
|
||||
validators := types.TM2PB.Validators(state.Validators)
|
||||
proxyApp.Consensus().InitChainSync(validators)
|
||||
proxyApp.Consensus().InitChainSync(abci.RequestInitChain{validators})
|
||||
|
||||
var latestAppHash []byte
|
||||
|
||||
@@ -503,7 +502,7 @@ func makeBlockchainFromWAL(wal *WAL) ([]*types.Block, []*types.Commit, error) {
|
||||
// if its not the first one, we have a full block
|
||||
if blockParts != nil {
|
||||
var n int
|
||||
block := wire.ReadBinary(&types.Block{}, blockParts.GetReader(), types.MaxBlockSize, &n, &err).(*types.Block)
|
||||
block := wire.ReadBinary(&types.Block{}, blockParts.GetReader(), 0, &n, &err).(*types.Block)
|
||||
blocks = append(blocks, block)
|
||||
}
|
||||
blockParts = types.NewPartSetFromHeader(*p)
|
||||
@@ -524,7 +523,7 @@ func makeBlockchainFromWAL(wal *WAL) ([]*types.Block, []*types.Commit, error) {
|
||||
}
|
||||
// grab the last block too
|
||||
var n int
|
||||
block := wire.ReadBinary(&types.Block{}, blockParts.GetReader(), types.MaxBlockSize, &n, &err).(*types.Block)
|
||||
block := wire.ReadBinary(&types.Block{}, blockParts.GetReader(), 0, &n, &err).(*types.Block)
|
||||
blocks = append(blocks, block)
|
||||
return blocks, commits, nil
|
||||
}
|
||||
@@ -560,10 +559,10 @@ func readPieceFromWAL(msgBytes []byte) (interface{}, error) {
|
||||
// fresh state and mock store
|
||||
func stateAndStore(config *cfg.Config, pubKey crypto.PubKey) (*sm.State, *mockBlockStore) {
|
||||
stateDB := dbm.NewMemDB()
|
||||
state := sm.MakeGenesisStateFromFile(stateDB, config.GenesisFile())
|
||||
state, _ := sm.MakeGenesisStateFromFile(stateDB, config.GenesisFile())
|
||||
state.SetLogger(log.TestingLogger().With("module", "state"))
|
||||
|
||||
store := NewMockBlockStore(config)
|
||||
store := NewMockBlockStore(config, state.Params())
|
||||
return state, store
|
||||
}
|
||||
|
||||
@@ -572,13 +571,14 @@ func stateAndStore(config *cfg.Config, pubKey crypto.PubKey) (*sm.State, *mockBl
|
||||
|
||||
type mockBlockStore struct {
|
||||
config *cfg.Config
|
||||
params types.ConsensusParams
|
||||
chain []*types.Block
|
||||
commits []*types.Commit
|
||||
}
|
||||
|
||||
// TODO: NewBlockStore(db.NewMemDB) ...
|
||||
func NewMockBlockStore(config *cfg.Config) *mockBlockStore {
|
||||
return &mockBlockStore{config, nil, nil}
|
||||
func NewMockBlockStore(config *cfg.Config, params types.ConsensusParams) *mockBlockStore {
|
||||
return &mockBlockStore{config, params, nil, nil}
|
||||
}
|
||||
|
||||
func (bs *mockBlockStore) Height() int { return len(bs.chain) }
|
||||
@@ -586,7 +586,7 @@ func (bs *mockBlockStore) LoadBlock(height int) *types.Block { return bs.chain[h
|
||||
func (bs *mockBlockStore) LoadBlockMeta(height int) *types.BlockMeta {
|
||||
block := bs.chain[height-1]
|
||||
return &types.BlockMeta{
|
||||
BlockID: types.BlockID{block.Hash(), block.MakePartSet(bs.config.Consensus.BlockPartSize).Header()},
|
||||
BlockID: types.BlockID{block.Hash(), block.MakePartSet(bs.params.BlockPartSizeBytes).Header()},
|
||||
Header: block.Header,
|
||||
}
|
||||
}
|
||||
|
@@ -4,24 +4,30 @@ import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
fail "github.com/ebuchman/fail-test"
|
||||
|
||||
wire "github.com/tendermint/go-wire"
|
||||
cmn "github.com/tendermint/tmlibs/common"
|
||||
"github.com/tendermint/tmlibs/log"
|
||||
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
"github.com/tendermint/tendermint/proxy"
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
cmn "github.com/tendermint/tmlibs/common"
|
||||
"github.com/tendermint/tmlibs/log"
|
||||
)
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
// Config
|
||||
|
||||
const (
|
||||
proposalHeartbeatIntervalSeconds = 2
|
||||
)
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
// Errors
|
||||
|
||||
@@ -35,6 +41,7 @@ var (
|
||||
//-----------------------------------------------------------------------------
|
||||
// RoundStepType enum type
|
||||
|
||||
// RoundStepType enumerates the state of the consensus state machine
|
||||
type RoundStepType uint8 // These must be numeric, ordered.
|
||||
|
||||
const (
|
||||
@@ -49,6 +56,7 @@ const (
|
||||
// NOTE: RoundStepNewHeight acts as RoundStepCommitWait.
|
||||
)
|
||||
|
||||
// String returns a string
|
||||
func (rs RoundStepType) String() string {
|
||||
switch rs {
|
||||
case RoundStepNewHeight:
|
||||
@@ -74,7 +82,8 @@ func (rs RoundStepType) String() string {
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
|
||||
// Immutable when returned from ConsensusState.GetRoundState()
|
||||
// RoundState defines the internal consensus state.
|
||||
// It is Immutable when returned from ConsensusState.GetRoundState()
|
||||
// TODO: Actually, only the top pointer is copied,
|
||||
// so access to field pointers is still racey
|
||||
type RoundState struct {
|
||||
@@ -96,6 +105,7 @@ type RoundState struct {
|
||||
LastValidators *types.ValidatorSet
|
||||
}
|
||||
|
||||
// RoundStateEvent returns the H/R/S of the RoundState as an event.
|
||||
func (rs *RoundState) RoundStateEvent() types.EventDataRoundState {
|
||||
edrs := types.EventDataRoundState{
|
||||
Height: rs.Height,
|
||||
@@ -106,10 +116,12 @@ func (rs *RoundState) RoundStateEvent() types.EventDataRoundState {
|
||||
return edrs
|
||||
}
|
||||
|
||||
// String returns a string
|
||||
func (rs *RoundState) String() string {
|
||||
return rs.StringIndented("")
|
||||
}
|
||||
|
||||
// StringIndented returns a string
|
||||
func (rs *RoundState) StringIndented(indent string) string {
|
||||
return fmt.Sprintf(`RoundState{
|
||||
%s H:%v R:%v S:%v
|
||||
@@ -138,6 +150,7 @@ func (rs *RoundState) StringIndented(indent string) string {
|
||||
indent)
|
||||
}
|
||||
|
||||
// StringShort returns a string
|
||||
func (rs *RoundState) StringShort() string {
|
||||
return fmt.Sprintf(`RoundState{H:%v R:%v S:%v ST:%v}`,
|
||||
rs.Height, rs.Round, rs.Step, rs.StartTime)
|
||||
@@ -167,19 +180,16 @@ func (ti *timeoutInfo) String() string {
|
||||
return fmt.Sprintf("%v ; %d/%d %v", ti.Duration, ti.Height, ti.Round, ti.Step)
|
||||
}
|
||||
|
||||
type PrivValidator interface {
|
||||
GetAddress() []byte
|
||||
SignVote(chainID string, vote *types.Vote) error
|
||||
SignProposal(chainID string, proposal *types.Proposal) error
|
||||
}
|
||||
|
||||
// Tracks consensus state across block heights and rounds.
|
||||
// ConsensusState handles execution of the consensus algorithm.
|
||||
// It processes votes and proposals, and upon reaching agreement,
|
||||
// commits blocks to the chain and executes them against the application.
|
||||
// The internal state machine receives input from peers, the internal validator, and from a timer.
|
||||
type ConsensusState struct {
|
||||
cmn.BaseService
|
||||
|
||||
// config details
|
||||
config *cfg.ConsensusConfig
|
||||
privValidator PrivValidator // for signing votes
|
||||
privValidator types.PrivValidator // for signing votes
|
||||
|
||||
// services for creating and executing blocks
|
||||
proxyAppConn proxy.AppConnConsensus
|
||||
@@ -218,6 +228,7 @@ type ConsensusState struct {
|
||||
done chan struct{}
|
||||
}
|
||||
|
||||
// NewConsensusState returns a new ConsensusState.
|
||||
func NewConsensusState(config *cfg.ConsensusConfig, state *sm.State, proxyAppConn proxy.AppConnConsensus, blockStore types.BlockStore, mempool types.Mempool) *ConsensusState {
|
||||
cs := &ConsensusState{
|
||||
config: config,
|
||||
@@ -256,17 +267,20 @@ func (cs *ConsensusState) SetEventSwitch(evsw types.EventSwitch) {
|
||||
cs.evsw = evsw
|
||||
}
|
||||
|
||||
// String returns a string.
|
||||
func (cs *ConsensusState) String() string {
|
||||
// better not to access shared variables
|
||||
return cmn.Fmt("ConsensusState") //(H:%v R:%v S:%v", cs.Height, cs.Round, cs.Step)
|
||||
}
|
||||
|
||||
// GetState returns a copy of the chain state.
|
||||
func (cs *ConsensusState) GetState() *sm.State {
|
||||
cs.mtx.Lock()
|
||||
defer cs.mtx.Unlock()
|
||||
return cs.state.Copy()
|
||||
}
|
||||
|
||||
// GetRoundState returns a copy of the internal consensus state.
|
||||
func (cs *ConsensusState) GetRoundState() *RoundState {
|
||||
cs.mtx.Lock()
|
||||
defer cs.mtx.Unlock()
|
||||
@@ -278,26 +292,28 @@ func (cs *ConsensusState) getRoundState() *RoundState {
|
||||
return &rs
|
||||
}
|
||||
|
||||
// GetValidators returns a copy of the current validators.
|
||||
func (cs *ConsensusState) GetValidators() (int, []*types.Validator) {
|
||||
cs.mtx.Lock()
|
||||
defer cs.mtx.Unlock()
|
||||
return cs.state.LastBlockHeight, cs.state.Validators.Copy().Validators
|
||||
}
|
||||
|
||||
// Sets our private validator account for signing votes.
|
||||
func (cs *ConsensusState) SetPrivValidator(priv PrivValidator) {
|
||||
// SetPrivValidator sets the private validator account for signing votes.
|
||||
func (cs *ConsensusState) SetPrivValidator(priv types.PrivValidator) {
|
||||
cs.mtx.Lock()
|
||||
defer cs.mtx.Unlock()
|
||||
cs.privValidator = priv
|
||||
}
|
||||
|
||||
// Set the local timer
|
||||
// SetTimeoutTicker sets the local timer. It may be useful to overwrite for testing.
|
||||
func (cs *ConsensusState) SetTimeoutTicker(timeoutTicker TimeoutTicker) {
|
||||
cs.mtx.Lock()
|
||||
defer cs.mtx.Unlock()
|
||||
cs.timeoutTicker = timeoutTicker
|
||||
}
|
||||
|
||||
// LoadCommit loads the commit for a given height.
|
||||
func (cs *ConsensusState) LoadCommit(height int) *types.Commit {
|
||||
cs.mtx.Lock()
|
||||
defer cs.mtx.Unlock()
|
||||
@@ -307,6 +323,8 @@ func (cs *ConsensusState) LoadCommit(height int) *types.Commit {
|
||||
return cs.blockStore.LoadBlockCommit(height)
|
||||
}
|
||||
|
||||
// OnStart implements cmn.Service.
|
||||
// It loads the latest state via the WAL, and starts the timeout and receive routines.
|
||||
func (cs *ConsensusState) OnStart() error {
|
||||
|
||||
walFile := cs.config.WalFile()
|
||||
@@ -347,6 +365,7 @@ func (cs *ConsensusState) startRoutines(maxSteps int) {
|
||||
go cs.receiveRoutine(maxSteps)
|
||||
}
|
||||
|
||||
// OnStop implements cmn.Service. It stops all routines and waits for the WAL to finish.
|
||||
func (cs *ConsensusState) OnStop() {
|
||||
cs.BaseService.OnStop()
|
||||
|
||||
@@ -358,15 +377,16 @@ func (cs *ConsensusState) OnStop() {
|
||||
}
|
||||
}
|
||||
|
||||
// Wait waits for the the main routine to return.
|
||||
// NOTE: be sure to Stop() the event switch and drain
|
||||
// any event channels or this may deadlock
|
||||
func (cs *ConsensusState) Wait() {
|
||||
<-cs.done
|
||||
}
|
||||
|
||||
// Open file to log all consensus messages and timeouts for deterministic accountability
|
||||
// OpenWAL opens a file to log all consensus messages and timeouts for deterministic accountability
|
||||
func (cs *ConsensusState) OpenWAL(walFile string) (err error) {
|
||||
err = cmn.EnsureDir(path.Dir(walFile), 0700)
|
||||
err = cmn.EnsureDir(filepath.Dir(walFile), 0700)
|
||||
if err != nil {
|
||||
cs.Logger.Error("Error ensuring ConsensusState wal dir", "err", err.Error())
|
||||
return err
|
||||
@@ -387,11 +407,13 @@ func (cs *ConsensusState) OpenWAL(walFile string) (err error) {
|
||||
}
|
||||
|
||||
//------------------------------------------------------------
|
||||
// Public interface for passing messages into the consensus state,
|
||||
// possibly causing a state transition
|
||||
// Public interface for passing messages into the consensus state, possibly causing a state transition.
|
||||
// If peerKey == "", the msg is considered internal.
|
||||
// Messages are added to the appropriate queue (peer or internal).
|
||||
// If the queue is full, the function may block.
|
||||
// TODO: should these return anything or let callers just use events?
|
||||
|
||||
// May block on send if queue is full.
|
||||
// AddVote inputs a vote.
|
||||
func (cs *ConsensusState) AddVote(vote *types.Vote, peerKey string) (added bool, err error) {
|
||||
if peerKey == "" {
|
||||
cs.internalMsgQueue <- msgInfo{&VoteMessage{vote}, ""}
|
||||
@@ -403,7 +425,7 @@ func (cs *ConsensusState) AddVote(vote *types.Vote, peerKey string) (added bool,
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// May block on send if queue is full.
|
||||
// SetProposal inputs a proposal.
|
||||
func (cs *ConsensusState) SetProposal(proposal *types.Proposal, peerKey string) error {
|
||||
|
||||
if peerKey == "" {
|
||||
@@ -416,7 +438,7 @@ func (cs *ConsensusState) SetProposal(proposal *types.Proposal, peerKey string)
|
||||
return nil
|
||||
}
|
||||
|
||||
// May block on send if queue is full.
|
||||
// AddProposalBlockPart inputs a part of the proposal block.
|
||||
func (cs *ConsensusState) AddProposalBlockPart(height, round int, part *types.Part, peerKey string) error {
|
||||
|
||||
if peerKey == "" {
|
||||
@@ -429,7 +451,7 @@ func (cs *ConsensusState) AddProposalBlockPart(height, round int, part *types.Pa
|
||||
return nil
|
||||
}
|
||||
|
||||
// May block on send if queue is full.
|
||||
// SetProposalAndBlock inputs the proposal and all block parts.
|
||||
func (cs *ConsensusState) SetProposalAndBlock(proposal *types.Proposal, block *types.Block, parts *types.PartSet, peerKey string) error {
|
||||
cs.SetProposal(proposal, peerKey)
|
||||
for i := 0; i < parts.Total(); i++ {
|
||||
@@ -582,8 +604,15 @@ func (cs *ConsensusState) newStep() {
|
||||
// receiveRoutine handles messages which may cause state transitions.
|
||||
// it's argument (n) is the number of messages to process before exiting - use 0 to run forever
|
||||
// It keeps the RoundState and is the only thing that updates it.
|
||||
// Updates (state transitions) happen on timeouts, complete proposals, and 2/3 majorities
|
||||
// Updates (state transitions) happen on timeouts, complete proposals, and 2/3 majorities.
|
||||
// ConsensusState must be locked before any internal state is updated.
|
||||
func (cs *ConsensusState) receiveRoutine(maxSteps int) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
cs.Logger.Error("CONSENSUS FAILURE!!!", "err", r)
|
||||
}
|
||||
}()
|
||||
|
||||
for {
|
||||
if maxSteps > 0 {
|
||||
if cs.nSteps >= maxSteps {
|
||||
@@ -596,15 +625,17 @@ func (cs *ConsensusState) receiveRoutine(maxSteps int) {
|
||||
var mi msgInfo
|
||||
|
||||
select {
|
||||
case height := <-cs.mempool.TxsAvailable():
|
||||
cs.handleTxsAvailable(height)
|
||||
case mi = <-cs.peerMsgQueue:
|
||||
cs.wal.Save(mi)
|
||||
// handles proposals, block parts, votes
|
||||
// may generate internal events (votes, complete proposals, 2/3 majorities)
|
||||
cs.handleMsg(mi, rs)
|
||||
cs.handleMsg(mi)
|
||||
case mi = <-cs.internalMsgQueue:
|
||||
cs.wal.Save(mi)
|
||||
// handles proposals, block parts, votes
|
||||
cs.handleMsg(mi, rs)
|
||||
cs.handleMsg(mi)
|
||||
case ti := <-cs.timeoutTicker.Chan(): // tockChan:
|
||||
cs.wal.Save(ti)
|
||||
// if the timeout is relevant to the rs
|
||||
@@ -628,7 +659,7 @@ func (cs *ConsensusState) receiveRoutine(maxSteps int) {
|
||||
}
|
||||
|
||||
// state transitions on complete-proposal, 2/3-any, 2/3-one
|
||||
func (cs *ConsensusState) handleMsg(mi msgInfo, rs RoundState) {
|
||||
func (cs *ConsensusState) handleMsg(mi msgInfo) {
|
||||
cs.mtx.Lock()
|
||||
defer cs.mtx.Unlock()
|
||||
|
||||
@@ -685,6 +716,8 @@ func (cs *ConsensusState) handleTimeout(ti timeoutInfo, rs RoundState) {
|
||||
// NewRound event fired from enterNewRound.
|
||||
// XXX: should we fire timeout here (for timeout commit)?
|
||||
cs.enterNewRound(ti.Height, 0)
|
||||
case RoundStepNewRound:
|
||||
cs.enterPropose(ti.Height, 0)
|
||||
case RoundStepPropose:
|
||||
types.FireEventTimeoutPropose(cs.evsw, cs.RoundStateEvent())
|
||||
cs.enterPrevote(ti.Height, ti.Round)
|
||||
@@ -700,13 +733,22 @@ func (cs *ConsensusState) handleTimeout(ti timeoutInfo, rs RoundState) {
|
||||
|
||||
}
|
||||
|
||||
func (cs *ConsensusState) handleTxsAvailable(height int) {
|
||||
cs.mtx.Lock()
|
||||
defer cs.mtx.Unlock()
|
||||
// we only need to do this for round 0
|
||||
cs.enterPropose(height, 0)
|
||||
}
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
// State functions
|
||||
// Used internally by handleTimeout and handleMsg to make state transitions
|
||||
|
||||
// Enter: +2/3 precommits for nil at (height,round-1)
|
||||
// Enter: `timeoutNewHeight` by startTime (commitTime+timeoutCommit),
|
||||
// or, if SkipTimeout==true, after receiving all precommits from (height,round-1)
|
||||
// Enter: `timeoutPrecommits` after any +2/3 precommits from (height,round-1)
|
||||
// Enter: `startTime = commitTime+timeoutCommit` from NewHeight(height)
|
||||
// Enter: +2/3 precommits for nil at (height,round-1)
|
||||
// Enter: +2/3 prevotes any or +2/3 precommits for block or any from (height, round)
|
||||
// NOTE: cs.StartTime was already set for height.
|
||||
func (cs *ConsensusState) enterNewRound(height int, round int) {
|
||||
if cs.Height != height || round < cs.Round || (cs.Round == round && cs.Step != RoundStepNewHeight) {
|
||||
@@ -745,11 +787,66 @@ func (cs *ConsensusState) enterNewRound(height int, round int) {
|
||||
|
||||
types.FireEventNewRound(cs.evsw, cs.RoundStateEvent())
|
||||
|
||||
// Immediately go to enterPropose.
|
||||
cs.enterPropose(height, round)
|
||||
// Wait for txs to be available in the mempool
|
||||
// before we enterPropose in round 0. If the last block changed the app hash,
|
||||
// we may need an empty "proof" block, and enterPropose immediately.
|
||||
waitForTxs := cs.config.WaitForTxs() && round == 0 && !cs.needProofBlock(height)
|
||||
if waitForTxs {
|
||||
if cs.config.CreateEmptyBlocksInterval > 0 {
|
||||
cs.scheduleTimeout(cs.config.EmptyBlocksInterval(), height, round, RoundStepNewRound)
|
||||
}
|
||||
go cs.proposalHeartbeat(height, round)
|
||||
} else {
|
||||
cs.enterPropose(height, round)
|
||||
}
|
||||
}
|
||||
|
||||
// Enter: from NewRound(height,round).
|
||||
// needProofBlock returns true on the first height (so the genesis app hash is signed right away)
|
||||
// and where the last block (height-1) caused the app hash to change
|
||||
func (cs *ConsensusState) needProofBlock(height int) bool {
|
||||
if height == 1 {
|
||||
return true
|
||||
}
|
||||
|
||||
lastBlockMeta := cs.blockStore.LoadBlockMeta(height - 1)
|
||||
if !bytes.Equal(cs.state.AppHash, lastBlockMeta.Header.AppHash) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (cs *ConsensusState) proposalHeartbeat(height, round int) {
|
||||
counter := 0
|
||||
addr := cs.privValidator.GetAddress()
|
||||
valIndex, v := cs.Validators.GetByAddress(addr)
|
||||
if v == nil {
|
||||
// not a validator
|
||||
valIndex = -1
|
||||
}
|
||||
for {
|
||||
rs := cs.GetRoundState()
|
||||
// if we've already moved on, no need to send more heartbeats
|
||||
if rs.Step > RoundStepNewRound || rs.Round > round || rs.Height > height {
|
||||
return
|
||||
}
|
||||
heartbeat := &types.Heartbeat{
|
||||
Height: rs.Height,
|
||||
Round: rs.Round,
|
||||
Sequence: counter,
|
||||
ValidatorAddress: addr,
|
||||
ValidatorIndex: valIndex,
|
||||
}
|
||||
cs.privValidator.SignHeartbeat(cs.state.ChainID, heartbeat)
|
||||
heartbeatEvent := types.EventDataProposalHeartbeat{heartbeat}
|
||||
types.FireEventProposalHeartbeat(cs.evsw, heartbeatEvent)
|
||||
counter += 1
|
||||
time.Sleep(proposalHeartbeatIntervalSeconds * time.Second)
|
||||
}
|
||||
}
|
||||
|
||||
// Enter (CreateEmptyBlocks): from enterNewRound(height,round)
|
||||
// Enter (CreateEmptyBlocks, CreateEmptyBlocksInterval > 0 ): after enterNewRound(height,round), after timeout of CreateEmptyBlocksInterval
|
||||
// Enter (!CreateEmptyBlocks) : after enterNewRound(height,round), once txs are in the mempool
|
||||
func (cs *ConsensusState) enterPropose(height int, round int) {
|
||||
if cs.Height != height || round < cs.Round || (cs.Round == round && RoundStepPropose <= cs.Step) {
|
||||
cs.Logger.Debug(cmn.Fmt("enterPropose(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
|
||||
@@ -779,7 +876,7 @@ func (cs *ConsensusState) enterPropose(height int, round int) {
|
||||
return
|
||||
}
|
||||
|
||||
if !bytes.Equal(cs.Validators.GetProposer().Address, cs.privValidator.GetAddress()) {
|
||||
if !cs.isProposer() {
|
||||
cs.Logger.Info("enterPropose: Not our turn to propose", "proposer", cs.Validators.GetProposer().Address, "privValidator", cs.privValidator)
|
||||
if cs.Validators.HasAddress(cs.privValidator.GetAddress()) {
|
||||
cs.Logger.Debug("This node is a validator")
|
||||
@@ -793,6 +890,10 @@ func (cs *ConsensusState) enterPropose(height int, round int) {
|
||||
}
|
||||
}
|
||||
|
||||
func (cs *ConsensusState) isProposer() bool {
|
||||
return bytes.Equal(cs.Validators.GetProposer().Address, cs.privValidator.GetAddress())
|
||||
}
|
||||
|
||||
func (cs *ConsensusState) defaultDecideProposal(height, round int) {
|
||||
var block *types.Block
|
||||
var blockParts *types.PartSet
|
||||
@@ -874,7 +975,8 @@ func (cs *ConsensusState) createProposalBlock() (block *types.Block, blockParts
|
||||
txs := cs.mempool.Reap(cs.config.MaxBlockSizeTxs)
|
||||
|
||||
return types.MakeBlock(cs.Height, cs.state.ChainID, txs, commit,
|
||||
cs.state.LastBlockID, cs.state.Validators.Hash(), cs.state.AppHash, cs.config.BlockPartSize)
|
||||
cs.state.LastBlockID, cs.state.Validators.Hash(),
|
||||
cs.state.AppHash, cs.state.Params().BlockPartSizeBytes)
|
||||
}
|
||||
|
||||
// Enter: `timeoutPropose` after entering Propose.
|
||||
@@ -912,25 +1014,26 @@ func (cs *ConsensusState) enterPrevote(height int, round int) {
|
||||
}
|
||||
|
||||
func (cs *ConsensusState) defaultDoPrevote(height int, round int) {
|
||||
logger := cs.Logger.With("height", height, "round", round)
|
||||
// If a block is locked, prevote that.
|
||||
if cs.LockedBlock != nil {
|
||||
cs.Logger.Info("enterPrevote: Block was locked")
|
||||
logger.Info("enterPrevote: Block was locked")
|
||||
cs.signAddVote(types.VoteTypePrevote, cs.LockedBlock.Hash(), cs.LockedBlockParts.Header())
|
||||
return
|
||||
}
|
||||
|
||||
// If ProposalBlock is nil, prevote nil.
|
||||
if cs.ProposalBlock == nil {
|
||||
cs.Logger.Info("enterPrevote: ProposalBlock is nil")
|
||||
logger.Info("enterPrevote: ProposalBlock is nil")
|
||||
cs.signAddVote(types.VoteTypePrevote, nil, types.PartSetHeader{})
|
||||
return
|
||||
}
|
||||
|
||||
// Valdiate proposal block
|
||||
// Validate proposal block
|
||||
err := cs.state.ValidateBlock(cs.ProposalBlock)
|
||||
if err != nil {
|
||||
// ProposalBlock is invalid, prevote nil.
|
||||
cs.Logger.Error("enterPrevote: ProposalBlock is invalid", "err", err)
|
||||
logger.Error("enterPrevote: ProposalBlock is invalid", "err", err)
|
||||
cs.signAddVote(types.VoteTypePrevote, nil, types.PartSetHeader{})
|
||||
return
|
||||
}
|
||||
@@ -938,6 +1041,7 @@ func (cs *ConsensusState) defaultDoPrevote(height int, round int) {
|
||||
// Prevote cs.ProposalBlock
|
||||
// NOTE: the proposal signature is validated when it is received,
|
||||
// and the proposal block parts are validated as they are received (against the merkle hash in the proposal)
|
||||
logger.Info("enterPrevote: ProposalBlock is valid")
|
||||
cs.signAddVote(types.VoteTypePrevote, cs.ProposalBlock.Hash(), cs.ProposalBlockParts.Header())
|
||||
}
|
||||
|
||||
@@ -962,8 +1066,8 @@ func (cs *ConsensusState) enterPrevoteWait(height int, round int) {
|
||||
cs.scheduleTimeout(cs.config.Prevote(round), height, round, RoundStepPrevoteWait)
|
||||
}
|
||||
|
||||
// Enter: +2/3 precomits for block or nil.
|
||||
// Enter: `timeoutPrevote` after any +2/3 prevotes.
|
||||
// Enter: +2/3 precomits for block or nil.
|
||||
// Enter: any +2/3 precommits for next round.
|
||||
// Lock & precommit the ProposalBlock if we have enough prevotes for it (a POL in this round)
|
||||
// else, unlock an existing lock and precommit nil if +2/3 of prevotes were nil,
|
||||
@@ -1306,7 +1410,8 @@ func (cs *ConsensusState) addProposalBlockPart(height int, part *types.Part, ver
|
||||
// Added and completed!
|
||||
var n int
|
||||
var err error
|
||||
cs.ProposalBlock = wire.ReadBinary(&types.Block{}, cs.ProposalBlockParts.GetReader(), types.MaxBlockSize, &n, &err).(*types.Block)
|
||||
cs.ProposalBlock = wire.ReadBinary(&types.Block{}, cs.ProposalBlockParts.GetReader(),
|
||||
cs.state.Params().BlockSizeParams.MaxBytes, &n, &err).(*types.Block)
|
||||
// NOTE: it's possible to receive complete proposal blocks for future rounds without having the proposal
|
||||
cs.Logger.Info("Received complete proposal block", "height", cs.ProposalBlock.Height, "hash", cs.ProposalBlock.Hash())
|
||||
if cs.Step == RoundStepPropose && cs.isProposalComplete() {
|
||||
@@ -1331,19 +1436,14 @@ func (cs *ConsensusState) tryAddVote(vote *types.Vote, peerKey string) error {
|
||||
if err == ErrVoteHeightMismatch {
|
||||
return err
|
||||
} else if _, ok := err.(*types.ErrVoteConflictingVotes); ok {
|
||||
if peerKey == "" {
|
||||
if bytes.Equal(vote.ValidatorAddress, cs.privValidator.GetAddress()) {
|
||||
cs.Logger.Error("Found conflicting vote from ourselves. Did you unsafe_reset a validator?", "height", vote.Height, "round", vote.Round, "type", vote.Type)
|
||||
return err
|
||||
}
|
||||
cs.Logger.Error("Found conflicting vote. Publish evidence (TODO)")
|
||||
/* TODO
|
||||
evidenceTx := &types.DupeoutTx{
|
||||
Address: address,
|
||||
VoteA: *errDupe.VoteA,
|
||||
VoteB: *errDupe.VoteB,
|
||||
}
|
||||
cs.mempool.BroadcastTx(struct{???}{evidenceTx}) // shouldn't need to check returned err
|
||||
*/
|
||||
cs.Logger.Error("Found conflicting vote. Publish evidence (TODO)", "height", vote.Height, "round", vote.Round, "type", vote.Type, "valAddr", vote.ValidatorAddress, "valIndex", vote.ValidatorIndex)
|
||||
|
||||
// TODO: track evidence for inclusion in a block
|
||||
|
||||
return err
|
||||
} else {
|
||||
// Probably an invalid signature. Bad peer.
|
||||
|
@@ -79,7 +79,7 @@ func TestProposerSelection0(t *testing.T) {
|
||||
<-newRoundCh
|
||||
|
||||
prop = cs1.GetRoundState().Validators.GetProposer()
|
||||
if !bytes.Equal(prop.Address, vss[1].Address) {
|
||||
if !bytes.Equal(prop.Address, vss[1].GetAddress()) {
|
||||
panic(Fmt("expected proposer to be validator %d. Got %X", 1, prop.Address))
|
||||
}
|
||||
}
|
||||
@@ -100,7 +100,7 @@ func TestProposerSelection2(t *testing.T) {
|
||||
// everyone just votes nil. we get a new proposer each round
|
||||
for i := 0; i < len(vss); i++ {
|
||||
prop := cs1.GetRoundState().Validators.GetProposer()
|
||||
if !bytes.Equal(prop.Address, vss[(i+2)%len(vss)].Address) {
|
||||
if !bytes.Equal(prop.Address, vss[(i+2)%len(vss)].GetAddress()) {
|
||||
panic(Fmt("expected proposer to be validator %d. Got %X", (i+2)%len(vss), prop.Address))
|
||||
}
|
||||
|
||||
@@ -180,7 +180,7 @@ func TestBadProposal(t *testing.T) {
|
||||
height, round := cs1.Height, cs1.Round
|
||||
vs2 := vss[1]
|
||||
|
||||
partSize := config.Consensus.BlockPartSize
|
||||
partSize := cs1.state.Params().BlockPartSizeBytes
|
||||
|
||||
proposalCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringCompleteProposal(), 1)
|
||||
voteCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringVote(), 1)
|
||||
@@ -327,7 +327,7 @@ func TestLockNoPOL(t *testing.T) {
|
||||
vs2 := vss[1]
|
||||
height := cs1.Height
|
||||
|
||||
partSize := config.Consensus.BlockPartSize
|
||||
partSize := cs1.state.Params().BlockPartSizeBytes
|
||||
|
||||
timeoutProposeCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutPropose(), 1)
|
||||
timeoutWaitCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutWait(), 1)
|
||||
@@ -493,7 +493,7 @@ func TestLockPOLRelock(t *testing.T) {
|
||||
cs1, vss := randConsensusState(4)
|
||||
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
|
||||
|
||||
partSize := config.Consensus.BlockPartSize
|
||||
partSize := cs1.state.Params().BlockPartSizeBytes
|
||||
|
||||
timeoutProposeCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutPropose(), 1)
|
||||
timeoutWaitCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutWait(), 1)
|
||||
@@ -502,8 +502,6 @@ func TestLockPOLRelock(t *testing.T) {
|
||||
newRoundCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringNewRound(), 1)
|
||||
newBlockCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringNewBlockHeader(), 1)
|
||||
|
||||
t.Logf("vs2 last round %v", vs2.PrivValidator.LastRound)
|
||||
|
||||
// everything done from perspective of cs1
|
||||
|
||||
/*
|
||||
@@ -608,7 +606,7 @@ func TestLockPOLUnlock(t *testing.T) {
|
||||
cs1, vss := randConsensusState(4)
|
||||
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
|
||||
|
||||
partSize := config.Consensus.BlockPartSize
|
||||
partSize := cs1.state.Params().BlockPartSizeBytes
|
||||
|
||||
proposalCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringCompleteProposal(), 1)
|
||||
timeoutProposeCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutPropose(), 1)
|
||||
@@ -703,7 +701,7 @@ func TestLockPOLSafety1(t *testing.T) {
|
||||
cs1, vss := randConsensusState(4)
|
||||
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
|
||||
|
||||
partSize := config.Consensus.BlockPartSize
|
||||
partSize := cs1.state.Params().BlockPartSizeBytes
|
||||
|
||||
proposalCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringCompleteProposal(), 1)
|
||||
timeoutProposeCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutPropose(), 1)
|
||||
@@ -824,7 +822,7 @@ func TestLockPOLSafety2(t *testing.T) {
|
||||
cs1, vss := randConsensusState(4)
|
||||
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
|
||||
|
||||
partSize := config.Consensus.BlockPartSize
|
||||
partSize := cs1.state.Params().BlockPartSizeBytes
|
||||
|
||||
proposalCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringCompleteProposal(), 1)
|
||||
timeoutProposeCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutPropose(), 1)
|
||||
@@ -999,7 +997,7 @@ func TestHalt1(t *testing.T) {
|
||||
cs1, vss := randConsensusState(4)
|
||||
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
|
||||
|
||||
partSize := config.Consensus.BlockPartSize
|
||||
partSize := cs1.state.Params().BlockPartSizeBytes
|
||||
|
||||
proposalCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringCompleteProposal(), 1)
|
||||
timeoutWaitCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutWait(), 1)
|
||||
|
@@ -3,7 +3,7 @@ package consensus
|
||||
import (
|
||||
"time"
|
||||
|
||||
. "github.com/tendermint/tmlibs/common"
|
||||
cmn "github.com/tendermint/tmlibs/common"
|
||||
"github.com/tendermint/tmlibs/log"
|
||||
)
|
||||
|
||||
@@ -29,24 +29,26 @@ type TimeoutTicker interface {
|
||||
// Timeouts are scheduled along the tickChan,
|
||||
// and fired on the tockChan.
|
||||
type timeoutTicker struct {
|
||||
BaseService
|
||||
cmn.BaseService
|
||||
|
||||
timer *time.Timer
|
||||
tickChan chan timeoutInfo
|
||||
tockChan chan timeoutInfo
|
||||
tickChan chan timeoutInfo // for scheduling timeouts
|
||||
tockChan chan timeoutInfo // for notifying about them
|
||||
}
|
||||
|
||||
// NewTimeoutTicker returns a new TimeoutTicker.
|
||||
func NewTimeoutTicker() TimeoutTicker {
|
||||
tt := &timeoutTicker{
|
||||
timer: time.NewTimer(0),
|
||||
tickChan: make(chan timeoutInfo, tickTockBufferSize),
|
||||
tockChan: make(chan timeoutInfo, tickTockBufferSize),
|
||||
}
|
||||
tt.BaseService = *NewBaseService(nil, "TimeoutTicker", tt)
|
||||
tt.BaseService = *cmn.NewBaseService(nil, "TimeoutTicker", tt)
|
||||
tt.stopTimer() // don't want to fire until the first scheduled timeout
|
||||
return tt
|
||||
}
|
||||
|
||||
// OnStart implements cmn.Service. It starts the timeout routine.
|
||||
func (t *timeoutTicker) OnStart() error {
|
||||
|
||||
go t.timeoutRoutine()
|
||||
@@ -54,16 +56,19 @@ func (t *timeoutTicker) OnStart() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// OnStop implements cmn.Service. It stops the timeout routine.
|
||||
func (t *timeoutTicker) OnStop() {
|
||||
t.BaseService.OnStop()
|
||||
t.stopTimer()
|
||||
}
|
||||
|
||||
// Chan returns a channel on which timeouts are sent.
|
||||
func (t *timeoutTicker) Chan() <-chan timeoutInfo {
|
||||
return t.tockChan
|
||||
}
|
||||
|
||||
// The timeoutRoutine is alwaya available to read from tickChan (it won't block).
|
||||
// ScheduleTimeout schedules a new timeout by sending on the internal tickChan.
|
||||
// The timeoutRoutine is alwaya available to read from tickChan, so this won't block.
|
||||
// The scheduling may fail if the timeoutRoutine has already scheduled a timeout for a later height/round/step.
|
||||
func (t *timeoutTicker) ScheduleTimeout(ti timeoutInfo) {
|
||||
t.tickChan <- ti
|
||||
|
20
docs/Makefile
Normal file
20
docs/Makefile
Normal file
@@ -0,0 +1,20 @@
|
||||
# Minimal makefile for Sphinx documentation
|
||||
#
|
||||
|
||||
# You can set these variables from the command line.
|
||||
SPHINXOPTS =
|
||||
SPHINXBUILD = python -msphinx
|
||||
SPHINXPROJ = Tendermint
|
||||
SOURCEDIR = .
|
||||
BUILDDIR = _build
|
||||
|
||||
# Put it first so that "make" without argument is like "make help".
|
||||
help:
|
||||
@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
|
||||
|
||||
.PHONY: help Makefile
|
||||
|
||||
# Catch-all target: route all unknown targets to Sphinx using the new
|
||||
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
|
||||
%: Makefile
|
||||
@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
|
14
docs/README.md
Normal file
14
docs/README.md
Normal file
@@ -0,0 +1,14 @@
|
||||
Here lies our documentation. After making edits, run:
|
||||
|
||||
```
|
||||
pip install -r requirements.txt
|
||||
make html
|
||||
```
|
||||
|
||||
to build the docs locally then open the file `_build/html/index.html` in your browser.
|
||||
|
||||
**WARNING:** This documentation is intended to be viewed at:
|
||||
|
||||
https://tendermint.readthedocs.io
|
||||
|
||||
and may contain broken internal links when viewed from Github.
|
255
docs/abci-cli.rst
Normal file
255
docs/abci-cli.rst
Normal file
@@ -0,0 +1,255 @@
|
||||
Using ABCI-CLI
|
||||
==============
|
||||
|
||||
To facilitate testing and debugging of ABCI servers and simple apps, we
|
||||
built a CLI, the ``abci-cli``, for sending ABCI messages from the
|
||||
command line.
|
||||
|
||||
Install
|
||||
-------
|
||||
|
||||
Make sure you `have Go installed <https://golang.org/doc/install>`__.
|
||||
|
||||
Next, install the ``abci-cli`` tool and example applications:
|
||||
|
||||
::
|
||||
|
||||
go get -u github.com/tendermint/abci/cmd/...
|
||||
|
||||
If this fails, you may need to use ``glide`` to get vendored
|
||||
dependencies:
|
||||
|
||||
::
|
||||
|
||||
go get github.com/Masterminds/glide
|
||||
cd $GOPATH/src/github.com/tendermint/abci
|
||||
glide install
|
||||
go install ./cmd/...
|
||||
|
||||
Now run ``abci-cli --help`` to see the list of commands:
|
||||
|
||||
::
|
||||
|
||||
COMMANDS:
|
||||
batch Run a batch of ABCI commands against an application
|
||||
console Start an interactive console for multiple commands
|
||||
echo Have the application echo a message
|
||||
info Get some info about the application
|
||||
set_option Set an option on the application
|
||||
deliver_tx Append a new tx to application
|
||||
check_tx Validate a tx
|
||||
commit Get application Merkle root hash
|
||||
help, h Shows a list of commands or help for one command
|
||||
|
||||
GLOBAL OPTIONS:
|
||||
--address "tcp://127.0.0.1:46658" address of application socket
|
||||
--help, -h show help
|
||||
--version, -v print the version
|
||||
|
||||
Dummy - First Example
|
||||
---------------------
|
||||
|
||||
The ``abci-cli`` tool lets us send ABCI messages to our application, to
|
||||
help build and debug them.
|
||||
|
||||
The most important messages are ``deliver_tx``, ``check_tx``, and
|
||||
``commit``, but there are others for convenience, configuration, and
|
||||
information purposes.
|
||||
|
||||
Let's start a dummy application, which was installed at the same time as
|
||||
``abci-cli`` above. The dummy just stores transactions in a merkle tree:
|
||||
|
||||
::
|
||||
|
||||
dummy
|
||||
|
||||
In another terminal, run
|
||||
|
||||
::
|
||||
|
||||
abci-cli echo hello
|
||||
abci-cli info
|
||||
|
||||
The application should echo ``hello`` and give you some information
|
||||
about itself.
|
||||
|
||||
An ABCI application must provide two things:
|
||||
|
||||
- a socket server
|
||||
- a handler for ABCI messages
|
||||
|
||||
When we run the ``abci-cli`` tool we open a new connection to the
|
||||
application's socket server, send the given ABCI message, and wait for a
|
||||
response.
|
||||
|
||||
The server may be generic for a particular language, and we provide a
|
||||
`reference implementation in
|
||||
Golang <https://github.com/tendermint/abci/tree/master/server>`__. See
|
||||
the `list of other ABCI
|
||||
implementations <https://tendermint.com/ecosystem>`__ for servers in
|
||||
other languages.
|
||||
|
||||
The handler is specific to the application, and may be arbitrary, so
|
||||
long as it is deterministic and conforms to the ABCI interface
|
||||
specification.
|
||||
|
||||
So when we run ``abci-cli info``, we open a new connection to the ABCI
|
||||
server, which calls the ``Info()`` method on the application, which
|
||||
tells us the number of transactions in our Merkle tree.
|
||||
|
||||
Now, since every command opens a new connection, we provide the
|
||||
``abci-cli console`` and ``abci-cli batch`` commands, to allow multiple
|
||||
ABCI messages to be sent over a single connection.
|
||||
|
||||
Running ``abci-cli console`` should drop you in an interactive console
|
||||
for speaking ABCI messages to your application.
|
||||
|
||||
Try running these commands:
|
||||
|
||||
::
|
||||
|
||||
> echo hello
|
||||
-> data: hello
|
||||
|
||||
> info
|
||||
-> data: {"size":0}
|
||||
|
||||
> commit
|
||||
-> data: 0x
|
||||
|
||||
> deliver_tx "abc"
|
||||
-> code: OK
|
||||
|
||||
> info
|
||||
-> data: {"size":1}
|
||||
|
||||
> commit
|
||||
-> data: 0x750502FC7E84BBD788ED589624F06CFA871845D1
|
||||
|
||||
> query "abc"
|
||||
-> code: OK
|
||||
-> data: {"index":0,"value":"abc","exists":true}
|
||||
|
||||
> deliver_tx "def=xyz"
|
||||
-> code: OK
|
||||
|
||||
> commit
|
||||
-> data: 0x76393B8A182E450286B0694C629ECB51B286EFD5
|
||||
|
||||
> query "def"
|
||||
-> code: OK
|
||||
-> data: {"index":1,"value":"xyz","exists":true}
|
||||
|
||||
Note that if we do ``deliver_tx "abc"`` it will store ``(abc, abc)``,
|
||||
but if we do ``deliver_tx "abc=efg"`` it will store ``(abc, efg)``.
|
||||
|
||||
Similarly, you could put the commands in a file and run
|
||||
``abci-cli --verbose batch < myfile``.
|
||||
|
||||
Counter - Another Example
|
||||
-------------------------
|
||||
|
||||
Now that we've got the hang of it, let's try another application, the
|
||||
"counter" app.
|
||||
|
||||
The counter app doesn't use a Merkle tree, it just counts how many times
|
||||
we've sent a transaction, asked for a hash, or committed the state. The
|
||||
result of ``commit`` is just the number of transactions sent.
|
||||
|
||||
This application has two modes: ``serial=off`` and ``serial=on``.
|
||||
|
||||
When ``serial=on``, transactions must be a big-endian encoded
|
||||
incrementing integer, starting at 0.
|
||||
|
||||
If ``serial=off``, there are no restrictions on transactions.
|
||||
|
||||
We can toggle the value of ``serial`` using the ``set_option`` ABCI
|
||||
message.
|
||||
|
||||
When ``serial=on``, some transactions are invalid. In a live blockchain,
|
||||
transactions collect in memory before they are committed into blocks. To
|
||||
avoid wasting resources on invalid transactions, ABCI provides the
|
||||
``check_tx`` message, which application developers can use to accept or
|
||||
reject transactions, before they are stored in memory or gossipped to
|
||||
other peers.
|
||||
|
||||
In this instance of the counter app, ``check_tx`` only allows
|
||||
transactions whose integer is greater than the last committed one.
|
||||
|
||||
Let's kill the console and the dummy application, and start the counter
|
||||
app:
|
||||
|
||||
::
|
||||
|
||||
counter
|
||||
|
||||
In another window, start the ``abci-cli console``:
|
||||
|
||||
::
|
||||
|
||||
> set_option serial on
|
||||
-> data: serial=on
|
||||
|
||||
> check_tx 0x00
|
||||
-> code: OK
|
||||
|
||||
> check_tx 0xff
|
||||
-> code: OK
|
||||
|
||||
> deliver_tx 0x00
|
||||
-> code: OK
|
||||
|
||||
> check_tx 0x00
|
||||
-> code: BadNonce
|
||||
-> log: Invalid nonce. Expected >= 1, got 0
|
||||
|
||||
> deliver_tx 0x01
|
||||
-> code: OK
|
||||
|
||||
> deliver_tx 0x04
|
||||
-> code: BadNonce
|
||||
-> log: Invalid nonce. Expected 2, got 4
|
||||
|
||||
> info
|
||||
-> data: {"hashes":0,"txs":2}
|
||||
|
||||
This is a very simple application, but between ``counter`` and
|
||||
``dummy``, its easy to see how you can build out arbitrary application
|
||||
states on top of the ABCI. `Hyperledger's
|
||||
Burrow <https://github.com/hyperledger/burrow>`__ also runs atop ABCI,
|
||||
bringing with it Ethereum-like accounts, the Ethereum virtual-machine,
|
||||
Monax's permissioning scheme, and native contracts extensions.
|
||||
|
||||
But the ultimate flexibility comes from being able to write the
|
||||
application easily in any language.
|
||||
|
||||
We have implemented the counter in a number of languages (see the
|
||||
example directory).
|
||||
|
||||
To run the Node JS version, ``cd`` to ``example/js`` and run
|
||||
|
||||
::
|
||||
|
||||
node app.js
|
||||
|
||||
(you'll have to kill the other counter application process). In another
|
||||
window, run the console and those previous ABCI commands. You should get
|
||||
the same results as for the Go version.
|
||||
|
||||
Bounties
|
||||
--------
|
||||
|
||||
Want to write the counter app in your favorite language?! We'd be happy
|
||||
to add you to our `ecosystem <https://tendermint.com/ecosystem>`__!
|
||||
We're also offering `bounties <https://tendermint.com/bounties>`__ for
|
||||
implementations in new languages!
|
||||
|
||||
The ``abci-cli`` is designed strictly for testing and debugging. In a
|
||||
real deployment, the role of sending messages is taken by Tendermint,
|
||||
which connects to the app using three separate connections, each with
|
||||
its own pattern of messages.
|
||||
|
||||
For more information, see the `application developers
|
||||
guide <./app-development.html>`__. For examples of running an ABCI
|
||||
app with Tendermint, see the `getting started
|
||||
guide <./getting-started.html>`__.
|
125
docs/app-architecture.rst
Normal file
125
docs/app-architecture.rst
Normal file
@@ -0,0 +1,125 @@
|
||||
Application Architecture Guide
|
||||
==============================
|
||||
|
||||
Overview
|
||||
--------
|
||||
|
||||
A blockchain application is more than the consensus engine and the
|
||||
transaction logic (eg. smart contracts, business logic) as implemented
|
||||
in the ABCI app. There are also (mobile, web, desktop) clients that will
|
||||
need to connect and make use of the app. We will assume for now that you
|
||||
have a well designed transactions and database model, but maybe this
|
||||
will be the topic of another article. This article is more interested in
|
||||
various ways of setting up the "plumbing" and connecting these pieces,
|
||||
and demonstrating some evolving best practices.
|
||||
|
||||
Security
|
||||
--------
|
||||
|
||||
A very important aspect when constructing a blockchain is security. The
|
||||
consensus model can be DoSed (no consensus possible) by corrupting 1/3
|
||||
of the validators and exploited (writing arbitrary blocks) by corrupting
|
||||
2/3 of the validators. So, while the security is not that of the
|
||||
"weakest link", you should take care that the "average link" is
|
||||
sufficiently hardened.
|
||||
|
||||
One big attack surface on the validators is the communication between
|
||||
the ABCI app and the tendermint core. This should be highly protected.
|
||||
Ideally, the app and the core are running on the same machine, so no
|
||||
external agent can target the communication channel. You can use unix
|
||||
sockets (with permissions preventing access from other users), or even
|
||||
compile the two apps into one binary if the ABCI app is also writen in
|
||||
go. If you are unable to do that due to language support, then the ABCI
|
||||
app should bind a TCP connection to localhost (127.0.0.1), which is less
|
||||
efficient and secure, but still not reachable from outside. If you must
|
||||
run the ABCI app and tendermint core on separate machines, make sure you
|
||||
have a secure communication channel (ssh tunnel?)
|
||||
|
||||
Now assuming, you have linked together your app and the core securely,
|
||||
you must also make sure no one can get on the machine it is hosted on.
|
||||
At this point it is basic network security. Run on a secure operating
|
||||
system (SELinux?). Limit who has access to the machine (user accounts,
|
||||
but also where the physical machine is hosted). Turn off all services
|
||||
except for ssh, which should only be accessible by some well-guarded
|
||||
public/private key pairs (no password). And maybe even firewall off
|
||||
access to the ports used by the validators, so only known validators can
|
||||
connect.
|
||||
|
||||
There was also a suggestion on slack from @jhon about compiling
|
||||
everything together with a unikernel for more security, such as
|
||||
`Mirage <https://mirage.io>`__ or
|
||||
`UNIK <https://github.com/emc-advanced-dev/unik>`__.
|
||||
|
||||
Connecting your client to the blockchain
|
||||
----------------------------------------
|
||||
|
||||
Tendermint Core RPC
|
||||
~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
The concept is that the ABCI app is completely hidden from the outside
|
||||
world and only communicated through a tested and secured `interface
|
||||
exposed by the tendermint core <./specification/rpc.html>`__. This interface
|
||||
exposes a lot of data on the block header and consensus process, which
|
||||
is quite useful for externally verifying the system. It also includes
|
||||
3(!) methods to broadcast a transaction (propose it for the blockchain,
|
||||
and possibly await a response). And one method to query app-specific
|
||||
data from the ABCI application.
|
||||
|
||||
Pros:
|
||||
* Server code already written
|
||||
* Access to block headers to validate merkle proofs (nice for light clients)
|
||||
* Basic read/write functionality is supported
|
||||
|
||||
Cons:
|
||||
* Limited interface to app. All queries must be serialized into
|
||||
[]byte (less expressive than JSON over HTTP) and there is no way to push
|
||||
data from ABCI app to the client (eg. notify me if account X receives a
|
||||
transaction)
|
||||
|
||||
Custom ABCI server
|
||||
~~~~~~~~~~~~~~~~~~
|
||||
|
||||
This was proposed by @wolfposd on slack and demonstrated by
|
||||
`TMChat <https://github.com/wolfposd/TMChat>`__, a sample app. The
|
||||
concept is to write a custom server for your app (with typical REST
|
||||
API/websockets/etc for easy use by a mobile app). This custom server is
|
||||
in the same binary as the ABCI app and data store, so can easily react
|
||||
to complex events there that involve understanding the data format (send
|
||||
a message if my balance drops below 500). All "writes" sent to this
|
||||
server are proxied via websocket/JSON-RPC to tendermint core. When they
|
||||
come back as deliver\_tx over ABCI, they will be written to the data
|
||||
store. For "reads", we can do any queries we wish that are supported by
|
||||
our architecture, using any web technology that is useful. The general
|
||||
architecture is shown in the following diagram:
|
||||
|
||||
Pros: \* Separates application logic from blockchain logic \* Allows
|
||||
much richer, more flexible client-facing API \* Allows pub-sub, watching
|
||||
certain fields, etc.
|
||||
|
||||
Cons: \* Access to ABCI app can be dangerous (be VERY careful not to
|
||||
write unless it comes from the validator node) \* No direct access to
|
||||
the blockchain headers to verify tx \* You must write your own API (but
|
||||
maybe that's a pro...)
|
||||
|
||||
Hybrid solutions
|
||||
~~~~~~~~~~~~~~~~
|
||||
|
||||
Likely the least secure but most versatile. The client can access both
|
||||
the tendermint node for all blockchain info, as well as a custom app
|
||||
server, for complex queries and pub-sub on the abci app.
|
||||
|
||||
Pros: All from both above solutions
|
||||
|
||||
Cons: Even more complexity; even more attack vectors (less
|
||||
security)
|
||||
|
||||
Scalability
|
||||
-----------
|
||||
|
||||
Read replica using non-validating nodes? They could forward transactions
|
||||
to the validators (fewer connections, more security), and locally allow
|
||||
all queries in any of the above configurations. Thus, while
|
||||
transaction-processing speed is limited by the speed of the abci app and
|
||||
the number of validators, one should be able to scale our read
|
||||
performance to quite an extent (until the replication process drains too
|
||||
many resources from the validator nodes).
|
299
docs/app-development.rst
Normal file
299
docs/app-development.rst
Normal file
@@ -0,0 +1,299 @@
|
||||
Application Development Guide
|
||||
=============================
|
||||
|
||||
ABCI Design
|
||||
-----------
|
||||
|
||||
The purpose of ABCI is to provide a clean interface between state
|
||||
transition machines on one computer and the mechanics of their
|
||||
replication across multiple computers. The former we call 'application
|
||||
logic' and the latter the 'consensus engine'. Application logic
|
||||
validates transactions and optionally executes transactions against some
|
||||
persistent state. A consensus engine ensures all transactions are
|
||||
replicated in the same order on every machine. We call each machine in a
|
||||
consensus engine a 'validator', and each validator runs the same
|
||||
transactions through the same application logic. In particular, we are
|
||||
interested in blockchain-style consensus engines, where transactions are
|
||||
committed in hash-linked blocks.
|
||||
|
||||
The ABCI design has a few distinct components:
|
||||
|
||||
- message protocol
|
||||
|
||||
- pairs of request and response messages
|
||||
- consensus makes requests, application responds
|
||||
- defined using protobuf
|
||||
|
||||
- server/client
|
||||
|
||||
- consensus engine runs the client
|
||||
- application runs the server
|
||||
- two implementations:
|
||||
|
||||
- async raw bytes
|
||||
- grpc
|
||||
|
||||
- blockchain protocol
|
||||
|
||||
- abci is connection oriented
|
||||
- Tendermint Core maintains three connections:
|
||||
|
||||
- `mempool connection <#mempool-connection>`__: for checking if
|
||||
transactions should be relayed before they are committed; only
|
||||
uses ``CheckTx``
|
||||
- `consensus connection <#consensus-connection>`__: for executing
|
||||
transactions that have been committed. Message sequence is -
|
||||
for every block -
|
||||
``BeginBlock, [DeliverTx, ...], EndBlock, Commit``
|
||||
- `query connection <#query-connection>`__: for querying the
|
||||
application state; only uses Query and Info
|
||||
|
||||
The mempool and consensus logic act as clients, and each maintains an
|
||||
open ABCI connection with the application, which hosts an ABCI server.
|
||||
Shown are the request and response types sent on each connection.
|
||||
|
||||
Message Protocol
|
||||
----------------
|
||||
|
||||
The message protocol consists of pairs of requests and responses. Some
|
||||
messages have no fields, while others may include byte-arrays, strings,
|
||||
or integers. See the ``message Request`` and ``message Response``
|
||||
definitions in `the protobuf definition
|
||||
file <https://github.com/tendermint/abci/blob/master/types/types.proto>`__,
|
||||
and the `protobuf
|
||||
documentation <https://developers.google.com/protocol-buffers/docs/overview>`__
|
||||
for more details.
|
||||
|
||||
For each request, a server should respond with the corresponding
|
||||
response, where order of requests is preserved in the order of
|
||||
responses.
|
||||
|
||||
Server
|
||||
------
|
||||
|
||||
To use ABCI in your programming language of choice, there must be an ABCI
|
||||
server in that language. Tendermint supports two kinds of implementation
|
||||
of the server:
|
||||
|
||||
- Asynchronous, raw socket server (Tendermint Socket Protocol, also
|
||||
known as TSP or Teaspoon)
|
||||
- GRPC
|
||||
|
||||
Both can be tested using the ``abci-cli`` by setting the ``--abci`` flag
|
||||
appropriately (ie. to ``socket`` or ``grpc``).
|
||||
|
||||
See examples, in various stages of maintenance, in
|
||||
`Go <https://github.com/tendermint/abci/tree/master/server>`__,
|
||||
`JavaScript <https://github.com/tendermint/js-abci>`__,
|
||||
`Python <https://github.com/tendermint/abci/tree/master/example/python3/abci>`__,
|
||||
`C++ <https://github.com/mdyring/cpp-tmsp>`__, and
|
||||
`Java <https://github.com/jTendermint/jabci>`__.
|
||||
|
||||
GRPC
|
||||
~~~~
|
||||
|
||||
If GRPC is available in your language, this is the easiest approach,
|
||||
though it will have significant performance overhead.
|
||||
|
||||
To get started with GRPC, copy in the `protobuf
|
||||
file <https://github.com/tendermint/abci/blob/master/types/types.proto>`__
|
||||
and compile it using the GRPC plugin for your language. For instance,
|
||||
for golang, the command is
|
||||
``protoc --go_out=plugins=grpc:. types.proto``. See the `grpc
|
||||
documentation for more details <http://www.grpc.io/docs/>`__. ``protoc``
|
||||
will autogenerate all the necessary code for ABCI client and server in
|
||||
your language, including whatever interface your application must
|
||||
satisfy to be used by the ABCI server for handling requests.
|
||||
|
||||
TSP
|
||||
~~~
|
||||
|
||||
If GRPC is not available in your language, or you require higher
|
||||
performance, or otherwise enjoy programming, you may implement your own
|
||||
ABCI server using the Tendermint Socket Protocol, known affectionately
|
||||
as Teaspoon. The first step is still to auto-generate the relevant data
|
||||
types and codec in your language using ``protoc``. Messages coming over
|
||||
the socket are Protobuf3 encoded, but additionally length-prefixed to
|
||||
facilitate use as a streaming protocol. Protobuf3 doesn't have an
|
||||
official length-prefix standard, so we use our own. The first byte in
|
||||
the prefix represents the length of the Big Endian encoded length. The
|
||||
remaining bytes in the prefix are the Big Endian encoded length.
|
||||
|
||||
For example, if the Protobuf3 encoded ABCI message is 0xDEADBEEF (4
|
||||
bytes), the length-prefixed message is 0x0104DEADBEEF. If the Protobuf3
|
||||
encoded ABCI message is 65535 bytes long, the length-prefixed message
|
||||
would be like 0x02FFFF....
|
||||
|
||||
Note this prefixing does not apply for grpc.
|
||||
|
||||
An ABCI server must also be able to support multiple connections, as
|
||||
Tendermint uses three connections.
|
||||
|
||||
Client
|
||||
------
|
||||
|
||||
There are currently two use-cases for an ABCI client. One is a testing
|
||||
tool, as in the ``abci-cli``, which allows ABCI requests to be sent via
|
||||
command line. The other is a consensus engine, such as Tendermint Core,
|
||||
which makes requests to the application every time a new transaction is
|
||||
received or a block is committed.
|
||||
|
||||
It is unlikely that you will need to implement a client. For details of
|
||||
our client, see
|
||||
`here <https://github.com/tendermint/abci/tree/master/client>`__.
|
||||
|
||||
Blockchain Protocol
|
||||
-------------------
|
||||
|
||||
In ABCI, a transaction is simply an arbitrary length byte-array. It is
|
||||
the application's responsibility to define the transaction codec as they
|
||||
please, and to use it for both CheckTx and DeliverTx.
|
||||
|
||||
Note that there are two distinct means for running transactions,
|
||||
corresponding to stages of 'awareness' of the transaction in the
|
||||
network. The first stage is when a transaction is received by a
|
||||
validator from a client into the so-called mempool or transaction pool -
|
||||
this is where we use CheckTx. The second is when the transaction is
|
||||
successfully committed on more than 2/3 of validators - where we use
|
||||
DeliverTx. In the former case, it may not be necessary to run all the
|
||||
state transitions associated with the transaction, as the transaction
|
||||
may not ultimately be committed until some much later time, when the
|
||||
result of its execution will be different. For instance, an Ethereum
|
||||
ABCI app would check signatures and amounts in CheckTx, but would not
|
||||
actually execute any contract code until the DeliverTx, so as to avoid
|
||||
executing state transitions that have not been finalized.
|
||||
|
||||
To formalize the distinction further, two explicit ABCI connections are
|
||||
made between Tendermint Core and the application: the mempool connection
|
||||
and the consensus connection. We also make a third connection, the query
|
||||
connection, to query the local state of the app.
|
||||
|
||||
Mempool Connection
|
||||
~~~~~~~~~~~~~~~~~~
|
||||
|
||||
The mempool connection is used *only* for CheckTx requests. Transactions
|
||||
are run using CheckTx in the same order they were received by the
|
||||
validator. If the CheckTx returns ``OK``, the transaction is kept in
|
||||
memory and relayed to other peers in the same order it was received.
|
||||
Otherwise, it is discarded.
|
||||
|
||||
CheckTx requests run concurrently with block processing; so they should
|
||||
run against a copy of the main application state which is reset after
|
||||
every block. This copy is necessary to track transitions made by a
|
||||
sequence of CheckTx requests before they are included in a block. When a
|
||||
block is committed, the application must ensure to reset the mempool
|
||||
state to the latest committed state. Tendermint Core will then filter
|
||||
through all transactions in the mempool, removing any that were included
|
||||
in the block, and re-run the rest using CheckTx against the post-Commit
|
||||
mempool state.
|
||||
|
||||
Consensus Connection
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
The consensus connection is used only when a new block is committed, and
|
||||
communicates all information from the block in a series of requests:
|
||||
``BeginBlock, [DeliverTx, ...], EndBlock, Commit``. That is, when a
|
||||
block is committed in the consensus, we send a list of DeliverTx
|
||||
requests (one for each transaction) sandwiched by BeginBlock and
|
||||
EndBlock requests, and followed by a Commit.
|
||||
|
||||
DeliverTx
|
||||
^^^^^^^^^
|
||||
|
||||
DeliverTx is the workhorse of the blockchain. Tendermint sends the
|
||||
DeliverTx requests asynchronously but in order, and relies on the
|
||||
underlying socket protocol (ie. TCP) to ensure they are received by the
|
||||
app in order. They have already been ordered in the global consensus by
|
||||
the Tendermint protocol.
|
||||
|
||||
DeliverTx returns an abci.Result, which includes a Code, Data, and Log.
|
||||
The code may be non-zero (non-OK), meaning the corresponding transaction
|
||||
should have been rejected by the mempool, but may have been included in
|
||||
a block by a Byzantine proposer.
|
||||
|
||||
The block header will be updated (TODO) to include some commitment to
|
||||
the results of DeliverTx, be it a bitarray of non-OK transactions, or a
|
||||
merkle root of the data returned by the DeliverTx requests, or both.
|
||||
|
||||
Commit
|
||||
^^^^^^
|
||||
|
||||
Once all processing of the block is complete, Tendermint sends the
|
||||
Commit request and blocks waiting for a response. While the mempool may
|
||||
run concurrently with block processing (the BeginBlock, DeliverTxs, and
|
||||
EndBlock), it is locked for the Commit request so that its state can be
|
||||
safely reset during Commit. This means the app *MUST NOT* do any
|
||||
blocking communication with the mempool (ie. broadcast\_tx) during
|
||||
Commit, or there will be deadlock. Note also that all remaining
|
||||
transactions in the mempool are replayed on the mempool connection
|
||||
(CheckTx) following a commit.
|
||||
|
||||
The Commit response includes a byte array, which is the deterministic
|
||||
state root of the application. It is included in the header of the next
|
||||
block. It can be used to provide easily verified Merkle-proofs of the
|
||||
state of the application.
|
||||
|
||||
It is expected that the app will persist state to disk on Commit. The
|
||||
option to have all transactions replayed from some previous block is the
|
||||
job of the `Handshake <#handshake>`__.
|
||||
|
||||
BeginBlock
|
||||
^^^^^^^^^^
|
||||
|
||||
The BeginBlock request can be used to run some code at the beginning of
|
||||
every block. It also allows Tendermint to send the current block hash
|
||||
and header to the application, before it sends any of the transactions.
|
||||
|
||||
The app should remember the latest height and header (ie. from which it
|
||||
has run a successful Commit) so that it can tell Tendermint where to
|
||||
pick up from when it restarts. See information on the Handshake, below.
|
||||
|
||||
EndBlock
|
||||
^^^^^^^^
|
||||
|
||||
The EndBlock request can be used to run some code at the end of every
|
||||
block. Additionally, the response may contain a list of validators,
|
||||
which can be used to update the validator set. To add a new validator or
|
||||
update an existing one, simply include them in the list returned in the
|
||||
EndBlock response. To remove one, include it in the list with a
|
||||
``power`` equal to ``0``. Tendermint core will take care of updating the
|
||||
validator set. Note validator set changes are only available in v0.8.0
|
||||
and up.
|
||||
|
||||
Query Connection
|
||||
~~~~~~~~~~~~~~~~
|
||||
|
||||
This connection is used to query the application without engaging
|
||||
consensus. It's exposed over the tendermint core rpc, so clients can
|
||||
query the app without exposing a server on the app itself, but they must
|
||||
serialize each query as a single byte array. Additionally, certain
|
||||
"standardized" queries may be used to inform local decisions, for
|
||||
instance about which peers to connect to.
|
||||
|
||||
Tendermint Core currently uses the Query connection to filter peers upon
|
||||
connecting, according to IP address or public key. For instance,
|
||||
returning non-OK ABCI response to either of the following queries will
|
||||
cause Tendermint to not connect to the corresponding peer:
|
||||
|
||||
- ``p2p/filter/addr/<addr>``, where ``<addr>`` is an IP address.
|
||||
- ``p2p/filter/pubkey/<pubkey>``, where ``<pubkey>`` is the hex-encoded
|
||||
ED25519 key of the node (not its validator key)
|
||||
|
||||
Note: these query formats are subject to change!
|
||||
|
||||
Handshake
|
||||
~~~~~~~~~
|
||||
|
||||
When the app or tendermint restarts, they need to sync to a common
|
||||
height. When an ABCI connection is first established, Tendermint will
|
||||
call ``Info`` on the Query connection. The response should contain the
|
||||
LastBlockHeight and LastBlockAppHash - the former is the last block for
|
||||
which the app ran Commit successfully, the latter is the response
|
||||
from that Commit.
|
||||
|
||||
Using this information, Tendermint will determine what needs to be
|
||||
replayed, if anything, against the app, to ensure both Tendermint and
|
||||
the app are synced to the latest block height.
|
||||
|
||||
If the app returns a LastBlockHeight of 0, Tendermint will just replay
|
||||
all blocks.
|
@@ -1,16 +0,0 @@
|
||||
# ABCI
|
||||
|
||||
ABCI is an interface between the consensus/blockchain engine known as tendermint, and the application-specific business logic, known as an ABCI app.
|
||||
|
||||
The tendermint core should run unchanged for all apps. Each app can customize it, the supported transactions, queries, even the validator sets and how to handle staking / slashing stake. This customization is achieved by implementing the ABCi app to send the proper information to the tendermint engine to perform as directed.
|
||||
|
||||
To understand this decision better, think of the design of the tendermint engine.
|
||||
|
||||
* A blockchain is simply consensus on a unique global ordering of events.
|
||||
* This consensus can efficiently be implemented using BFT and PoS
|
||||
* This code can be generalized to easily support a large number of blockchains
|
||||
* The block-chain specific code, the interpretation of the individual events, can be implemented by a 3rd party app without touching the consensus engine core
|
||||
* Use an efficient, language-agnostic layer to implement this (ABCi)
|
||||
|
||||
|
||||
Bucky, please make this doc real.
|
@@ -2,15 +2,4 @@
|
||||
|
||||
This is a location to record all high-level architecture decisions in the tendermint project. Not the implementation details, but the reasoning that happened. This should be referred to for guidance of the "right way" to extend the application. And if we notice that the original decisions were lacking, we should have another open discussion, record the new decisions here, and then modify the code to match.
|
||||
|
||||
This is like our guide and mentor when Jae and Bucky are offline.... The concept comes from a [blog post](https://product.reverb.com/documenting-architecture-decisions-the-reverb-way-a3563bb24bd0#.78xhdix6t) that resonated among the team when Anton shared it.
|
||||
|
||||
Each section of the code can have its own markdown file in this directory, and please add a link to the readme.
|
||||
|
||||
## Sections
|
||||
|
||||
* [ABCI](./ABCI.md)
|
||||
* [go-merkle / merkleeyes](./merkle.md)
|
||||
* [Frey's thoughts on the data store](./merkle-frey.md)
|
||||
* basecoin
|
||||
* tendermint core (multiple sections)
|
||||
* ???
|
||||
Read up on the concept in this [blog post](https://product.reverb.com/documenting-architecture-decisions-the-reverb-way-a3563bb24bd0#.78xhdix6t).
|
||||
|
90
docs/architecture/adr-002-event-subscription.md
Normal file
90
docs/architecture/adr-002-event-subscription.md
Normal file
@@ -0,0 +1,90 @@
|
||||
# ADR 2: Event Subscription
|
||||
|
||||
## Context
|
||||
|
||||
In the light client (or any other client), the user may want to **subscribe to
|
||||
a subset of transactions** (rather than all of them) using `/subscribe?event=X`. For
|
||||
example, I want to subscribe for all transactions associated with a particular
|
||||
account. Same for fetching. The user may want to **fetch transactions based on
|
||||
some filter** (rather than fetching all the blocks). For example, I want to get
|
||||
all transactions for a particular account in the last two weeks (`tx's block
|
||||
time >= '2017-06-05'`).
|
||||
|
||||
Now you can't even subscribe to "all txs" in Tendermint.
|
||||
|
||||
The goal is a simple and easy to use API for doing that.
|
||||
|
||||

|
||||
|
||||
## Decision
|
||||
|
||||
The ABCI app returns tags with a `DeliverTx` response inside the `data` field (_for
|
||||
now, later we may create a separate field_). Tags are a list of key-value pairs,
|
||||
protobuf encoded.
|
||||
|
||||
Example data:
|
||||
|
||||
```json
|
||||
{
|
||||
"abci.account.name": "Igor",
|
||||
"abci.account.address": "0xdeadbeef",
|
||||
"tx.gas": 7
|
||||
}
|
||||
```
|
||||
|
||||
### Subscribing for transactions events
|
||||
|
||||
If the user wants to receive only a subset of transactions, ABCI-app must
|
||||
return a list of tags with a `DeliverTx` response. These tags will be parsed and
|
||||
matched with the current queries (subscribers). If the query matches the tags,
|
||||
subscriber will get the transaction event.
|
||||
|
||||
```
|
||||
/subscribe?query="tm.event = Tx AND tx.hash = AB0023433CF0334223212243BDD AND abci.account.invoice.number = 22"
|
||||
```
|
||||
|
||||
A new package must be developed to replace the current `events` package. It
|
||||
will allow clients to subscribe to different types of events in the future:
|
||||
|
||||
```
|
||||
/subscribe?query="abci.account.invoice.number = 22"
|
||||
/subscribe?query="abci.account.invoice.owner CONTAINS Igor"
|
||||
```
|
||||
|
||||
### Fetching transactions
|
||||
|
||||
This is a bit tricky because a) we want to support a number of indexers, all of
|
||||
which have a different API b) we don't know whether tags will be sufficient
|
||||
for most apps (I guess we'll see).
|
||||
|
||||
```
|
||||
/txs/search?query="tx.hash = AB0023433CF0334223212243BDD AND abci.account.owner CONTAINS Igor"
|
||||
/txs/search?query="abci.account.owner = Igor"
|
||||
```
|
||||
|
||||
For historic queries we will need an indexing storage (Postgres, SQLite, ...).
|
||||
|
||||
### Issues
|
||||
|
||||
- https://github.com/tendermint/basecoin/issues/91
|
||||
- https://github.com/tendermint/tendermint/issues/376
|
||||
- https://github.com/tendermint/tendermint/issues/287
|
||||
- https://github.com/tendermint/tendermint/issues/525 (related)
|
||||
|
||||
## Status
|
||||
|
||||
proposed
|
||||
|
||||
## Consequences
|
||||
|
||||
### Positive
|
||||
|
||||
- same format for event notifications and search APIs
|
||||
- powerful enough query
|
||||
|
||||
### Negative
|
||||
|
||||
- performance of the `match` function (where we have too many queries / subscribers)
|
||||
- there is an issue where there are too many txs in the DB
|
||||
|
||||
### Neutral
|
34
docs/architecture/adr-003-abci-app-rpc.md
Normal file
34
docs/architecture/adr-003-abci-app-rpc.md
Normal file
@@ -0,0 +1,34 @@
|
||||
# ADR 3: Must an ABCI-app have an RPC server?
|
||||
|
||||
## Context
|
||||
|
||||
ABCI-server could expose its own RPC-server and act as a proxy to Tendermint.
|
||||
|
||||
The idea was for the Tendermint RPC to just be a transparent proxy to the app.
|
||||
Clients need to talk to Tendermint for proofs, unless we burden all app devs
|
||||
with exposing Tendermint proof stuff. Also seems less complex to lock down one
|
||||
server than two, but granted it makes querying a bit more kludgy since it needs
|
||||
to be passed as a `Query`. Also, **having a very standard rpc interface means
|
||||
the light-client can work with all apps and handle proofs**. The only
|
||||
app-specific logic is decoding the binary data to a more readable form (eg.
|
||||
json). This is a huge advantage for code-reuse and standardization.
|
||||
|
||||
## Decision
|
||||
|
||||
We don't expose an RPC server on any of our ABCI-apps.
|
||||
|
||||
## Status
|
||||
|
||||
accepted
|
||||
|
||||
## Consequences
|
||||
|
||||
### Positive
|
||||
|
||||
- Unified interface for all apps
|
||||
|
||||
### Negative
|
||||
|
||||
- `Query` interface
|
||||
|
||||
### Neutral
|
38
docs/architecture/adr-004-historical-validators.md
Normal file
38
docs/architecture/adr-004-historical-validators.md
Normal file
@@ -0,0 +1,38 @@
|
||||
# ADR 004: Historical Validators
|
||||
|
||||
## Context
|
||||
|
||||
Right now, we can query the present validator set, but there is no history.
|
||||
If you were offline for a long time, there is no way to reconstruct past validators. This is needed for the light client and we agreed needs enhancement of the API.
|
||||
|
||||
## Decision
|
||||
|
||||
For every block, store a new structure that contains either the latest validator set,
|
||||
or the height of the last block for which the validator set changed. Note this is not
|
||||
the height of the block which returned the validator set change itself, but the next block,
|
||||
ie. the first block it comes into effect for.
|
||||
|
||||
Storing the validators will be handled by the `state` package.
|
||||
|
||||
At some point in the future, we may consider more efficient storage in the case where the validators
|
||||
are updated frequently - for instance by only saving the diffs, rather than the whole set.
|
||||
|
||||
An alternative approach suggested keeping the validator set, or diffs of it, in a merkle IAVL tree.
|
||||
While it might afford cheaper proofs that a validator set has not changed, it would be more complex,
|
||||
and likely less efficient.
|
||||
|
||||
## Status
|
||||
|
||||
Accepted.
|
||||
|
||||
## Consequences
|
||||
|
||||
### Positive
|
||||
|
||||
- Can query old validator sets, with proof.
|
||||
|
||||
### Negative
|
||||
|
||||
- Writes an extra structure to disk with every block.
|
||||
|
||||
### Neutral
|
85
docs/architecture/adr-005-consensus-params.md
Normal file
85
docs/architecture/adr-005-consensus-params.md
Normal file
@@ -0,0 +1,85 @@
|
||||
# ADR 005: Consensus Params
|
||||
|
||||
## Context
|
||||
|
||||
Consensus critical parameters controlling blockchain capacity have until now been hard coded, loaded from a local config, or neglected.
|
||||
Since they may need to be different in different networks, and potentially to evolve over time within
|
||||
networks, we seek to initialize them in a genesis file, and expose them through the ABCI.
|
||||
|
||||
While we have some specific parameters now, like maximum block and transaction size, we expect to have more in the future,
|
||||
such as a period over which evidence is valid, or the frequency of checkpoints.
|
||||
|
||||
## Decision
|
||||
|
||||
### ConsensusParams
|
||||
|
||||
No consensus critical parameters should ever be found in the `config.toml`.
|
||||
|
||||
A new `ConsensusParams` is optionally included in the `genesis.json` file,
|
||||
and loaded into the `State`. Any items not included are set to their default value.
|
||||
A value of 0 is undefined (see ABCI, below). A value of -1 is used to indicate the parameter does not apply.
|
||||
The parameters are used to determine the validity of a block (and tx) via the union of all relevant parameters.
|
||||
|
||||
```
|
||||
type ConsensusParams struct {
|
||||
BlockSizeParams
|
||||
TxSizeParams
|
||||
BlockGossipParams
|
||||
}
|
||||
|
||||
type BlockSizeParams struct {
|
||||
MaxBytes int
|
||||
MaxTxs int
|
||||
MaxGas int
|
||||
}
|
||||
|
||||
type TxSizeParams struct {
|
||||
MaxBytes int
|
||||
MaxGas int
|
||||
}
|
||||
|
||||
type BlockGossipParams struct {
|
||||
BlockPartSizeBytes int
|
||||
}
|
||||
```
|
||||
|
||||
The `ConsensusParams` can evolve over time by adding new structs that cover different aspects of the consensus rules.
|
||||
|
||||
The `BlockPartSizeBytes` and the `BlockSizeParams.MaxBytes` are enforced to be greater than 0.
|
||||
The former because we need a part size, the latter so that we always have at least some sanity check over the size of blocks.
|
||||
|
||||
### ABCI
|
||||
|
||||
#### InitChain
|
||||
|
||||
InitChain currently takes the initial validator set. It should be extended to also take parts of the ConsensusParams.
|
||||
There is some case to be made for it to take the entire Genesis, except there may be things in the genesis,
|
||||
like the BlockPartSize, that the app shouldn't really know about.
|
||||
|
||||
#### EndBlock
|
||||
|
||||
The EndBlock response includes a `ConsensusParams`, which includes BlockSizeParams and TxSizeParams, but not BlockGossipParams.
|
||||
Other param structs can be added to `ConsensusParams` in the future.
|
||||
The `0` value is used to denote no change.
|
||||
Any other value will update that parameter in the `State.ConsensusParams`, to be applied for the next block.
|
||||
Tendermint should have hard-coded upper limits as sanity checks.
|
||||
|
||||
## Status
|
||||
|
||||
Proposed.
|
||||
|
||||
## Consequences
|
||||
|
||||
### Positive
|
||||
|
||||
- Alternative capacity limits and consensus parameters can be specified without re-compiling the software.
|
||||
- They can also change over time under the control of the application
|
||||
|
||||
### Negative
|
||||
|
||||
- More exposed parameters is more complexity
|
||||
- Different rules at different heights in the blockchain complicates fast sync
|
||||
|
||||
### Neutral
|
||||
|
||||
- The TxSizeParams, which checks validity, may be in conflict with the config's `max_block_size_tx`, which determines proposal sizes
|
16
docs/architecture/adr-template.md
Normal file
16
docs/architecture/adr-template.md
Normal file
@@ -0,0 +1,16 @@
|
||||
# ADR 000: Template for an ADR
|
||||
|
||||
## Context
|
||||
|
||||
## Decision
|
||||
|
||||
## Status
|
||||
|
||||
|
||||
## Consequences
|
||||
|
||||
### Positive
|
||||
|
||||
### Negative
|
||||
|
||||
### Neutral
|
BIN
docs/architecture/img/tags1.png
Normal file
BIN
docs/architecture/img/tags1.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 10 KiB |
@@ -1,240 +0,0 @@
|
||||
# Merkle data stores - Frey's proposal
|
||||
|
||||
## TL;DR
|
||||
|
||||
To allow the efficient creation of an ABCi app, tendermint wishes to provide a reference implementation of a key-value store that provides merkle proofs of the data. These proofs then quickly allow the ABCi app to provide an app hash to the consensus engine, as well as a full proof to any client.
|
||||
|
||||
This is equivalent to building a database, and I would propose designing it from the API first, then looking at how to implement this (or make an adapter from the API to existing implementations). Once we agree on the functionality and the interface, we can implement the API bindings, and then work on building adapters to existing merkle-ized data stores, or modifying the stores to support this interface.
|
||||
|
||||
We need to consider the API (both in-process and over the network), language bindings, maintaining handles to old state (and garbage collecting), persistence, security, providing merkle proofs, and general key-value store operations. To stay consistent with the blockchains "single global order of operations", this data store should only allow one connection at a time to have write access.
|
||||
|
||||
## Overview
|
||||
|
||||
* **State**
|
||||
* There are two concepts of state, "committed state" and "working state"
|
||||
* The working state is only accessible from the ABCi app, allows writing, but does not need to support proofs.
|
||||
* When we commit the "working state", it becomes a new "committed state" and has an immutable root hash, provides proofs, and can be exposed to external clients.
|
||||
* **Transactions**
|
||||
* The database always allows creating a read-only transaction at the last "committed state", this transaction can serve read queries and proofs.
|
||||
* The database maintains all data to serve these read transactions until they are closed by the client (or time out). This allows the client(s) to determine how much old info is needed
|
||||
* The database can only support *at most* one writable transaction at a time. This makes it easy to enforce serializability, and attempting to start a second writable transaction may trigger a panic.
|
||||
* **Functionality**
|
||||
* It must support efficient key-value operations (get/set/delete)
|
||||
* It must support returning merkle proofs for any "committed state"
|
||||
* It should support range queries on subsets of the key space if possible (ie. if the db doesn't hash keys)
|
||||
* It should also support listening to changes to a desired key via pub-sub or similar method, so I can quickly notify you on a change to your balance without constant polling.
|
||||
* It may support other db-specific query types as an extension to this interface, as long as all specified actions maintain their meaning.
|
||||
* **Interface**
|
||||
* This interface should be domain-specific - ie. designed just for this use case
|
||||
* It should present a simple go interface for embedding the data store in-process
|
||||
* It should create a gRPC/protobuf API for calling from any client
|
||||
* It should provide and maintain client adapters from our in-process interface to gRPC client calls for at least golang and Java (maybe more languages?)
|
||||
* It should provide and maintain server adapters from our gRPC calls to the in-process interface for golang at least (unless there is another server we wish to support)
|
||||
* **Persistence**
|
||||
* It must support atomic persistence upon committing a new block. That is, upon crash recovery, the state is guaranteed to represent the state at the end of a complete block (along with a note of which height it was).
|
||||
* It must delay deletion of old data as long as there are open read-only transactions referring to it, thus we must maintain some sort of WAL to keep track of pending cleanup.
|
||||
* When a transaction is closed, or when we recover from a crash, it should clean up all no longer needed data to avoid memory/storage leaks.
|
||||
* **Security and Auth**
|
||||
* If we allow connections over gRPC, we must consider these issues and allow both encryption (SSL), and some basic auth rules to prevent undesired access to the DB
|
||||
* This is client-specific and does not need to be supported in the in-process, embedded version.
|
||||
|
||||
## Details
|
||||
|
||||
Here we go more in-depth in each of the sections, explaining the reasoning and more details on the desired behavior. This document is only the high-level architecture and should support multiple implementations. When building out a specific implementation, a similar document should be provided for that repo, showing how it implements these concepts, and details about memory usage, storage, efficiency, etc.
|
||||
|
||||
|
||||
### State
|
||||
|
||||
The current ABCi interface avoids this question a bit and that has brought confusion. If I use `merkleeyes` to store data, which state is returned from `Query`? The current "working" state, which I would like to refer to in my ABCi application? Or the last committed state, which I would like to return to a client's query? Or an old state, which I may select based on height?
|
||||
|
||||
Right now, `merkleeyes` implements `Query` like a normal ABCi app and only returns committed state, which has led to problems and confusion. Thus, we need to be explicit about which state we want to view. Each viewer can then specify which state it wants to view. This allows the app to query the working state in DeliverTx, but the committed state in Query.
|
||||
|
||||
We can easily provide two global references for "last committed" and "current working" states. However, if we want to also allow querying of older commits... then we need some way to keep track of which ones are still in use, so we can garbage collect the unneeded ones. There is a non-trivial overhead in holding references to all past states, but also a hard-coded solution (hold onto the last 5 commits) may not support all clients. We should let the client define this somehow.
|
||||
|
||||
### Transactions
|
||||
|
||||
Transactions (in the typical database sense) are a clean and established solution to this issue. We can look at the [isolations levels](https://en.wikipedia.org/wiki/Isolation_(database_systems)#Serializable) which attempt to provide us things like "repeatable reads". That means if we open a transaction, and query some data 100 times while other processes are writing to the db, we get the same result each time. This transaction has a reference to its own local state from the time the transaction started. (We are referring to the highest isolation levels here, which correlate well with the blockchain use case).
|
||||
|
||||
If we implement a read-only transaction as a reference to state at the time of creation of that transaction, we can then hold these references to various snapshots, one per block that we are interested, and allow the client to multiplex queries and proofs from these various blocks.
|
||||
|
||||
If we continue using these concepts (which have informed 30+ years of server side design), we can add a few nice features to our write transactions. The first of which is `Rollback` and `Commit`. That means all the changes we make in this transaction have no effect on the database until they are committed. And until they are committed, we can always abort if we detect an anomaly, returning to the last committed state with a rollback.
|
||||
|
||||
There is also a nice extension to this available on some database servers, basically, "nested" transactions or "savepoints". This means that within one transaction, you can open a subtransaction/savepoint and continue work. Later you have the option to commit or rollback all work since the savepoint/subtransaction. And then continue with the main transaction.
|
||||
|
||||
If you don't understand why this is useful, look at how basecoin needs to [hold cached state for AppTx](https://github.com/tendermint/basecoin/blob/master/state/execution.go#L126-L149), meaning that it rolls back all modifications if the AppTx returns an error. This was implemented as a wrapper in basecoin, but it is a reasonable thing to support in the DB interface itself (especially since the implementation becomes quite non-trivial as soon as you support range queries).
|
||||
|
||||
To give a bit more reference to this concept in practice, read about [Savepoints in Postgresql](https://www.postgresql.org/docs/current/static/tutorial-transactions.html) ([reference](https://www.postgresql.org/docs/current/static/sql-savepoint.html)) or [Nesting transactions in SQL Server](http://dba-presents.com/index.php/databases/sql-server/43-nesting-transactions-and-save-transaction-command) (TL;DR: scroll to the bottom, section "Real nesting transactions with SAVE TRANSACTION")
|
||||
|
||||
### Functionality
|
||||
|
||||
Merkle trees work with key-value pairs, so we should most importantly focus on the basic Key-Value operations. That is `Get`, `Set`, and `Remove`. We also need to return a merkle proof for any key, along with a root hash of the tree for committing state to the blockchain. This is just the basic merkle-tree stuff.
|
||||
|
||||
If it is possible with the implementation, it is nice to provide access to Range Queries. That is, return all values where the key is between X and Y. If you construct your keys wisely, it is possible to store lists (1:N) relations this way. Eg, storing blog posts and the key is blog:`poster_id`:`sequence`, then I could search for all blog posts by a given `poster_id`, or even return just posts 10-19 from the given poster.
|
||||
|
||||
The construction of a tree that supports range queries was one of the [design decisions of go-merkle](https://github.com/tendermint/go-merkle/blob/master/README.md). It is also kind of possible with [ethereum's patricia trie](https://github.com/ethereum/wiki/wiki/Patricia-Tree) as long as the key is less than 32 bytes.
|
||||
|
||||
In addition to range queries, there is one more nice feature that we could add to our data store - listening to events. Depending on your context, this is "reactive programming", "event emitters", "notifications", etc... But the basic concept is that a client can listen for all changes to a given key (or set of keys), and receive a notification when this happens. This is very important to avoid [repeated polling and wasted queries](http://resthooks.org/) when a client simply wants to [detect changes](https://www.rethinkdb.com/blog/realtime-web/).
|
||||
|
||||
If the database provides access to some "listener" functionality, the app can choose to expose this to the external client via websockets, web hooks, http2 push events, android push notifications, etc, etc etc.... But if we want to support modern client functionality, let's add support for this reactive paradigm in our DB interface.
|
||||
|
||||
**TODO** support for more advanced backends, eg. Bolt....
|
||||
|
||||
### Go Interface
|
||||
|
||||
I will start with a simple go interface to illustrate the in-process interface. Once there is agreement on how this looks, we can work out the gRPC bindings to support calling out of process. These interfaces are not finalized code, but I think they demonstrate the concepts better than text and provide a strawman to get feedback.
|
||||
|
||||
```
|
||||
// DB represents the committed state of a merkle-ized key-value store
|
||||
type DB interface {
|
||||
// Snapshot returns a reference to last committed state to use for
|
||||
// providing proofs, you must close it at the end to garbage collect
|
||||
// the historical state we hold on to to make these proofs
|
||||
Snapshot() Prover
|
||||
|
||||
// Start a transaction - only way to change state
|
||||
// This will return an error if there is an open Transaction
|
||||
Begin() (Transaction, error)
|
||||
|
||||
// These callbacks are triggered when the Transaction is Committed
|
||||
// to the DB. They can be used to eg. notify clients via websockets when
|
||||
// their account balance changes.
|
||||
AddListener(key []byte, listener Listener)
|
||||
RemoveListener(listener Listener)
|
||||
}
|
||||
|
||||
// DBReader represents a read-only connection to a snapshot of the db
|
||||
type DBReader interface {
|
||||
// Queries on my local view
|
||||
Has(key []byte) (bool, error)
|
||||
Get(key []byte) (Model, error)
|
||||
GetRange(start, end []byte, ascending bool, limit int) ([]Model, error)
|
||||
Closer
|
||||
}
|
||||
|
||||
// Prover is an interface that lets one query for Proofs, holding the
|
||||
// data at a specific location in memory
|
||||
type Prover interface {
|
||||
DBReader
|
||||
|
||||
// Hash is the AppHash (RootHash) for this block
|
||||
Hash() (hash []byte)
|
||||
|
||||
// Prove returns the data along with a merkle Proof
|
||||
// Model and Proof are nil if not found
|
||||
Prove(key []byte) (Model, Proof, error)
|
||||
}
|
||||
|
||||
// Transaction is a set of state changes to the DB to be applied atomically.
|
||||
// There can only be one open transaction at a time, which may only have
|
||||
// maximum one subtransaction at a time.
|
||||
// In short, at any time, there is exactly one object that can write to the
|
||||
// DB, and we can use Subtransactions to group operations and roll them back
|
||||
// together (kind of like `types.KVCache` from basecoin)
|
||||
type Transaction interface {
|
||||
DBReader
|
||||
|
||||
// Change the state - will raise error immediately if this Transaction
|
||||
// is not holding the exclusive write lock
|
||||
Set(model Model) (err error)
|
||||
Remove(key []byte) (removed bool, err error)
|
||||
|
||||
// Subtransaction starts a new subtransaction, rollback will not affect the
|
||||
// parent. Only on Commit are the changes applied to this transaction.
|
||||
// While the subtransaction exists, no write allowed on the parent.
|
||||
// (You must Commit or Rollback the child to continue)
|
||||
Subtransaction() Transaction
|
||||
|
||||
// Commit this transaction (or subtransaction), the parent reference is
|
||||
// now updated.
|
||||
// This only updates persistent store if the top level transaction commits
|
||||
// (You may have any number of nested sub transactions)
|
||||
Commit() error
|
||||
|
||||
// Rollback ends the transaction and throw away all transaction-local state,
|
||||
// allowing the tree to prune those elements.
|
||||
// The parent transaction now recovers the write lock.
|
||||
Rollback()
|
||||
}
|
||||
|
||||
// Listener registers callbacks on changes to the data store
|
||||
type Listener interface {
|
||||
OnSet(key, value, oldValue []byte)
|
||||
OnRemove(key, oldValue []byte)
|
||||
}
|
||||
|
||||
// Proof represents a merkle proof for a key
|
||||
type Proof interface {
|
||||
RootHash() []byte
|
||||
Verify(key, value, root []byte) bool
|
||||
}
|
||||
|
||||
type Model interface {
|
||||
Key() []byte
|
||||
Value() []byte
|
||||
}
|
||||
|
||||
// Closer releases the reference to this state, allowing us to garbage collect
|
||||
// Make sure to call it before discarding.
|
||||
type Closer interface {
|
||||
Close()
|
||||
}
|
||||
```
|
||||
|
||||
### Remote Interface
|
||||
|
||||
The use-case of allowing out-of-process calls is very powerful, not just to provide a powerful merkle-ready data store to non-go applications.
|
||||
|
||||
If we allow the ABCi app to maintain the only writable connections, we can guarantee that all transactions are only processed through the tendermint consensus engine. We could then allow multiple "web server" machines "read-only" access and scale out the database reads, assuming the consensus engine, ABCi logic, and public key cryptography is more the bottleneck than the database. We could even place the consensus engine, ABCi app, and data store on one machine, connected with unix sockets for security, and expose a tcp/ssl interface for reading the data, to scale out query processing over multiple machines.
|
||||
|
||||
But returning our focus directly to the ABCi app (which is the most important use case). An app may well want to maintain 100 or 1000 snapshots of different heights to allow people to easily query many proofs at a given height without race conditions (very important for IBC, ask Jae). Thus, we should not require a separate TCP connection for each height, as this gets quite awkward with so many connections. Also, if we want to use gRPC, we should consider the connections potentially transient (although they are more efficient with keep-alive).
|
||||
|
||||
Thus, the wire encoding of a transaction or a snapshot should simply return a unique id. All methods on a `Prover` or `Transaction` over the wire can send this id along with the arguments for the method call. And we just need a hash map on the server to map this id to a state.
|
||||
|
||||
The only negative of not requiring a persistent tcp connection for each snapshot is there is no auto-detection if the client crashes without explicitly closing the connections. Thus, I would suggest adding a `Ping` thread in the gRPC interface which keeps the Snapshot alive. If no ping is received within a server-defined time, it may automatically close those transactions. And if we consider a client with 500 snapshots that needs to ping each every 10 seconds, that is a lot of overhead, so we should design the ping to accept a list of IDs for the client and update them all. Or associate all snapshots with a clientID and then just send the clientID in the ping. (Please add other ideas on how to detect client crashes without persistent connections).
|
||||
|
||||
To encourage adoption, we should provide a nice client that uses this gRPC interface (like we do with ABCi). For go, the client may have the exact same interface as the in-process version, just that the error call may return network errors, not just illegal operations. We should also add a client with a clean API for Java, since that seems to be popular among app developers in the current tendermint community. Other bindings as we see the need in the server space.
|
||||
|
||||
### Persistence
|
||||
|
||||
Any data store worth its name should not lose all data on a crash. Even [redis provides some persistence](https://redis.io/topics/persistence) these days. Ideally, if the system crashes and restarts, it should have the data at the last block N that was committed. If the system crashes during the commit of block N+1, then the recovered state should either be block N or completely committed block N+1, but no partial state between the two. Basically, the commit must be an atomic operation (even if updating 100's of records).
|
||||
|
||||
To avoid a lot of headaches ourselves, we can use an existing data store, such as leveldb, which provides `WriteBatch` to group all operations.
|
||||
|
||||
The other issue is cleaning up old state. We cannot delete any information from our persistent store, as long as any snapshot holds a reference to it (or else we get some panics when the data we query is not there). So, we need to store the outstanding deletions that we can perform when the snapshot is `Close`d. In addition, we must consider the case that the data store crashes with open snapshots. Thus, the info on outstanding deletions must also be persisted somewhere. Something like a "delete-behind log" (the opposite of a "write ahead log").
|
||||
|
||||
This is not a concern of the generic interface, but each implementation should take care to handle this well to avoid accumulation of unused references in the data store and eventual data bloat.
|
||||
|
||||
#### Backing stores
|
||||
|
||||
It is way outside the scope of this project to build our own database that is capable of efficiently storing the data, provide multiple read-only snapshots at once, and save it atomically. The best approach seems to select an existing database (best a simple one) that provides this functionality and build upon it, much like the current `go-merkle` implementation builds upon `leveldb`. After some research here are winners and losers:
|
||||
|
||||
**Winners**
|
||||
|
||||
* Leveldb - [provides consistent snapshots](https://ayende.com/blog/161705/reviewing-leveldb-part-xiii-smile-and-here-is-your-snapshot), and [provides tooling for building ACID compliance](http://codeofrob.com/entries/writing-a-transaction-manager-on-top-of-leveldb.html)
|
||||
* Note there are at least two solid implementations available in go - [goleveldb](https://github.com/syndtr/goleveldb) - a pure go implementation, and [levigo](https://github.com/jmhodges/levigo) - a go wrapper around leveldb.
|
||||
* Goleveldb is much easier to compile and cross-compile (not requiring cgo), while levigo (or cleveldb) seems to provide a significant performance boost (but I had trouble even running benchmarks)
|
||||
* PostgreSQL - fully supports these ACID semantics if you call `SET TRANSACTION ISOLATION LEVEL SERIALIZABLE` at the beginning of a transaction (tested)
|
||||
* This may be total overkill unless we also want to make use of other features, like storing data in multiple columns with secondary indexes.
|
||||
* Trillian can show an example of [how to store a merkle tree in sql](https://github.com/google/trillian/blob/master/storage/mysql/tree_storage.go)
|
||||
|
||||
**Losers**
|
||||
|
||||
* Bolt - open [read-only snapshots can block writing](https://github.com/boltdb/bolt/issues/378)
|
||||
* Mongo - [barely even supports atomic operations](https://docs.mongodb.com/manual/core/write-operations-atomicity/), much less multiple snapshots
|
||||
|
||||
**To investigate**
|
||||
|
||||
* [Trillian](https://github.com/google/trillian) - has a [persistent merkle tree interface](https://github.com/google/trillian/blob/master/storage/tree_storage.go) along with [backend storage with mysql](https://github.com/google/trillian/blob/master/storage/mysql/tree_storage.go), good inspiration for our design if not directly using it
|
||||
* [Moss](https://github.com/couchbase/moss) - another key-value store in go, seems similar to leveldb, maybe compare with performance tests?
|
||||
|
||||
### Security
|
||||
|
||||
When allowing access out-of-process, we should provide different mechanisms to secure it. The first is the choice of binding to a local unix socket or a tcp port. The second is the optional use of ssl to encrypt the connection (very important over tcp). The third is authentication to control access to the database.
|
||||
|
||||
We may also want to consider the case of two server connections with different permissions, eg. a local unix socket that allows write access with no more credentials, and a public TCP connection with ssl and authentication that only provides read-only access.
|
||||
|
||||
The use of ssl is quite easy in go, we just need to generate and sign a certificate, so it is nice to be able to disable it for dev machines, but it is very important for production.
|
||||
|
||||
For authentication, let me sketch out a minimal solution. The server could just have a simple config file with key/bcrypt(password) pairs along with read/write permission level, and read that upon startup. The client must provide a username and password in the HTTP headers when making the original HTTPS gRPC connection.
|
||||
|
||||
This is super minimal to provide some protection. Things like LDAP, OAuth and single-sign on seem overkill and even potential security holes. Maybe there is another solution somewhere in the middle.
|
@@ -1,17 +0,0 @@
|
||||
# Merkle data stores
|
||||
|
||||
To allow the efficient creation of an ABCi app, tendermint wishes to provide a reference implementation of a key-value store that provides merkle proofs of the data. These proofs then quickly allow the ABCi app to provide an app hash to the consensus engine, as well as a full proof to any client.
|
||||
|
||||
This engine is currently implemented in `go-merkle` with `merkleeyes` providing a language-agnostic binding via ABCi. It uses `tmlibs/db` bindings internally to persist data to leveldb.
|
||||
|
||||
What are some of the requirements of this store:
|
||||
|
||||
* It must support efficient key-value operations (get/set/delete)
|
||||
* It must support persistence.
|
||||
* We must only persist complete blocks, so when we come up after a crash we are at the state of block N or N+1, but not in-between these two states.
|
||||
* It must allow us to read/write from one uncommitted state (working state), while serving other queries from the last committed state. And a way to determine which one to serve for each client.
|
||||
* It must allow us to hold references to old state, to allow providing proofs from 20 blocks ago. We can define some limits as to the maximum time to hold this data.
|
||||
* We provide in process binding in Go
|
||||
* We provide language-agnostic bindings when running the data store as its own process.
|
||||
|
||||
|
BIN
docs/assets/abci.png
Normal file
BIN
docs/assets/abci.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 43 KiB |
BIN
docs/assets/consensus_logic.png
Normal file
BIN
docs/assets/consensus_logic.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 103 KiB |
BIN
docs/assets/tm-transaction-flow.png
Normal file
BIN
docs/assets/tm-transaction-flow.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 2.4 MiB |
BIN
docs/assets/tmint-logo-blue.png
Normal file
BIN
docs/assets/tmint-logo-blue.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 52 KiB |
199
docs/conf.py
Normal file
199
docs/conf.py
Normal file
@@ -0,0 +1,199 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Tendermint documentation build configuration file, created by
|
||||
# sphinx-quickstart on Mon Aug 7 04:55:09 2017.
|
||||
#
|
||||
# This file is execfile()d with the current directory set to its
|
||||
# containing dir.
|
||||
#
|
||||
# Note that not all possible configuration values are present in this
|
||||
# autogenerated file.
|
||||
#
|
||||
# All configuration values have a default; values that are commented out
|
||||
# serve to show the default.
|
||||
|
||||
# If extensions (or modules to document with autodoc) are in another directory,
|
||||
# add these directories to sys.path here. If the directory is relative to the
|
||||
# documentation root, use os.path.abspath to make it absolute, like shown here.
|
||||
#
|
||||
import os
|
||||
# import sys
|
||||
# sys.path.insert(0, os.path.abspath('.'))
|
||||
import urllib
|
||||
|
||||
import sphinx_rtd_theme
|
||||
|
||||
|
||||
# -- General configuration ------------------------------------------------
|
||||
|
||||
# If your documentation needs a minimal Sphinx version, state it here.
|
||||
#
|
||||
# needs_sphinx = '1.0'
|
||||
|
||||
# Add any Sphinx extension module names here, as strings. They can be
|
||||
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
|
||||
# ones.
|
||||
extensions = []
|
||||
|
||||
# Add any paths that contain templates here, relative to this directory.
|
||||
templates_path = ['_templates']
|
||||
|
||||
# The suffix(es) of source filenames.
|
||||
# You can specify multiple suffix as a list of string:
|
||||
#
|
||||
source_suffix = ['.rst', '.md']
|
||||
# source_suffix = '.rst'
|
||||
|
||||
# The master toctree document.
|
||||
master_doc = 'index'
|
||||
|
||||
# General information about the project.
|
||||
project = u'Tendermint'
|
||||
copyright = u'2017, The Authors'
|
||||
author = u'Tendermint'
|
||||
|
||||
# The version info for the project you're documenting, acts as replacement for
|
||||
# |version| and |release|, also used in various other places throughout the
|
||||
# built documents.
|
||||
#
|
||||
# The short X.Y version.
|
||||
version = u''
|
||||
# The full version, including alpha/beta/rc tags.
|
||||
release = u''
|
||||
|
||||
# The language for content autogenerated by Sphinx. Refer to documentation
|
||||
# for a list of supported languages.
|
||||
#
|
||||
# This is also used if you do content translation via gettext catalogs.
|
||||
# Usually you set "language" from the command line for these cases.
|
||||
language = None
|
||||
|
||||
# List of patterns, relative to source directory, that match files and
|
||||
# directories to ignore when looking for source files.
|
||||
# This patterns also effect to html_static_path and html_extra_path
|
||||
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'architecture']
|
||||
|
||||
# The name of the Pygments (syntax highlighting) style to use.
|
||||
pygments_style = 'sphinx'
|
||||
|
||||
# If true, `todo` and `todoList` produce output, else they produce nothing.
|
||||
todo_include_todos = False
|
||||
|
||||
|
||||
# -- Options for HTML output ----------------------------------------------
|
||||
|
||||
# The theme to use for HTML and HTML Help pages. See the documentation for
|
||||
# a list of builtin themes.
|
||||
#
|
||||
html_theme = 'sphinx_rtd_theme'
|
||||
# html_theme = 'alabaster'
|
||||
|
||||
# Theme options are theme-specific and customize the look and feel of a theme
|
||||
# further. For a list of options available for each theme, see the
|
||||
# documentation.
|
||||
#
|
||||
# html_theme_options = {}
|
||||
|
||||
# Add any paths that contain custom static files (such as style sheets) here,
|
||||
# relative to this directory. They are copied after the builtin static files,
|
||||
# so a file named "default.css" will overwrite the builtin "default.css".
|
||||
html_static_path = ['_static']
|
||||
|
||||
# Custom sidebar templates, must be a dictionary that maps document names
|
||||
# to template names.
|
||||
#
|
||||
# This is required for the alabaster theme
|
||||
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
|
||||
html_sidebars = {
|
||||
'**': [
|
||||
'about.html',
|
||||
'navigation.html',
|
||||
'relations.html', # needs 'show_related': True theme option to display
|
||||
'searchbox.html',
|
||||
'donate.html',
|
||||
]
|
||||
}
|
||||
|
||||
|
||||
# -- Options for HTMLHelp output ------------------------------------------
|
||||
|
||||
# Output file base name for HTML help builder.
|
||||
htmlhelp_basename = 'Tendermintdoc'
|
||||
|
||||
|
||||
# -- Options for LaTeX output ---------------------------------------------
|
||||
|
||||
latex_elements = {
|
||||
# The paper size ('letterpaper' or 'a4paper').
|
||||
#
|
||||
# 'papersize': 'letterpaper',
|
||||
|
||||
# The font size ('10pt', '11pt' or '12pt').
|
||||
#
|
||||
# 'pointsize': '10pt',
|
||||
|
||||
# Additional stuff for the LaTeX preamble.
|
||||
#
|
||||
# 'preamble': '',
|
||||
|
||||
# Latex figure (float) alignment
|
||||
#
|
||||
# 'figure_align': 'htbp',
|
||||
}
|
||||
|
||||
# Grouping the document tree into LaTeX files. List of tuples
|
||||
# (source start file, target name, title,
|
||||
# author, documentclass [howto, manual, or own class]).
|
||||
latex_documents = [
|
||||
(master_doc, 'Tendermint.tex', u'Tendermint Documentation',
|
||||
u'The Authors', 'manual'),
|
||||
]
|
||||
|
||||
|
||||
# -- Options for manual page output ---------------------------------------
|
||||
|
||||
# One entry per manual page. List of tuples
|
||||
# (source start file, name, description, authors, manual section).
|
||||
man_pages = [
|
||||
(master_doc, 'Tendermint', u'Tendermint Documentation',
|
||||
[author], 1)
|
||||
]
|
||||
|
||||
|
||||
# -- Options for Texinfo output -------------------------------------------
|
||||
|
||||
# Grouping the document tree into Texinfo files. List of tuples
|
||||
# (source start file, target name, title, author,
|
||||
# dir menu entry, description, category)
|
||||
texinfo_documents = [
|
||||
(master_doc, 'Tendermint', u'Tendermint Documentation',
|
||||
author, 'Tendermint', 'Byzantine Fault Tolerant Consensus.',
|
||||
'Database'),
|
||||
]
|
||||
|
||||
repo = "https://raw.githubusercontent.com/tendermint/tools/"
|
||||
branch = "master"
|
||||
|
||||
tools = "./tools"
|
||||
assets = tools + "/assets"
|
||||
|
||||
if os.path.isdir(tools) != True:
|
||||
os.mkdir(tools)
|
||||
if os.path.isdir(assets) != True:
|
||||
os.mkdir(assets)
|
||||
|
||||
urllib.urlretrieve(repo+branch+'/ansible/README.rst', filename=tools+'/ansible.rst')
|
||||
urllib.urlretrieve(repo+branch+'/ansible/assets/a_plus_t.png', filename=assets+'/a_plus_t.png')
|
||||
|
||||
urllib.urlretrieve(repo+branch+'/docker/README.rst', filename=tools+'/docker.rst')
|
||||
|
||||
urllib.urlretrieve(repo+branch+'/mintnet-kubernetes/README.rst', filename=tools+'/mintnet-kubernetes.rst')
|
||||
urllib.urlretrieve(repo+branch+'/mintnet-kubernetes/assets/gce1.png', filename=assets+'/gce1.png')
|
||||
urllib.urlretrieve(repo+branch+'/mintnet-kubernetes/assets/gce2.png', filename=assets+'/gce2.png')
|
||||
urllib.urlretrieve(repo+branch+'/mintnet-kubernetes/assets/statefulset.png', filename=assets+'/statefulset.png')
|
||||
urllib.urlretrieve(repo+branch+'/mintnet-kubernetes/assets/t_plus_k.png', filename=assets+'/t_plus_k.png')
|
||||
|
||||
urllib.urlretrieve(repo+branch+'/terraform-digitalocean/README.rst', filename=tools+'/terraform-digitalocean.rst')
|
||||
urllib.urlretrieve(repo+branch+'/tm-bench/README.rst', filename=tools+'/benchmarking-and-monitoring.rst')
|
||||
# the readme for below is included in tm-bench
|
||||
# urllib.urlretrieve('https://raw.githubusercontent.com/tendermint/tools/master/tm-monitor/README.rst', filename='tools/tm-monitor.rst')
|
90
docs/deploy-testnets.rst
Normal file
90
docs/deploy-testnets.rst
Normal file
@@ -0,0 +1,90 @@
|
||||
Deploy a Testnet
|
||||
================
|
||||
|
||||
Now that we've seen how ABCI works, and even played with a few
|
||||
applications on a single validator node, it's time to deploy a test
|
||||
network to four validator nodes. For this deployment, we'll use the
|
||||
``basecoin`` application.
|
||||
|
||||
Manual Deployments
|
||||
------------------
|
||||
|
||||
It's relatively easy to setup a Tendermint cluster manually. The only
|
||||
requirements for a particular Tendermint node are a private key for the
|
||||
validator, stored as ``priv_validator.json``, and a list of the public
|
||||
keys of all validators, stored as ``genesis.json``. These files should
|
||||
be stored in ``~/.tendermint``, or wherever the ``$TMROOT`` variable
|
||||
might be set to.
|
||||
|
||||
Here are the steps to setting up a testnet manually:
|
||||
|
||||
1) Provision nodes on your cloud provider of choice
|
||||
2) Install Tendermint and the application of interest on all nodes
|
||||
3) Generate a private key for each validator using
|
||||
``tendermint gen_validator``
|
||||
4) Compile a list of public keys for each validator into a
|
||||
``genesis.json`` file.
|
||||
5) Run ``tendermint node --p2p.seeds=< seed addresses >`` on each node,
|
||||
where ``< seed addresses >`` is a comma separated list of the IP:PORT
|
||||
combination for each node. The default port for Tendermint is
|
||||
``46656``. Thus, if the IP addresses of your nodes were
|
||||
``192.168.0.1, 192.168.0.2, 192.168.0.3, 192.168.0.4``, the command
|
||||
would look like:
|
||||
``tendermint node --p2p.seeds=192.168.0.1:46656,192.168.0.2:46656,192.168.0.3:46656,192.168.0.4:46656``.
|
||||
|
||||
After a few seconds, all the nodes should connect to each other and start
|
||||
making blocks! For more information, see the Tendermint Networks section
|
||||
of `the guide to using Tendermint <using-tendermint.html>`__.
|
||||
|
||||
Automated Deployments
|
||||
---------------------
|
||||
|
||||
While the manual deployment is easy enough, an automated deployment is
|
||||
usually quicker. The below examples show different tools that can be used
|
||||
for automated deployments.
|
||||
|
||||
Automated Deployment using Kubernetes
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
The `mintnet-kubernetes tool <https://github.com/tendermint/tools/tree/master/mintnet-kubernetes>`__
|
||||
allows automating the deployment of a Tendermint network on an already
|
||||
provisioned kubernetes cluster. For simple provisioning of a kubernetes
|
||||
cluster, check out the `Google Cloud Platform <https://cloud.google.com/>`__.
|
||||
|
||||
Automated Deployment using Terraform and Ansible
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
The `terraform-digitalocean tool <https://github.com/tendermint/tools/tree/master/terraform-digitalocean>`__
|
||||
allows creating a set of servers on the DigitalOcean cloud.
|
||||
|
||||
The `ansible playbooks <https://github.com/tendermint/tools/tree/master/ansible>`__
|
||||
allow creating and managing a ``basecoin`` or ``ethermint`` testnet on provisioned servers.
|
||||
|
||||
Package Deployment on Linux for developers
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
The ``tendermint`` and ``basecoin`` applications can be installed from RPM or DEB packages on
|
||||
Linux machines for development purposes. The packages are configured to be validators on the
|
||||
one-node network that the machine represents. The services are not started after installation,
|
||||
this way giving an opportunity to reconfigure the applications before starting.
|
||||
|
||||
The Ansible playbooks in the previous section use this repository to install ``basecoin``.
|
||||
After installation, additional steps are executed to make sure that the multi-node testnet has
|
||||
the right configuration before start.
|
||||
|
||||
Install from the CentOS/RedHat repository:
|
||||
|
||||
::
|
||||
|
||||
rpm --import https://tendermint-packages.interblock.io/centos/7/os/x86_64/RPM-GPG-KEY-Tendermint
|
||||
wget -O /etc/yum.repos.d/tendermint.repo https://tendermint-packages.interblock.io/centos/7/os/x86_64/tendermint.repo
|
||||
yum install basecoin
|
||||
|
||||
Install from the Debian/Ubuntu repository:
|
||||
|
||||
::
|
||||
|
||||
wget -O - https://tendermint-packages.interblock.io/centos/7/os/x86_64/RPM-GPG-KEY-Tendermint | apt-key add -
|
||||
wget -O /etc/apt/sources.list.d/tendermint.list https://tendermint-packages.interblock.io/debian/tendermint.list
|
||||
apt-get update && apt-get install basecoin
|
||||
|
94
docs/ecosystem.rst
Normal file
94
docs/ecosystem.rst
Normal file
@@ -0,0 +1,94 @@
|
||||
Tendermint Ecosystem
|
||||
====================
|
||||
|
||||
Below are the many applications built using various pieces of the Tendermint stack. We thank the community for their contributions thus far and welcome the addition of new projects. Feel free to submit a pull request to add your project!
|
||||
|
||||
ABCI Applications
|
||||
-----------------
|
||||
|
||||
Burrow
|
||||
^^^^^^
|
||||
|
||||
Ethereum Virtual Machine augmented with native permissioning scheme and global key-value store, written in Go, authored by Monax Industries, and incubated `by Hyperledger <https://github.com/hyperledger/burrow>`__.
|
||||
|
||||
cb-ledger
|
||||
^^^^^^^^^
|
||||
|
||||
Custodian Bank Ledger, integrating central banking with the blockchains of tomorrow, written in C++, and `authored by Block Finance <https://github.com/block-finance/cpp-abci>`__.
|
||||
|
||||
Clearchain
|
||||
^^^^^^^^^^
|
||||
|
||||
Application to manage a distributed ledger for money transfers that support multi-currency accounts, written in Go, and `authored by Alessio Treglia <https://github.com/tendermint/clearchain>`__.
|
||||
|
||||
Comit
|
||||
^^^^^
|
||||
|
||||
Public service reporting and tracking, written in Go, and `authored by Zach Balder <https://github.com/zbo14/comit>`__.
|
||||
|
||||
Cosmos SDK
|
||||
^^^^^^^^^^
|
||||
|
||||
A prototypical account based crypto currency state machine supporting plugins, written in Go, and `authored by Cosmos <https://github.com/cosmos/cosmos-sdk>`__.
|
||||
|
||||
Ethermint
|
||||
^^^^^^^^^
|
||||
|
||||
The go-ethereum state machine run as an ABCI app, written in Go, `authored by Tendermint <https://github.com/tendermint/ethermint>`__.
|
||||
|
||||
|
||||
Merkle AVL Tree
|
||||
^^^^^^^^^^^^^^^
|
||||
|
||||
The following are implementations of the Tendermint IAVL tree as an ABCI application
|
||||
|
||||
Merkleeyes
|
||||
~~~~~~~~~~
|
||||
|
||||
Written in Go, `authored by Tendermint <https://github.com/tendermint/merkleeyes>`__.
|
||||
|
||||
MerkleTree
|
||||
~~~~~~~~~~
|
||||
|
||||
Written in Java, `authored by jTendermint <https://github.com/jTendermint/MerkleTree>`__.
|
||||
|
||||
|
||||
TMChat
|
||||
^^^^^^
|
||||
|
||||
P2P chat using Tendermint, written in Java, `authored by wolfposd <https://github.com/wolfposd/TMChat>`__.
|
||||
|
||||
Passwerk
|
||||
^^^^^^^^
|
||||
|
||||
Encrypted storage web-utility backed by Tendermint, written in Go, `authored by Rigel Rozanski <https://github.com/rigelrozanski/passwerk>`__.
|
||||
|
||||
ABCI Servers
|
||||
------------
|
||||
|
||||
|
||||
+-------------------------------------------------------------+--------------------+--------------+
|
||||
| **Name** | **Author** | **Language** |
|
||||
| | | |
|
||||
+-------------------------------------------------------------+--------------------+--------------+
|
||||
| `abci <https://github.com/tendermint/abci>`__ | Tendermint | Go |
|
||||
+-------------------------------------------------------------+--------------------+--------------+
|
||||
| `js abci <https://github.com/tendermint/js-abci>`__ | Tendermint | Javascript |
|
||||
+-------------------------------------------------------------+--------------------+--------------+
|
||||
| `cpp-tmsp <https://github.com/mdyring/cpp-tmsp>`__ | Martin Dyring | C++ |
|
||||
+-------------------------------------------------------------+--------------------+--------------+
|
||||
| `jabci <https://github.com/jTendermint/jabci>`__ | jTendermint | Java |
|
||||
+-------------------------------------------------------------+--------------------+--------------+
|
||||
| `Spearmint <https://github.com/dennismckinnon/spearmint>`__ | Dennis Mckinnon | Javascript |
|
||||
+-------------------------------------------------------------+--------------------+--------------+
|
||||
| `ocaml-tmsp <https://github.com/zbo14/ocaml-tmsp>`__ | Zach Balder | Ocaml |
|
||||
+-------------------------------------------------------------+--------------------+--------------+
|
||||
| `abci_server <https://github.com/KrzysiekJ/abci_server>`__ | Krzysztof Jurewicz | Erlang |
|
||||
+-------------------------------------------------------------+--------------------+--------------+
|
||||
|
||||
Deployment Tools
|
||||
----------------
|
||||
|
||||
See `deploy testnets <./deploy-testnets.html>`__ for information about all the tools built by Tendermint. We have Kubernetes, Ansible, and Terraform integrations.
|
||||
|
||||
Cloudsoft built `brooklyn-tendermint <https://github.com/cloudsoft/brooklyn-tendermint>`__ for deploying a tendermint testnet in docker containers. It uses Clocker for Apache Brooklyn.
|
277
docs/getting-started.rst
Normal file
277
docs/getting-started.rst
Normal file
@@ -0,0 +1,277 @@
|
||||
First Tendermint App
|
||||
====================
|
||||
|
||||
As a general purpose blockchain engine, Tendermint is agnostic to the
|
||||
application you want to run. So, to run a complete blockchain that does
|
||||
something useful, you must start two programs: one is Tendermint Core,
|
||||
the other is your application, which can be written in any programming
|
||||
language. Recall from `the intro to ABCI <introduction.rst#ABCI-Overview>`__ that
|
||||
Tendermint Core handles all the p2p and consensus stuff, and just
|
||||
forwards transactions to the application when they need to be validated,
|
||||
or when they're ready to be committed to a block.
|
||||
|
||||
In this guide, we show you some examples of how to run an application
|
||||
using Tendermint.
|
||||
|
||||
Install
|
||||
-------
|
||||
|
||||
The first apps we will work with are written in Go. To install them, you
|
||||
need to `install Go <https://golang.org/doc/install>`__ and put
|
||||
``$GOPATH/bin`` in your
|
||||
``$PATH``; see `here <https://github.com/tendermint/tendermint/wiki/Setting-GOPATH>`__ for more info.
|
||||
|
||||
Then run
|
||||
|
||||
::
|
||||
|
||||
go get -u github.com/tendermint/abci/cmd/...
|
||||
|
||||
If there is an error, install and run the ``glide`` tool to pin the
|
||||
dependencies:
|
||||
|
||||
::
|
||||
|
||||
go get github.com/Masterminds/glide
|
||||
cd $GOPATH/src/github.com/tendermint/abci
|
||||
glide install
|
||||
go install ./cmd/...
|
||||
|
||||
Now you should have the ``abci-cli`` plus two apps installed:
|
||||
|
||||
::
|
||||
|
||||
dummy --help
|
||||
counter --help
|
||||
|
||||
These binaries are installed on ``$GOPATH/bin`` and all come from within
|
||||
the ``./cmd/...`` directory of the abci repository.
|
||||
|
||||
Both of these example applications are in Go. See below for an
|
||||
application written in Javascript.
|
||||
|
||||
Now, let's run some apps!
|
||||
|
||||
Dummy - A First Example
|
||||
-----------------------
|
||||
|
||||
The dummy app is a `Merkle
|
||||
tree <https://en.wikipedia.org/wiki/Merkle_tree>`__ that just stores all
|
||||
transactions. If the transaction contains an ``=``, eg. ``key=value``,
|
||||
then the ``value`` is stored under the ``key`` in the Merkle tree.
|
||||
Otherwise, the full transaction bytes are stored as the key and the
|
||||
value.
|
||||
|
||||
Let's start a dummy application.
|
||||
|
||||
::
|
||||
|
||||
dummy
|
||||
|
||||
In another terminal, we can start Tendermint. If you have never run
|
||||
Tendermint before, use:
|
||||
|
||||
::
|
||||
|
||||
tendermint init
|
||||
tendermint node
|
||||
|
||||
If you have used Tendermint, you may want to reset the data for a new
|
||||
blockchain by running ``tendermint unsafe_reset_all``. Then you can run
|
||||
``tendermint node`` to start Tendermint, and connect to the app. For
|
||||
more details, see `the guide on using
|
||||
Tendermint <./using-tendermint.html>`__.
|
||||
|
||||
You should see Tendermint making blocks! We can get the status of our
|
||||
Tendermint node as follows:
|
||||
|
||||
::
|
||||
|
||||
curl -s localhost:46657/status
|
||||
|
||||
The ``-s`` just silences ``curl``. For nicer output, pipe the result
|
||||
into a tool like `jq <https://stedolan.github.io/jq/>`__ or
|
||||
`jsonpp <https://github.com/jmhodges/jsonpp>`__.
|
||||
|
||||
Now let's send some transactions to the dummy.
|
||||
|
||||
::
|
||||
|
||||
curl -s 'localhost:46657/broadcast_tx_commit?tx="abcd"'
|
||||
|
||||
Note the single quote (``'``) around the url, which ensures that the
|
||||
double quotes (``"``) are not escaped by bash. This command sent a
|
||||
transaction with bytes ``abcd``, so ``abcd`` will be stored as both the
|
||||
key and the value in the Merkle tree. The response should look something
|
||||
like:
|
||||
|
||||
::
|
||||
|
||||
{"jsonrpc":"2.0","id":"","result":[98,{"check_tx":{},"deliver_tx":{}}],"error":""}
|
||||
|
||||
The ``98`` is a type-byte, and can be ignored (it's useful for
|
||||
serializing and deserializing arbitrary json). Otherwise, this result is
|
||||
empty - there's nothing to report on and everything is OK.
|
||||
|
||||
We can confirm that our transaction worked and the value got stored by
|
||||
querying the app:
|
||||
|
||||
::
|
||||
|
||||
curl -s 'localhost:46657/abci_query?data="abcd"&path=""&prove=false'
|
||||
|
||||
The ``path`` and ``prove`` arguments can be ignored for now, and in a
|
||||
future release can be left out. The result should look like:
|
||||
|
||||
::
|
||||
|
||||
{"jsonrpc":"2.0","id":"","result":[112,{"response":{"value":"61626364","log":"exists"}}],"error":""}
|
||||
|
||||
Again, the ``112`` is the type-byte. Note the ``value`` in the result
|
||||
(``61626364``); this is the hex-encoding of the ASCII of ``abcd``. You
|
||||
can verify this in a python shell by running
|
||||
``"61626364".decode('hex')``. Stay tuned for a future release that makes
|
||||
this output more human-readable ;).
|
||||
|
||||
Now let's try setting a different key and value:
|
||||
|
||||
::
|
||||
|
||||
curl -s 'localhost:46657/broadcast_tx_commit?tx="name=satoshi"'
|
||||
|
||||
Now if we query for ``name``, we should get ``satoshi``, or
|
||||
``7361746F736869`` in hex:
|
||||
|
||||
::
|
||||
|
||||
curl -s 'localhost:46657/abci_query?data="name"&path=""&prove=false'
|
||||
|
||||
Try some other transactions and queries to make sure everything is
|
||||
working!
|
||||
|
||||
Counter - Another Example
|
||||
-------------------------
|
||||
|
||||
Now that we've got the hang of it, let's try another application, the
|
||||
"counter" app.
|
||||
|
||||
The counter app doesn't use a Merkle tree, it just counts how many times
|
||||
we've sent a transaction, or committed the state.
|
||||
|
||||
This application has two modes: ``serial=off`` and ``serial=on``.
|
||||
|
||||
When ``serial=on``, transactions must be a big-endian encoded
|
||||
incrementing integer, starting at 0.
|
||||
|
||||
If ``serial=off``, there are no restrictions on transactions.
|
||||
|
||||
In a live blockchain, transactions collect in memory before they are
|
||||
committed into blocks. To avoid wasting resources on invalid
|
||||
transactions, ABCI provides the ``CheckTx`` message, which application
|
||||
developers can use to accept or reject transactions, before they are
|
||||
stored in memory or gossipped to other peers.
|
||||
|
||||
In this instance of the counter app, with ``serial=on``, ``CheckTx``
|
||||
only allows transactions whose integer is greater than the last
|
||||
committed one.
|
||||
|
||||
Let's kill the previous instance of ``tendermint`` and the ``dummy``
|
||||
application, and start the counter app. We can enable ``serial=on`` with
|
||||
a flag:
|
||||
|
||||
::
|
||||
|
||||
counter --serial
|
||||
|
||||
In another window, reset then start Tendermint:
|
||||
|
||||
::
|
||||
|
||||
tendermint unsafe_reset_all
|
||||
tendermint node
|
||||
|
||||
Once again, you can see the blocks streaming by. Let's send some
|
||||
transactions. Since we have set ``serial=on``, the first transaction
|
||||
must be the number ``0``:
|
||||
|
||||
::
|
||||
|
||||
curl localhost:46657/broadcast_tx_commit?tx=0x00
|
||||
|
||||
Note the empty (hence successful) response. The next transaction must be
|
||||
the number ``1``. If instead, we try to send a ``5``, we get an error:
|
||||
|
||||
::
|
||||
|
||||
> curl localhost:46657/broadcast_tx_commit?tx=0x05
|
||||
{"jsonrpc":"2.0","id":"","result":[98,{"check_tx":{},"deliver_tx":{"code":3,"log":"Invalid nonce. Expected 1, got 5"}}],"error":""}
|
||||
|
||||
But if we send a ``1``, it works again:
|
||||
|
||||
::
|
||||
|
||||
> curl localhost:46657/broadcast_tx_commit?tx=0x01
|
||||
{"jsonrpc":"2.0","id":"","result":[98,{"check_tx":{},"deliver_tx":{}}],"error":""}
|
||||
|
||||
For more details on the ``broadcast_tx`` API, see `the guide on using
|
||||
Tendermint <./using-tendermint.html>`__.
|
||||
|
||||
CounterJS - Example in Another Language
|
||||
---------------------------------------
|
||||
|
||||
We also want to run applications in another language - in this case,
|
||||
we'll run a Javascript version of the ``counter``. To run it, you'll
|
||||
need to `install node <https://nodejs.org/en/download/>`__.
|
||||
|
||||
You'll also need to fetch the relevant repository, from `here <https://github.com/tendermint/js-abci>`__ then install it. As Go devs, we
|
||||
keep all our code under the ``$GOPATH``, so run:
|
||||
|
||||
::
|
||||
|
||||
go get github.com/tendermint/js-abci &> /dev/null
|
||||
cd $GOPATH/src/github.com/tendermint/js-abci/example
|
||||
npm install
|
||||
|
||||
Kill the previous ``counter`` and ``tendermint`` processes. Now run the
|
||||
app:
|
||||
|
||||
::
|
||||
|
||||
node example/app.js
|
||||
|
||||
In another window, reset and start ``tendermint``:
|
||||
|
||||
::
|
||||
|
||||
tendermint unsafe_reset_all
|
||||
tendermint node
|
||||
|
||||
Once again, you should see blocks streaming by - but now, our
|
||||
application is written in javascript! Try sending some transactions, and
|
||||
like before - the results should be the same:
|
||||
|
||||
::
|
||||
|
||||
curl localhost:46657/broadcast_tx_commit?tx=0x00 # ok
|
||||
curl localhost:46657/broadcast_tx_commit?tx=0x05 # invalid nonce
|
||||
curl localhost:46657/broadcast_tx_commit?tx=0x01 # ok
|
||||
|
||||
Neat, eh?
|
||||
|
||||
Basecoin - A More Interesting Example
|
||||
-------------------------------------
|
||||
|
||||
We saved the best for last; the `Cosmos SDK <https://github.com/cosmos/cosmos-sdk>`__ is a general purpose framework for building cryptocurrencies, unlike the ``dummy`` and ``counter``, which are strictly for example purposes. The reference implementation of the Cosmos SDK is ``basecoin``, which demonstrates how to use the building blocks of the Cosmos SDK.
|
||||
|
||||
The default ``basecoin`` application is a multi-asset cryptocurrency
|
||||
that supports inter-blockchain communication. For more details on how
|
||||
basecoin works and how to use it, see our `basecoin
|
||||
guide <https://github.com/cosmos/cosmos-sdk/blob/develop/docs/guide/basecoin-basics.md>`__
|
||||
|
||||
In this tutorial you learned how to run applications using Tendermint
|
||||
on a single node. You saw how applications could be written in different
|
||||
languages, and how to send transactions and query for the latest state.
|
||||
But the true power of Tendermint comes from its ability to securely and
|
||||
efficiently run an application across a distributed network of nodes,
|
||||
while keeping them all in sync using its state-of-the-art consensus
|
||||
protocol. Next, we show you how to deploy Tendermint testnets.
|
BIN
docs/images/tmint-logo-blue.png
Normal file
BIN
docs/images/tmint-logo-blue.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 52 KiB |
73
docs/index.rst
Normal file
73
docs/index.rst
Normal file
@@ -0,0 +1,73 @@
|
||||
.. Tendermint documentation master file, created by
|
||||
sphinx-quickstart on Mon Aug 7 04:55:09 2017.
|
||||
You can adapt this file completely to your liking, but it should at least
|
||||
contain the root `toctree` directive.
|
||||
|
||||
Welcome to Tendermint!
|
||||
======================
|
||||
|
||||
|
||||
.. image:: assets/tmint-logo-blue.png
|
||||
:height: 500px
|
||||
:width: 500px
|
||||
:align: center
|
||||
|
||||
Tendermint 101
|
||||
--------------
|
||||
|
||||
.. maxdepth set to 2 for sexiness
|
||||
.. but use 4 to upgrade overall documentation
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
introduction.rst
|
||||
install.rst
|
||||
getting-started.rst
|
||||
using-tendermint.rst
|
||||
|
||||
Tendermint Tools
|
||||
----------------
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
deploy-testnets.rst
|
||||
tools/ansible.rst
|
||||
tools/docker.rst
|
||||
tools/mintnet-kubernetes.rst
|
||||
tools/terraform-digitalocean.rst
|
||||
tools/benchmarking-and-monitoring.rst
|
||||
|
||||
|
||||
Tendermint Ecosystem
|
||||
--------------------
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
ecosystem.rst
|
||||
|
||||
Tendermint 102
|
||||
--------------
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
abci-cli.rst
|
||||
app-architecture.rst
|
||||
app-development.rst
|
||||
|
||||
Tendermint 201
|
||||
--------------
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
specification.rst
|
||||
|
||||
* For a deeper dive, see `this thesis <https://atrium.lib.uoguelph.ca/xmlui/handle/10214/9769>`__.
|
||||
* There is also the `original whitepaper <https://tendermint.com/static/docs/tendermint.pdf>`__, though it is now quite outdated.
|
||||
* Readers might also be interested in the `Cosmos Whitepaper <https://cosmos.network/whitepaper>`__ which describes Tendermint, ABCI, and how to build a scalable, heterogeneous, cryptocurrency network.
|
||||
* For example applications and related software built by the Tendermint team and others, see the `software ecosystem <https://tendermint.com/ecosystem>`__.
|
||||
|
||||
Join the `Cosmos and Tendermint Rocket Chat <https://cosmos.rocket.chat>`__ to ask questions and discuss projects.
|
110
docs/install.rst
Normal file
110
docs/install.rst
Normal file
@@ -0,0 +1,110 @@
|
||||
Install Tendermint
|
||||
==================
|
||||
|
||||
From Binary
|
||||
-----------
|
||||
|
||||
To download pre-built binaries, see the `Download page <https://tendermint.com/download>`__.
|
||||
|
||||
From Source
|
||||
-----------
|
||||
|
||||
You'll need `go`, maybe `glide` and the tendermint source code.
|
||||
|
||||
Install Go
|
||||
^^^^^^^^^^
|
||||
|
||||
Make sure you have `installed Go <https://golang.org/doc/install>`__ and
|
||||
set the ``GOPATH``.
|
||||
|
||||
Get Source Code
|
||||
^^^^^^^^^^^^^^^
|
||||
|
||||
You should be able to install the latest with a simple
|
||||
|
||||
::
|
||||
|
||||
go get github.com/tendermint/tendermint/cmd/tendermint
|
||||
|
||||
Run ``tendermint --help`` and ``tendermint version`` to ensure your
|
||||
installation worked.
|
||||
|
||||
If the installation failed, a dependency may have been updated and become
|
||||
incompatible with the latest Tendermint master branch. We solve this
|
||||
using the ``glide`` tool for dependency management.
|
||||
|
||||
First, install ``glide``:
|
||||
|
||||
::
|
||||
|
||||
go get github.com/Masterminds/glide
|
||||
|
||||
Now we can fetch the correct versions of each dependency by running:
|
||||
|
||||
::
|
||||
|
||||
cd $GOPATH/src/github.com/tendermint/tendermint
|
||||
glide install
|
||||
go install ./cmd/tendermint
|
||||
|
||||
Note that even though ``go get`` originally failed, the repository was
|
||||
still cloned to the correct location in the ``$GOPATH``.
|
||||
|
||||
The latest Tendermint Core version is now installed.
|
||||
|
||||
Reinstall
|
||||
---------
|
||||
|
||||
If you already have Tendermint installed, and you make updates, simply
|
||||
|
||||
::
|
||||
|
||||
cd $GOPATH/src/github.com/tendermint/tendermint
|
||||
go install ./cmd/tendermint
|
||||
|
||||
To upgrade, there are a few options:
|
||||
|
||||
- set a new ``$GOPATH`` and run
|
||||
``go get github.com/tendermint/tendermint/cmd/tendermint``. This
|
||||
makes a fresh copy of everything for the new version.
|
||||
- run ``go get -u github.com/tendermint/tendermint/cmd/tendermint``,
|
||||
where the ``-u`` fetches the latest updates for the repository and
|
||||
its dependencies
|
||||
- fetch and checkout the latest master branch in
|
||||
``$GOPATH/src/github.com/tendermint/tendermint``, and then run
|
||||
``glide install && go install ./cmd/tendermint`` as above.
|
||||
|
||||
Note the first two options should usually work, but may fail. If they
|
||||
do, use ``glide``, as above:
|
||||
|
||||
::
|
||||
|
||||
cd $GOPATH/src/github.com/tendermint/tendermint
|
||||
glide install
|
||||
go install ./cmd/tendermint
|
||||
|
||||
Since the third option just uses ``glide`` right away, it should always
|
||||
work.
|
||||
|
||||
Troubleshooting
|
||||
---------------
|
||||
|
||||
If ``go get`` failing bothers you, fetch the code using ``git``:
|
||||
|
||||
::
|
||||
|
||||
mkdir -p $GOPATH/src/github.com/tendermint
|
||||
git clone https://github.com/tendermint/tendermint $GOPATH/src/github.com/tendermint/tendermint
|
||||
cd $GOPATH/src/github.com/tendermint/tendermint
|
||||
glide install
|
||||
go install ./cmd/tendermint
|
||||
|
||||
Run
|
||||
^^^
|
||||
|
||||
To start a one-node blockchain with a simple in-process application:
|
||||
|
||||
::
|
||||
|
||||
tendermint init
|
||||
tendermint node --proxy_app=dummy
|
231
docs/introduction.rst
Normal file
231
docs/introduction.rst
Normal file
@@ -0,0 +1,231 @@
|
||||
Introduction
|
||||
============
|
||||
|
||||
Welcome to the Tendermint guide! This is the best place to start if you are new
|
||||
to Tendermint.
|
||||
|
||||
What is Tendermint?
|
||||
-------------------
|
||||
|
||||
Tendermint is software for securely and consistently replicating an application on many machines.
|
||||
By securely, we mean that Tendermint works even if up to 1/3 of machines fail in arbitrary ways.
|
||||
By consistently, we mean that every non-faulty machine sees the same transaction log and computes the same state.
|
||||
Secure and consistent replication is a fundamental problem in distributed systems;
|
||||
it plays a critical role in the fault tolerance of a broad range of applications,
|
||||
from currencies, to elections, to infrastructure orchestration, and beyond.
|
||||
|
||||
The ability to tolerate machines failing in arbitrary ways, including becoming malicious, is known as Byzantine fault tolerance (BFT).
|
||||
The theory of BFT is decades old, but software implementations have only became popular recently,
|
||||
due largely to the success of "blockchain technology" like Bitcoin and Ethereum.
|
||||
Blockchain technology is just a reformalization of BFT in a more modern setting,
|
||||
with emphasis on peer-to-peer networking and cryptographic authentication.
|
||||
The name derives from the way transactions are batched in blocks,
|
||||
where each block contains a cryptographic hash of the previous one, forming a chain.
|
||||
In practice, the blockchain data structure actually optimizes BFT design.
|
||||
|
||||
Tendermint consists of two chief technical components: a blockchain consensus engine and a generic application interface.
|
||||
The consensus engine, called Tendermint Core, ensures that the same transactions are recorded on every machine in the same order.
|
||||
The application interface, called the Application BlockChain Interface (ABCI), enables the transactions to be processed in any programming language.
|
||||
Unlike other blockchain and consensus solutions, which come pre-packaged with built in state machines (like a fancy key-value store,
|
||||
or a quirky scripting language), developers can use Tendermint for BFT state machine replication of applications written in
|
||||
whatever programming language and development environment is right for them.
|
||||
|
||||
Tendermint is designed to be easy-to-use, simple-to-understand, highly performant, and useful
|
||||
for a wide variety of distributed applications.
|
||||
|
||||
Tendermint vs. X
|
||||
----------------
|
||||
|
||||
Tendermint vs. Other Software
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Tendermint is broadly similar to two classes of software.
|
||||
The first class consists of distributed key-value stores,
|
||||
like Zookeeper, etcd, and consul, which use non-BFT consensus.
|
||||
The second class is known as "blockchain technology",
|
||||
and consists of both cryptocurrencies like Bitcoin and Ethereum,
|
||||
and alternative distributed ledger designs like Hyperledger's Burrow.
|
||||
|
||||
Zookeeper, etcd, consul
|
||||
~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Zookeeper, etcd, and consul are all implementations of a key-value store atop a classical,
|
||||
non-BFT consensus algorithm. Zookeeper uses a version of Paxos called Zookeeper Atomic Broadcast,
|
||||
while etcd and consul use the Raft consensus algorithm, which is much younger and simpler.
|
||||
A typical cluster contains 3-5 machines, and can tolerate crash failures in up to 1/2 of the machines,
|
||||
but even a single Byzantine fault can destroy the system.
|
||||
|
||||
Each offering provides a slightly different implementation of a featureful key-value store,
|
||||
but all are generally focused around providing basic services to distributed systems,
|
||||
such as dynamic configuration, service discovery, locking, leader-election, and so on.
|
||||
|
||||
Tendermint is in essence similar software, but with two key differences:
|
||||
- It is Byzantine Fault Tolerant, meaning it can tolerate only up to 1/3 of machines failing,
|
||||
but those failures can include arbitrary behaviour - including hacking and malicious attacks.
|
||||
- It does not specify a particular application, like a fancy key-value store. Instead,
|
||||
it focuses on arbitrary state machine replication, so developers can build the application logic
|
||||
that's right for them, from key-value store to cryptocurrency to e-voting platform and beyond.
|
||||
|
||||
The layout of this Tendermint website content is also ripped directly and without shame from
|
||||
`consul.io <https://www.consul.io/>`__ and the other `Hashicorp sites <https://www.hashicorp.com/#tools>`__.
|
||||
|
||||
Bitcoin, Ethereum, etc.
|
||||
~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Tendermint emerged in the tradition of cryptocurrencies like Bitcoin, Ethereum, etc.
|
||||
with the goal of providing a more efficient and secure consensus algorithm than Bitcoin's Proof of Work.
|
||||
In the early days, Tendermint had a simple currency built in, and to participate in consensus,
|
||||
users had to "bond" units of the currency into a security deposit which could be revoked if they misbehaved -
|
||||
this is what made Tendermint a Proof-of-Stake algorithm.
|
||||
|
||||
Since then, Tendermint has evolved to be a general purpose blockchain consensus engine that can host arbitrary application states.
|
||||
That means it can be used as a plug-and-play replacement for the consensus engines of other blockchain software.
|
||||
So one can take the current Ethereum code base, whether in Rust, or Go, or Haskell, and run it as an ABCI application
|
||||
using Tendermint consensus. Indeed, `we did that with Ethereum <https://github.com/tendermint/ethermint>`__.
|
||||
And we plan to do the same for Bitcoin, ZCash, and various other deterministic applications as well.
|
||||
|
||||
Another example of a cryptocurrency application built on Tendermint is `the Cosmos network <http://cosmos.network>`__.
|
||||
|
||||
Other Blockchain Projects
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
`Fabric <https://github.com/hyperledger/fabric>`__ takes a similar approach to Tendermint, but is more opinionated about how the state is managed,
|
||||
and requires that all application behaviour runs in potentially many docker containers, modules it calls "chaincode".
|
||||
It uses an implementation of `PBFT <http://pmg.csail.mit.edu/papers/osdi99.pdf>`__
|
||||
from a team at IBM that is
|
||||
`augmented to handle potentially non-deterministic chaincode <https://www.zurich.ibm.com/~cca/papers/sieve.pdf>`__
|
||||
It is possible to implement this docker-based behaviour as an ABCI app in Tendermint,
|
||||
though extending Tendermint to handle non-determinism remains for future work.
|
||||
|
||||
`Burrow <https://github.com/hyperledger/burrow>`__ is an implementation of the Ethereum Virtual Machine and Ethereum transaction mechanics,
|
||||
with additional features for a name-registry, permissions, and native contracts, and an alternative blockchain API.
|
||||
It uses Tendermint as its consensus engine, and provides a particular application state.
|
||||
|
||||
ABCI Overview
|
||||
-------------
|
||||
|
||||
The `Application BlockChain Interface (ABCI) <https://github.com/tendermint/abci>`__ allows for Byzantine Fault Tolerant replication of applications written in any programming language.
|
||||
|
||||
Motivation
|
||||
~~~~~~~~~~
|
||||
|
||||
Thus far, all blockchain "stacks" (such as `Bitcoin <https://github.com/bitcoin/bitcoin>`__) have had a monolithic design. That is, each blockchain stack is a single program that handles all the concerns of a decentralized ledger; this includes P2P connectivity, the "mempool" broadcasting of transactions, consensus on the most recent block, account balances, Turing-complete contracts, user-level permissions, etc.
|
||||
|
||||
Using a monolithic architecture is typically bad practice in computer science.
|
||||
It makes it difficult to reuse components of the code, and attempts to do so result in complex maintenance procedures for forks of the codebase.
|
||||
This is especially true when the codebase is not modular in design and suffers from "spaghetti code".
|
||||
|
||||
Another problem with monolithic design is that it limits you to the language of the blockchain stack (or vice versa). In the case of Ethereum which supports a Turing-complete bytecode virtual-machine, it limits you to languages that compile down to that bytecode; today, those are Serpent and Solidity.
|
||||
|
||||
In contrast, our approach is to decouple the consensus engine and P2P layers from the details of the application state of the particular blockchain application.
|
||||
We do this by abstracting away the details of the application to an interface, which is implemented as a socket protocol.
|
||||
|
||||
Thus we have an interface, the Application BlockChain Interface (ABCI), and its primary implementation, the Tendermint Socket Protocol (TSP, or Teaspoon).
|
||||
|
||||
Intro to ABCI
|
||||
~~~~~~~~~~~~~
|
||||
|
||||
`Tendermint Core <https://github.com/tendermint/tendermint>`__ (the "consensus engine") communicates with the application via a socket protocol that
|
||||
satisfies the `ABCI <https://github.com/tendermint/abci>`__.
|
||||
|
||||
To draw an analogy, lets talk about a well-known cryptocurrency, Bitcoin. Bitcoin is a cryptocurrency blockchain where each node maintains a fully audited Unspent Transaction Output (UTXO) database. If one wanted to create a Bitcoin-like system on top of ABCI, Tendermint Core would be responsible for
|
||||
|
||||
- Sharing blocks and transactions between nodes
|
||||
- Establishing a canonical/immutable order of transactions (the blockchain)
|
||||
|
||||
The application will be responsible for
|
||||
|
||||
- Maintaining the UTXO database
|
||||
- Validating cryptographic signatures of transactions
|
||||
- Preventing transactions from spending non-existent transactions
|
||||
- Allowing clients to query the UTXO database.
|
||||
|
||||
Tendermint is able to decompose the blockchain design by offering a very simple API (ie. the ABCI) between the application process and consensus process.
|
||||
|
||||
The ABCI consists of 3 primary message types that get delivered from the core to the application. The application replies with corresponding response messages.
|
||||
|
||||
The messages are specified here: `ABCI Message Types <https://github.com/tendermint/abci#message-types>`__.
|
||||
|
||||
The `DeliverTx` message is the work horse of the application. Each transaction in the blockchain is delivered with this message. The application needs to validate each transaction received with the `DeliverTx` message against the current state, application protocol, and the cryptographic credentials of the transaction. A validated transaction then needs to update the application state — by binding a value into a key-value store, or by updating the UTXO database, for instance.
|
||||
|
||||
The `CheckTx` message is similar to `DeliverTx`, but it's only for validating transactions. Tendermint Core's mempool first checks the validity of a transaction with `CheckTx`, and only relays valid transactions to its peers. For instance, an application may check an incrementing sequence number in the transaction and return an error upon `CheckTx` if the sequence number is old. Alternatively, they might use a capabilities based system that requires capabilities to be renewed with every transaction.
|
||||
|
||||
The `Commit` message is used to compute a cryptographic commitment to the current application state, to be placed into the next block header. This has some handy properties. Inconsistencies in updating that state will now appear as blockchain forks which catches a whole class of programming errors. This also simplifies the development of secure lightweight clients, as Merkle-hash proofs can be verified by checking against the block hash, and that the block hash is signed by a quorum.
|
||||
|
||||
There can be multiple ABCI socket connections to an application. Tendermint Core creates three ABCI connections to the application; one for the validation of transactions when broadcasting in the mempool, one for the consensus engine to run block proposals, and one more for querying the application state.
|
||||
|
||||
It's probably evident that application designers need to very carefully design their message handlers to create a blockchain that does anything useful, but this architecture provides a place to start. The diagram below illustrates the flow of messages via ABCI.
|
||||
|
||||
.. figure:: assets/abci.png
|
||||
|
||||
A Note on Determinism
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
The logic for blockchain transaction processing must be deterministic. If the application logic weren't deterministic, consensus would not be reached among the Tendermint Core replica nodes.
|
||||
|
||||
Solidity on Ethereum is a great language of choice for blockchain applications because, among other reasons, it is a completely deterministic programming language. However, it's also possible to create deterministic applications using existing popular languages like Java, C++, Python, or Go. Game programmers and blockchain developers are already familiar with creating deterministic programs by avoiding sources of non-determinism such as:
|
||||
|
||||
* random number generators (without deterministic seeding)
|
||||
* race conditions on threads (or avoiding threads altogether)
|
||||
* the system clock
|
||||
* uninitialized memory (in unsafe programming languages like C or C++)
|
||||
* `floating point arithmetic <http://gafferongames.com/networking-for-game-programmers/floating-point-determinism/>`__.
|
||||
* language features that are random (e.g. map iteration in Go)
|
||||
|
||||
While programmers can avoid non-determinism by being careful, it is also possible to create a special linter or static analyzer for each language to check for determinism. In the future we may work with partners to create such tools.
|
||||
|
||||
Consensus Overview
|
||||
------------------
|
||||
|
||||
Tendermint is an easy-to-understand, mostly asynchronous, BFT consensus protocol.
|
||||
The protocol follows a simple state machine that looks like this:
|
||||
|
||||
.. figure:: assets/consensus_logic.png
|
||||
|
||||
Participants in the protocol are called "validators";
|
||||
they take turns proposing blocks of transactions and voting on them.
|
||||
Blocks are committed in a chain, with one block at each "height".
|
||||
A block may fail to be committed, in which case the protocol moves to the next "round",
|
||||
and a new validator gets to propose a block for that height.
|
||||
Two stages of voting are required to successfully commit a block;
|
||||
we call them "pre-vote" and "pre-commit".
|
||||
A block is committed when more than 2/3 of validators pre-commit for the same block in the same round.
|
||||
|
||||
There is a picture of a couple doing the polka because validators are doing something like a polka dance.
|
||||
When more than two-thirds of the validators pre-vote for the same block, we call that a "polka".
|
||||
Every pre-commit must be justified by a polka in the same round.
|
||||
|
||||
Validators may fail to commit a block for a number of reasons;
|
||||
the current proposer may be offline, or the network may be slow.
|
||||
Tendermint allows them to establish that a validator should be skipped.
|
||||
Validators wait a small amount of time to receive a complete proposal block from the proposer before voting to move to the next round.
|
||||
This reliance on a timeout is what makes Tendermint a weakly synchronous protocol, rather than an asynchronous one.
|
||||
However, the rest of the protocol is asynchronous, and validators only make progress after hearing from more than two-thirds of the validator set.
|
||||
A simplifying element of Tendermint is that it uses the same mechanism to commit a block as it does to skip to the next round.
|
||||
|
||||
Assuming less than one-third of the validators are Byzantine, Tendermint guarantees that safety will never be violated - that is, validators will never commit conflicting blocks at the same height.
|
||||
To do this it introduces a few "locking" rules which modulate which paths can be followed in the flow diagram.
|
||||
Once a validator precommits a block, it is "locked" on that block.
|
||||
Then,
|
||||
|
||||
1) it must prevote for the block it is locked on
|
||||
2) it can only unlock, and precommit for a new block, if there is a polka for that block in a later round
|
||||
|
||||
Stake
|
||||
-----
|
||||
|
||||
In many systems, not all validators will have the same "weight" in the consensus protocol.
|
||||
Thus, we are not so much interested in one-third or two-thirds of the validators, but in those proportions of the total voting power,
|
||||
which may not be uniformly distributed across individual validators.
|
||||
|
||||
Since Tendermint can replicate arbitrary applications, it is possible to define a currency, and denominate the voting power in that currency.
|
||||
When voting power is denominated in a native currency, the system is often referred to as Proof-of-Stake.
|
||||
Validators can be forced, by logic in the application,
|
||||
to "bond" their currency holdings in a security deposit that can be destroyed if they're found to misbehave in the consensus protocol.
|
||||
This adds an economic element to the security of the protocol, allowing one to quantify the cost of violating the assumption that less than one-third of voting power is Byzantine.
|
||||
|
||||
The `Cosmos Network <http://cosmos.network>`__ is designed to use this Proof-of-Stake mechanism across an array of cryptocurrencies implemented as ABCI applications.
|
||||
|
||||
The following diagram is Tendermint in a (technical) nutshell. `See here for high resolution version <https://github.com/mobfoundry/hackatom/blob/master/tminfo.pdf>`__.
|
||||
|
||||
.. figure:: assets/tm-transaction-flow.png
|
4
docs/requirements.txt
Normal file
4
docs/requirements.txt
Normal file
@@ -0,0 +1,4 @@
|
||||
sphinx
|
||||
sphinx-autobuild
|
||||
recommonmark
|
||||
sphinx_rtd_theme
|
20
docs/specification.rst
Normal file
20
docs/specification.rst
Normal file
@@ -0,0 +1,20 @@
|
||||
#############
|
||||
Specification
|
||||
#############
|
||||
|
||||
Here you'll find details of the Tendermint specification. See `the spec repo <https://github.com/tendermint/spec>`__ for upcoming material. Tendermint's types are produced by `godoc <https://godoc.org/github.com/tendermint/tendermint/types>`__
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
specification/block-structure.rst
|
||||
specification/byzantine-consensus-algorithm.rst
|
||||
specification/configuration.rst
|
||||
specification/fast-sync.rst
|
||||
specification/genesis.rst
|
||||
specification/light-client-protocol.rst
|
||||
specification/merkle.rst
|
||||
specification/rpc.rst
|
||||
specification/secure-p2p.rst
|
||||
specification/validators.rst
|
||||
specification/wire-protocol.rst
|
220
docs/specification/block-structure.rst
Normal file
220
docs/specification/block-structure.rst
Normal file
@@ -0,0 +1,220 @@
|
||||
Block Structure
|
||||
===============
|
||||
|
||||
The tendermint consensus engine records all agreements by a
|
||||
supermajority of nodes into a blockchain, which is replicated among all
|
||||
nodes. This blockchain is accessible via various rpc endpoints, mainly
|
||||
``/block?height=`` to get the full block, as well as
|
||||
``/blockchain?minHeight=_&maxHeight=_`` to get a list of headers. But
|
||||
what exactly is stored in these blocks?
|
||||
|
||||
Block
|
||||
~~~~~
|
||||
|
||||
A
|
||||
`Block <https://godoc.org/github.com/tendermint/tendermint/types#Block>`__
|
||||
contains:
|
||||
|
||||
- a `Header <#header>`__ contains merkle hashes for various chain
|
||||
states
|
||||
- the
|
||||
`Data <https://godoc.org/github.com/tendermint/tendermint/types#Data>`__
|
||||
is all transactions which are to be processed
|
||||
- the `LastCommit <#commit>`__ > 2/3 signatures for the last block
|
||||
|
||||
The signatures returned along with block ``H`` are those validating
|
||||
block ``H-1``. This can be a little confusing, but we must also consider
|
||||
that the ``Header`` also contains the ``LastCommitHash``. It would be
|
||||
impossible for a Header to include the commits that sign it, as it would
|
||||
cause an infinite loop here. But when we get block ``H``, we find
|
||||
``Header.LastCommitHash``, which must match the hash of ``LastCommit``.
|
||||
|
||||
Header
|
||||
~~~~~~
|
||||
|
||||
The
|
||||
`Header <https://godoc.org/github.com/tendermint/tendermint/types#Header>`__
|
||||
contains lots of information (follow link for up-to-date info). Notably,
|
||||
it maintains the ``Height``, the ``LastBlockID`` (to make it a chain),
|
||||
and hashes of the data, the app state, and the validator set. This is
|
||||
important as the only item that is signed by the validators is the
|
||||
``Header``, and all other data must be validated against one of the
|
||||
merkle hashes in the ``Header``.
|
||||
|
||||
The ``DataHash`` can provide a nice check on the
|
||||
`Data <https://godoc.org/github.com/tendermint/tendermint/types#Data>`__
|
||||
returned in this same block. If you are subscribed to new blocks, via
|
||||
tendermint RPC, in order to display or process the new transactions you
|
||||
should at least validate that the ``DataHash`` is valid. If it is
|
||||
important to verify authenticity, you must wait for the ``LastCommit``
|
||||
from the next block to make sure the block header (including
|
||||
``DataHash``) was properly signed.
|
||||
|
||||
The ``ValidatorHash`` contains a hash of the current
|
||||
`Validators <https://godoc.org/github.com/tendermint/tendermint/types#Validator>`__.
|
||||
Tracking all changes in the validator set is complex, but a client can
|
||||
quickly compare this hash with the `hash of the currently known
|
||||
validators <https://godoc.org/github.com/tendermint/tendermint/types#ValidatorSet.Hash>`__
|
||||
to see if there have been changes.
|
||||
|
||||
The ``AppHash`` serves as the basis for validating any merkle proofs
|
||||
that come from the `ABCI
|
||||
application <https://github.com/tendermint/abci>`__. It represents the
|
||||
state of the actual application, rather than the state of the blockchain
|
||||
itself. This means it's necessary in order to perform any business
|
||||
logic, such as verifying an account balance.
|
||||
|
||||
**Note** After the transactions are committed to a block, they still
|
||||
need to be processed in a separate step, which happens between the
|
||||
blocks. If you find a given transaction in the block at height ``H``,
|
||||
the effects of running that transaction will be first visible in the
|
||||
``AppHash`` from the block header at height ``H+1``.
|
||||
|
||||
Like the ``LastCommit`` issue, this is a requirement of the immutability
|
||||
of the block chain, as the application only applies transactions *after*
|
||||
they are committed to the chain.
|
||||
|
||||
Commit
|
||||
~~~~~~
|
||||
|
||||
The
|
||||
`Commit <https://godoc.org/github.com/tendermint/tendermint/types#Commit>`__
|
||||
contains a set of
|
||||
`Votes <https://godoc.org/github.com/tendermint/tendermint/types#Vote>`__
|
||||
that were made by the validator set to reach consensus on this block.
|
||||
This is the key to the security in any PoS system, and actually no data
|
||||
that cannot be traced back to a block header with a valid set of Votes
|
||||
can be trusted. Thus, getting the Commit data and verifying the votes is
|
||||
extremely important.
|
||||
|
||||
As mentioned above, in order to find the ``precommit votes`` for block
|
||||
header ``H``, we need to query block ``H+1``. Then we need to check the
|
||||
votes, make sure they really are for that block, and properly formatted.
|
||||
Much of this code is implemented in Go in the
|
||||
`light-client <https://github.com/tendermint/light-client>`__ package.
|
||||
If you look at the code, you will notice that we need to provide the
|
||||
``chainID`` of the blockchain in order to properly calculate the votes.
|
||||
This is to protect anyone from swapping votes between chains to fake (or
|
||||
frame) a validator. Also note that this ``chainID`` is in the
|
||||
``genesis.json`` from *Tendermint*, not the ``genesis.json`` from the
|
||||
basecoin app (`that is a different
|
||||
chainID... <https://github.com/tendermint/basecoin/issues/32>`__).
|
||||
|
||||
Once we have those votes, and we calculated the proper `sign
|
||||
bytes <https://godoc.org/github.com/tendermint/tendermint/types#Vote.WriteSignBytes>`__
|
||||
using the chainID and a `nice helper
|
||||
function <https://godoc.org/github.com/tendermint/tendermint/types#SignBytes>`__,
|
||||
we can verify them. The light client is responsible for maintaining a
|
||||
set of validators that we trust. Each vote only stores the validator's
|
||||
``Address``, as well as the ``Signature``. Assuming we have a local copy
|
||||
of the trusted validator set, we can look up the ``Public Key`` of the
|
||||
validator given its ``Address``, then verify that the ``Signature``
|
||||
matches the ``SignBytes`` and ``Public Key``. Then we sum up the total
|
||||
voting power of all validators, whose votes fulfilled all these
|
||||
stringent requirements. If the total number of voting power for a single
|
||||
block is greater than 2/3 of all voting power, then we can finally trust
|
||||
the block header, the AppHash, and the proof we got from the ABCI
|
||||
application.
|
||||
|
||||
Vote Sign Bytes
|
||||
^^^^^^^^^^^^^^^
|
||||
|
||||
The ``sign-bytes`` of a vote is produced by taking a
|
||||
`stable-json <https://github.com/substack/json-stable-stringify>`__-like
|
||||
deterministic JSON `wire <./wire-protocol.html>`__ encoding of
|
||||
the vote (excluding the ``Signature`` field), and wrapping it with
|
||||
``{"chain_id":"my_chain","vote":...}``.
|
||||
|
||||
For example, a precommit vote might have the following ``sign-bytes``:
|
||||
|
||||
.. code:: json
|
||||
|
||||
{"chain_id":"my_chain","vote":{"block_hash":"611801F57B4CE378DF1A3FFF1216656E89209A99","block_parts_header":{"hash":"B46697379DBE0774CC2C3B656083F07CA7E0F9CE","total":123},"height":1234,"round":1,"type":2}}
|
||||
|
||||
Block Hash
|
||||
~~~~~~~~~~
|
||||
|
||||
The `block
|
||||
hash <https://godoc.org/github.com/tendermint/tendermint/types#Block.Hash>`__
|
||||
is the `Simple Tree hash <Merkle-Trees#simple-tree-with-dictionaries>`__
|
||||
of the fields of the block ``Header`` encoded as a list of
|
||||
``KVPair``\ s.
|
||||
|
||||
Transaction
|
||||
~~~~~~~~~~~
|
||||
|
||||
A transaction is any sequence of bytes. It is up to your
|
||||
`ABCI <https://github.com/tendermint/abci>`__ application to accept or
|
||||
reject transactions.
|
||||
|
||||
BlockID
|
||||
~~~~~~~
|
||||
|
||||
Many of these data structures refer to the
|
||||
`BlockID <https://godoc.org/github.com/tendermint/tendermint/types#BlockID>`__,
|
||||
which is the ``BlockHash`` (hash of the block header, also referred to
|
||||
by the next block) along with the ``PartSetHeader``. The
|
||||
``PartSetHeader`` is explained below and is used internally to
|
||||
orchestrate the p2p propagation. For clients, it is basically opaque
|
||||
bytes, but they must match for all votes.
|
||||
|
||||
PartSetHeader
|
||||
~~~~~~~~~~~~~
|
||||
|
||||
The
|
||||
`PartSetHeader <https://godoc.org/github.com/tendermint/tendermint/types#PartSetHeader>`__
|
||||
contains the total number of pieces in a
|
||||
`PartSet <https://godoc.org/github.com/tendermint/tendermint/types#PartSet>`__,
|
||||
and the Merkle root hash of those pieces.
|
||||
|
||||
PartSet
|
||||
~~~~~~~
|
||||
|
||||
PartSet is used to split a byteslice of data into parts (pieces) for
|
||||
transmission. By splitting data into smaller parts and computing a
|
||||
Merkle root hash on the list, you can verify that a part is legitimately
|
||||
part of the complete data, and the part can be forwarded to other peers
|
||||
before all the parts are known. In short, it's a fast way to securely
|
||||
propagate a large chunk of data (like a block) over a gossip network.
|
||||
|
||||
PartSet was inspired by the LibSwift project.
|
||||
|
||||
Usage:
|
||||
|
||||
.. code:: go
|
||||
|
||||
data := RandBytes(2 << 20) // Something large
|
||||
|
||||
partSet := NewPartSetFromData(data)
|
||||
partSet.Total() // Total number of 4KB parts
|
||||
partSet.Count() // Equal to the Total, since we already have all the parts
|
||||
partSet.Hash() // The Merkle root hash
|
||||
partSet.BitArray() // A BitArray of partSet.Total() 1's
|
||||
|
||||
header := partSet.Header() // Send this to the peer
|
||||
header.Total // Total number of parts
|
||||
header.Hash // The merkle root hash
|
||||
|
||||
// Now we'll reconstruct the data from the parts
|
||||
partSet2 := NewPartSetFromHeader(header)
|
||||
partSet2.Total() // Same total as partSet.Total()
|
||||
partSet2.Count() // Zero, since this PartSet doesn't have any parts yet.
|
||||
partSet2.Hash() // Same hash as in partSet.Hash()
|
||||
partSet2.BitArray() // A BitArray of partSet.Total() 0's
|
||||
|
||||
// In a gossip network the parts would arrive in arbitrary order, perhaps
|
||||
// in response to explicit requests for parts, or optimistically in response
|
||||
// to the receiving peer's partSet.BitArray().
|
||||
for !partSet2.IsComplete() {
|
||||
part := receivePartFromGossipNetwork()
|
||||
added, err := partSet2.AddPart(part)
|
||||
if err != nil {
|
||||
// A wrong part,
|
||||
// the merkle trail does not hash to partSet2.Hash()
|
||||
} else if !added {
|
||||
// A duplicate part already received
|
||||
}
|
||||
}
|
||||
|
||||
data2, _ := ioutil.ReadAll(partSet2.GetReader())
|
||||
bytes.Equal(data, data2) // true
|
349
docs/specification/byzantine-consensus-algorithm.rst
Normal file
349
docs/specification/byzantine-consensus-algorithm.rst
Normal file
@@ -0,0 +1,349 @@
|
||||
Byzantine Consensus Algorithm
|
||||
=============================
|
||||
|
||||
Terms
|
||||
-----
|
||||
|
||||
- The network is composed of optionally connected *nodes*. Nodes
|
||||
directly connected to a particular node are called *peers*.
|
||||
- The consensus process in deciding the next block (at some *height*
|
||||
``H``) is composed of one or many *rounds*.
|
||||
- ``NewHeight``, ``Propose``, ``Prevote``, ``Precommit``, and
|
||||
``Commit`` represent state machine states of a round. (aka
|
||||
``RoundStep`` or just "step").
|
||||
- A node is said to be *at* a given height, round, and step, or at
|
||||
``(H,R,S)``, or at ``(H,R)`` in short to omit the step.
|
||||
- To *prevote* or *precommit* something means to broadcast a `prevote
|
||||
vote <https://godoc.org/github.com/tendermint/tendermint/types#Vote>`__
|
||||
or `first precommit
|
||||
vote <https://godoc.org/github.com/tendermint/tendermint/types#FirstPrecommit>`__
|
||||
for something.
|
||||
- A vote *at* ``(H,R)`` is a vote signed with the bytes for ``H`` and
|
||||
``R`` included in its
|
||||
`sign-bytes <block-structure.html#vote-sign-bytes>`__.
|
||||
- *+2/3* is short for "more than 2/3"
|
||||
- *1/3+* is short for "1/3 or more"
|
||||
- A set of +2/3 of prevotes for a particular block or ``<nil>`` at
|
||||
``(H,R)`` is called a *proof-of-lock-change* or *PoLC* for short.
|
||||
|
||||
State Machine Overview
|
||||
----------------------
|
||||
|
||||
At each height of the blockchain a round-based protocol is run to
|
||||
determine the next block. Each round is composed of three *steps*
|
||||
(``Propose``, ``Prevote``, and ``Precommit``), along with two special
|
||||
steps ``Commit`` and ``NewHeight``.
|
||||
|
||||
In the optimal scenario, the order of steps is:
|
||||
|
||||
::
|
||||
|
||||
NewHeight -> (Propose -> Prevote -> Precommit)+ -> Commit -> NewHeight ->...
|
||||
|
||||
The sequence ``(Propose -> Prevote -> Precommit)`` is called a *round*.
|
||||
There may be more than one round required to commit a block at a given
|
||||
height. Examples for why more rounds may be required include:
|
||||
|
||||
- The designated proposer was not online.
|
||||
- The block proposed by the designated proposer was not valid.
|
||||
- The block proposed by the designated proposer did not propagate in
|
||||
time.
|
||||
- The block proposed was valid, but +2/3 of prevotes for the proposed
|
||||
block were not received in time for enough validator nodes by the
|
||||
time they reached the ``Precommit`` step. Even though +2/3 of
|
||||
prevotes are necessary to progress to the next step, at least one
|
||||
validator may have voted ``<nil>`` or maliciously voted for something
|
||||
else.
|
||||
- The block proposed was valid, and +2/3 of prevotes were received for
|
||||
enough nodes, but +2/3 of precommits for the proposed block were not
|
||||
received for enough validator nodes.
|
||||
|
||||
Some of these problems are resolved by moving onto the next round &
|
||||
proposer. Others are resolved by increasing certain round timeout
|
||||
parameters over each successive round.
|
||||
|
||||
State Machine Diagram
|
||||
---------------------
|
||||
|
||||
::
|
||||
|
||||
+-------------------------------------+
|
||||
v |(Wait til `CommitTime+timeoutCommit`)
|
||||
+-----------+ +-----+-----+
|
||||
+----------> | Propose +--------------+ | NewHeight |
|
||||
| +-----------+ | +-----------+
|
||||
| | ^
|
||||
|(Else, after timeoutPrecommit) v |
|
||||
+-----+-----+ +-----------+ |
|
||||
| Precommit | <------------------------+ Prevote | |
|
||||
+-----+-----+ +-----------+ |
|
||||
|(When +2/3 Precommits for block found) |
|
||||
v |
|
||||
+--------------------------------------------------------------------+
|
||||
| Commit |
|
||||
| |
|
||||
| * Set CommitTime = now; |
|
||||
| * Wait for block, then stage/save/commit block; |
|
||||
+--------------------------------------------------------------------+
|
||||
|
||||
Background Gossip
|
||||
-----------------
|
||||
|
||||
A node may not have a corresponding validator private key, but it
|
||||
nevertheless plays an active role in the consensus process by relaying
|
||||
relevant meta-data, proposals, blocks, and votes to its peers. A node
|
||||
that has the private keys of an active validator and is engaged in
|
||||
signing votes is called a *validator-node*. All nodes (not just
|
||||
validator-nodes) have an associated state (the current height, round,
|
||||
and step) and work to make progress.
|
||||
|
||||
Between two nodes there exists a ``Connection``, and multiplexed on top
|
||||
of this connection are fairly throttled ``Channel``\ s of information.
|
||||
An epidemic gossip protocol is implemented among some of these channels
|
||||
to bring peers up to speed on the most recent state of consensus. For
|
||||
example,
|
||||
|
||||
- Nodes gossip ``PartSet`` parts of the current round's proposer's
|
||||
proposed block. A LibSwift inspired algorithm is used to quickly
|
||||
broadcast blocks across the gossip network.
|
||||
- Nodes gossip prevote/precommit votes. A node NODE\_A that is ahead of
|
||||
NODE\_B can send NODE\_B prevotes or precommits for NODE\_B's current
|
||||
(or future) round to enable it to progress forward.
|
||||
- Nodes gossip prevotes for the proposed PoLC (proof-of-lock-change)
|
||||
round if one is proposed.
|
||||
- Nodes gossip to nodes lagging in blockchain height with block
|
||||
`commits <https://godoc.org/github.com/tendermint/tendermint/types#Commit>`__
|
||||
for older blocks.
|
||||
- Nodes opportunistically gossip ``HasVote`` messages to hint peers
|
||||
what votes it already has.
|
||||
- Nodes broadcast their current state to all neighboring peers. (but is
|
||||
not gossiped further)
|
||||
|
||||
There's more, but let's not get ahead of ourselves here.
|
||||
|
||||
Proposals
|
||||
---------
|
||||
|
||||
A proposal is signed and published by the designated proposer at each
|
||||
round. The proposer is chosen by a deterministic and non-choking round
|
||||
robin selection algorithm that selects proposers in proportion to their
|
||||
voting power. (see
|
||||
`implementation <https://github.com/tendermint/tendermint/blob/develop/types/validator_set.go>`__)
|
||||
|
||||
A proposal at ``(H,R)`` is composed of a block and an optional latest
|
||||
``PoLC-Round < R`` which is included iff the proposer knows of one. This
|
||||
hints the network to allow nodes to unlock (when safe) to ensure the
|
||||
liveness property.
|
||||
|
||||
State Machine Spec
|
||||
------------------
|
||||
|
||||
Propose Step (height:H,round:R)
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Upon entering ``Propose``: - The designated proposer proposes a block at
|
||||
``(H,R)``.
|
||||
|
||||
The ``Propose`` step ends: - After ``timeoutProposeR`` after entering
|
||||
``Propose``. --> goto ``Prevote(H,R)`` - After receiving proposal block
|
||||
and all prevotes at ``PoLC-Round``. --> goto ``Prevote(H,R)`` - After
|
||||
`common exit conditions <#common-exit-conditions>`__
|
||||
|
||||
Prevote Step (height:H,round:R)
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Upon entering ``Prevote``, each validator broadcasts its prevote vote.
|
||||
|
||||
- First, if the validator is locked on a block since ``LastLockRound``
|
||||
but now has a PoLC for something else at round ``PoLC-Round`` where
|
||||
``LastLockRound < PoLC-Round < R``, then it unlocks.
|
||||
- If the validator is still locked on a block, it prevotes that.
|
||||
- Else, if the proposed block from ``Propose(H,R)`` is good, it
|
||||
prevotes that.
|
||||
- Else, if the proposal is invalid or wasn't received on time, it
|
||||
prevotes ``<nil>``.
|
||||
|
||||
The ``Prevote`` step ends: - After +2/3 prevotes for a particular block
|
||||
or ``<nil>``. --> goto ``Precommit(H,R)`` - After ``timeoutPrevote``
|
||||
after receiving any +2/3 prevotes. --> goto ``Precommit(H,R)`` - After
|
||||
`common exit conditions <#common-exit-conditions>`__
|
||||
|
||||
Precommit Step (height:H,round:R)
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Upon entering ``Precommit``, each validator broadcasts its precommit
|
||||
vote. - If the validator has a PoLC at ``(H,R)`` for a particular block
|
||||
``B``, it (re)locks (or changes lock to) and precommits ``B`` and sets
|
||||
``LastLockRound = R``. - Else, if the validator has a PoLC at ``(H,R)``
|
||||
for ``<nil>``, it unlocks and precommits ``<nil>``. - Else, it keeps the
|
||||
lock unchanged and precommits ``<nil>``.
|
||||
|
||||
A precommit for ``<nil>`` means "I didn’t see a PoLC for this round, but
|
||||
I did get +2/3 prevotes and waited a bit".
|
||||
|
||||
The Precommit step ends: - After +2/3 precommits for ``<nil>``. --> goto
|
||||
``Propose(H,R+1)`` - After ``timeoutPrecommit`` after receiving any +2/3
|
||||
precommits. --> goto ``Propose(H,R+1)`` - After `common exit
|
||||
conditions <#common-exit-conditions>`__
|
||||
|
||||
common exit conditions
|
||||
^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
- After +2/3 precommits for a particular block. --> goto ``Commit(H)``
|
||||
- After any +2/3 prevotes received at ``(H,R+x)``. --> goto
|
||||
``Prevote(H,R+x)``
|
||||
- After any +2/3 precommits received at ``(H,R+x)``. --> goto
|
||||
``Precommit(H,R+x)``
|
||||
|
||||
Commit Step (height:H)
|
||||
~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
- Set ``CommitTime = now()``
|
||||
- Wait until block is received. --> goto ``NewHeight(H+1)``
|
||||
|
||||
NewHeight Step (height:H)
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
- Move ``Precommits`` to ``LastCommit`` and increment height.
|
||||
- Set ``StartTime = CommitTime+timeoutCommit``
|
||||
- Wait until ``StartTime`` to receive straggler commits. --> goto
|
||||
``Propose(H,0)``
|
||||
|
||||
Proofs
|
||||
------
|
||||
|
||||
Proof of Safety
|
||||
~~~~~~~~~~~~~~~
|
||||
|
||||
Assume that at most -1/3 of the voting power of validators is byzantine.
|
||||
If a validator commits block ``B`` at round ``R``, it's because it saw
|
||||
+2/3 of precommits at round ``R``. This implies that 1/3+ of honest
|
||||
nodes are still locked at round ``R' > R``. These locked validators will
|
||||
remain locked until they see a PoLC at ``R' > R``, but this won't happen
|
||||
because 1/3+ are locked and honest, so at most -2/3 are available to
|
||||
vote for anything other than ``B``.
|
||||
|
||||
Proof of Liveness
|
||||
~~~~~~~~~~~~~~~~~
|
||||
|
||||
If 1/3+ honest validators are locked on two different blocks from
|
||||
different rounds, a proposer's ``PoLC-Round`` will eventually cause
|
||||
nodes locked from the earlier round to unlock. Eventually, the
|
||||
designated proposer will be one that is aware of a PoLC at the later
|
||||
round. Also, ``timeoutProposeR`` increments with round ``R``, while the
|
||||
size of a proposal is capped, so eventually the network is able to
|
||||
"fully gossip" the whole proposal (e.g. the block & PoLC).
|
||||
|
||||
Proof of Fork Accountability
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Define the JSet (justification-vote-set) at height ``H`` of a validator
|
||||
``V1`` to be all the votes signed by the validator at ``H`` along with
|
||||
justification PoLC prevotes for each lock change. For example, if ``V1``
|
||||
signed the following precommits: ``Precommit(B1 @ round 0)``,
|
||||
``Precommit(<nil> @ round 1)``, ``Precommit(B2 @ round 4)`` (note that
|
||||
no precommits were signed for rounds 2 and 3, and that's ok),
|
||||
``Precommit(B1 @ round 0)`` must be justified by a PoLC at round 0, and
|
||||
``Precommit(B2 @ round 4)`` must be justified by a PoLC at round 4; but
|
||||
the precommit for ``<nil>`` at round 1 is not a lock-change by
|
||||
definition so the JSet for ``V1`` need not include any prevotes at round
|
||||
1, 2, or 3 (unless ``V1`` happened to have prevoted for those rounds).
|
||||
|
||||
Further, define the JSet at height ``H`` of a set of validators ``VSet``
|
||||
to be the union of the JSets for each validator in ``VSet``. For a given
|
||||
commit by honest validators at round ``R`` for block ``B`` we can
|
||||
construct a JSet to justify the commit for ``B`` at ``R``. We say that a
|
||||
JSet *justifies* a commit at ``(H,R)`` if all the committers (validators
|
||||
in the commit-set) are each justified in the JSet with no duplicitous
|
||||
vote signatures (by the committers).
|
||||
|
||||
- **Lemma**: When a fork is detected by the existence of two
|
||||
conflicting `commits <./validators.html#committing-a-block>`__,
|
||||
the union of the JSets for both commits (if they can be compiled)
|
||||
must include double-signing by at least 1/3+ of the validator set.
|
||||
**Proof**: The commit cannot be at the same round, because that would
|
||||
immediately imply double-signing by 1/3+. Take the union of the JSets
|
||||
of both commits. If there is no double-signing by at least 1/3+ of
|
||||
the validator set in the union, then no honest validator could have
|
||||
precommitted any different block after the first commit. Yet, +2/3
|
||||
did. Reductio ad absurdum.
|
||||
|
||||
As a corollary, when there is a fork, an external process can determine
|
||||
the blame by requiring each validator to justify all of its round votes.
|
||||
Either we will find 1/3+ who cannot justify at least one of their votes,
|
||||
and/or, we will find 1/3+ who had double-signed.
|
||||
|
||||
Alternative algorithm
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Alternatively, we can take the JSet of a commit to be the "full commit".
|
||||
That is, if light clients and validators do not consider a block to be
|
||||
committed unless the JSet of the commit is also known, then we get the
|
||||
desirable property that if there ever is a fork (e.g. there are two
|
||||
conflicting "full commits"), then 1/3+ of the validators are immediately
|
||||
punishable for double-signing.
|
||||
|
||||
There are many ways to ensure that the gossip network efficiently shares
|
||||
the JSet of a commit. One solution is to add a new message type that
|
||||
tells peers that this node has (or does not have) a +2/3 majority for B
|
||||
(or ``<nil>``) at (H,R), and a bitarray of which votes contributed towards that
|
||||
majority. Peers can react by responding with appropriate votes.
|
||||
|
||||
We will implement such an algorithm for the next iteration of the
|
||||
Tendermint consensus protocol.
|
||||
|
||||
Other potential improvements include adding more data in votes such as
|
||||
the last known PoLC round that caused a lock change, and the last voted
|
||||
round/step (or, we may require that validators not skip any votes). This
|
||||
may make JSet verification/gossip logic easier to implement.
|
||||
|
||||
Censorship Attacks
|
||||
~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Due to the definition of a block
|
||||
`commit <validators.html#committing-a-block>`__, any 1/3+
|
||||
coalition of validators can halt the blockchain by not broadcasting
|
||||
their votes. Such a coalition can also censor particular transactions by
|
||||
rejecting blocks that include these transactions, though this would
|
||||
result in a significant proportion of block proposals to be rejected,
|
||||
which would slow down the rate of block commits of the blockchain,
|
||||
reducing its utility and value. The malicious coalition might also
|
||||
broadcast votes in a trickle so as to grind blockchain block commits to
|
||||
a near halt, or engage in any combination of these attacks.
|
||||
|
||||
If a global active adversary were also involved, it can partition the
|
||||
network in such a way that it may appear that the wrong subset of
|
||||
validators were responsible for the slowdown. This is not just a
|
||||
limitation of Tendermint, but rather a limitation of all consensus
|
||||
protocols whose network is potentially controlled by an active
|
||||
adversary.
|
||||
|
||||
Overcoming Forks and Censorship Attacks
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
For these types of attacks, a subset of the validators through external
|
||||
means should coordinate to sign a reorg-proposal that chooses a fork
|
||||
(and any evidence thereof) and the initial subset of validators with
|
||||
their signatures. Validators who sign such a reorg-proposal forego their
|
||||
collateral on all other forks. Clients should verify the signatures on
|
||||
the reorg-proposal, verify any evidence, and make a judgement or prompt
|
||||
the end-user for a decision. For example, a phone wallet app may prompt
|
||||
the user with a security warning, while a refrigerator may accept any
|
||||
reorg-proposal signed by +½ of the original validators.
|
||||
|
||||
No non-synchronous Byzantine fault-tolerant algorithm can come to
|
||||
consensus when ⅓+ of validators are dishonest, yet a fork assumes that
|
||||
⅓+ of validators have already been dishonest by double-signing or
|
||||
lock-changing without justification. So, signing the reorg-proposal is a
|
||||
coordination problem that cannot be solved by any non-synchronous
|
||||
protocol (i.e. automatically, and without making assumptions about the
|
||||
reliability of the underlying network). It must be provided by means
|
||||
external to the weakly-synchronous Tendermint consensus algorithm. For
|
||||
now, we leave the problem of reorg-proposal coordination to human
|
||||
coordination via internet media. Validators must take care to ensure
|
||||
that there are no significant network partitions, to avoid situations
|
||||
where two conflicting reorg-proposals are signed.
|
||||
|
||||
Assuming that the external coordination medium and protocol is robust,
|
||||
it follows that forks are less of a concern than `censorship
|
||||
attacks <#censorship-attacks>`__.
|
55
docs/specification/configuration.rst
Normal file
55
docs/specification/configuration.rst
Normal file
@@ -0,0 +1,55 @@
|
||||
Configuration
|
||||
=============
|
||||
|
||||
TendermintCore can be configured via a TOML file in
|
||||
``$TMHOME/config.toml``. Some of these parameters can be overridden by
|
||||
command-line flags.
|
||||
|
||||
Config parameters
|
||||
~~~~~~~~~~~~~~~~~
|
||||
|
||||
The main config parameters are defined
|
||||
`here <https://github.com/tendermint/tendermint/blob/master/config/config.go>`__.
|
||||
|
||||
- ``abci``: ABCI transport (socket \| grpc). *Default*: ``socket``
|
||||
- ``db_backend``: Database backend for the blockchain and
|
||||
TendermintCore state. ``leveldb`` or ``memdb``. *Default*:
|
||||
``"leveldb"``
|
||||
- ``db_dir``: Database dir. *Default*: ``"$TMHOME/data"``
|
||||
- ``fast_sync``: Whether to sync faster from the block pool. *Default*:
|
||||
``true``
|
||||
- ``genesis_file``: The location of the genesis file. *Default*:
|
||||
``"$TMHOME/genesis.json"``
|
||||
- ``log_level``: *Default*: ``"state:info,*:error"``
|
||||
- ``moniker``: Name of this node. *Default*: ``"anonymous"``
|
||||
- ``priv_validator_file``: Validator private key file. *Default*:
|
||||
``"$TMHOME/priv_validator.json"``
|
||||
- ``prof_laddr``: Profile listen address. *Default*: ``""``
|
||||
- ``proxy_app``: The ABCI app endpoint. *Default*:
|
||||
``"tcp://127.0.0.1:46658"``
|
||||
|
||||
- ``consensus.max_block_size_txs``: Maximum number of block txs.
|
||||
*Default*: ``10000``
|
||||
- ``consensus.timeout_*``: Various consensus timeout parameters
|
||||
**TODO**
|
||||
- ``consensus.wal_file``: Consensus state WAL. *Default*:
|
||||
``"$TMHOME/data/cswal"``
|
||||
- ``consensus.wal_light``: Whether to use light-mode for Consensus
|
||||
state WAL. *Default*: ``false``
|
||||
|
||||
- ``mempool.*``: Various mempool parameters **TODO**
|
||||
|
||||
- ``p2p.addr_book_file``: Peer address book. *Default*:
|
||||
``"$TMHOME/addrbook.json"``. **NOT USED**
|
||||
- ``p2p.laddr``: Node listen address. (0.0.0.0:0 means any interface,
|
||||
any port). *Default*: ``"0.0.0.0:46656"``
|
||||
- ``p2p.pex``: Enable Peer-Exchange (dev feature). *Default*: ``false``
|
||||
- ``p2p.seeds``: Comma delimited host:port seed nodes. *Default*:
|
||||
``""``
|
||||
- ``p2p.skip_upnp``: Skip UPNP detection. *Default*: ``false``
|
||||
|
||||
- ``rpc.grpc_laddr``: GRPC listen address (BroadcastTx only). Port
|
||||
required. *Default*: ``""``
|
||||
- ``rpc.laddr``: RPC listen address. Port required. *Default*:
|
||||
``"0.0.0.0:46657"``
|
||||
- ``rpc.unsafe``: Enable unsafe rpc methods. *Default*: ``true``
|
34
docs/specification/fast-sync.rst
Normal file
34
docs/specification/fast-sync.rst
Normal file
@@ -0,0 +1,34 @@
|
||||
Fast Sync
|
||||
=========
|
||||
|
||||
Background
|
||||
----------
|
||||
|
||||
In a proof of work blockchain, syncing with the chain is the same
|
||||
process as staying up-to-date with the consensus: download blocks, and
|
||||
look for the one with the most total work. In proof-of-stake, the
|
||||
consensus process is more complex, as it involves rounds of
|
||||
communication between the nodes to determine what block should be
|
||||
committed next. Using this process to sync up with the blockchain from
|
||||
scratch can take a very long time. It's much faster to just download
|
||||
blocks and check the merkle tree of validators than to run the real-time
|
||||
consensus gossip protocol.
|
||||
|
||||
Fast Sync
|
||||
---------
|
||||
|
||||
To support faster syncing, tendermint offers a ``fast-sync`` mode, which
|
||||
is enabled by default, and can be toggled in the ``config.toml`` or via
|
||||
``--fast_sync=false``.
|
||||
|
||||
In this mode, the tendermint daemon will sync hundreds of times faster
|
||||
than if it used the real-time consensus process. Once caught up, the
|
||||
daemon will switch out of fast sync and into the normal consensus mode.
|
||||
After running for some time, the node is considered ``caught up`` if it
|
||||
has at least one peer and its height is at least as high as the max
|
||||
reported peer height. See `the IsCaughtUp
|
||||
method <https://github.com/tendermint/tendermint/blob/b467515719e686e4678e6da4e102f32a491b85a0/blockchain/pool.go#L128>`__.
|
||||
|
||||
If we're lagging sufficiently, we should go back to fast syncing, but
|
||||
this is an open issue:
|
||||
https://github.com/tendermint/tendermint/issues/129
|
73
docs/specification/genesis.rst
Normal file
73
docs/specification/genesis.rst
Normal file
@@ -0,0 +1,73 @@
|
||||
Genesis
|
||||
=======
|
||||
|
||||
The genesis.json file in ``$TMROOT`` defines the initial TendermintCore
|
||||
state upon genesis of the blockchain (`see
|
||||
definition <https://github.com/tendermint/tendermint/blob/master/types/genesis.go>`__).
|
||||
|
||||
NOTE: This does not (yet) specify the application state (e.g. initial
|
||||
distribution of tokens). Currently we leave it up to the application to
|
||||
load the initial application genesis state. In the future, we may
|
||||
include genesis SetOption messages that get passed from TendermintCore
|
||||
to the app upon genesis.
|
||||
|
||||
Fields
|
||||
~~~~~~
|
||||
|
||||
- ``genesis_time``: Official time of blockchain start.
|
||||
- ``chain_id``: ID of the blockchain. This must be unique for every
|
||||
blockchain. If your testnet blockchains do not have unique chain IDs,
|
||||
you will have a bad time.
|
||||
- ``validators``:
|
||||
- ``pub_key``: The first element specifies the pub\_key type. 1 ==
|
||||
Ed25519. The second element is the pubkey bytes.
|
||||
- ``power``: The validator's voting power.
|
||||
- ``name``: Name of the validator (optional).
|
||||
- ``app_hash``: The expected application hash (as returned by the
|
||||
``Commit`` ABCI message) upon genesis. If the app's hash does not
|
||||
match, a warning message is printed.
|
||||
|
||||
Sample genesis.json
|
||||
~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. code:: json
|
||||
|
||||
{
|
||||
"genesis_time": "2016-02-05T06:02:31.526Z",
|
||||
"chain_id": "chain-tTH4mi",
|
||||
"validators": [
|
||||
{
|
||||
"pub_key": [
|
||||
1,
|
||||
"9BC5112CB9614D91CE423FA8744885126CD9D08D9FC9D1F42E552D662BAA411E"
|
||||
],
|
||||
"power": 1,
|
||||
"name": "mach1"
|
||||
},
|
||||
{
|
||||
"pub_key": [
|
||||
1,
|
||||
"F46A5543D51F31660D9F59653B4F96061A740FF7433E0DC1ECBC30BE8494DE06"
|
||||
],
|
||||
"power": 1,
|
||||
"name": "mach2"
|
||||
},
|
||||
{
|
||||
"pub_key": [
|
||||
1,
|
||||
"0E7B423C1635FD07C0FC3603B736D5D27953C1C6CA865BB9392CD79DE1A682BB"
|
||||
],
|
||||
"power": 1,
|
||||
"name": "mach3"
|
||||
},
|
||||
{
|
||||
"pub_key": [
|
||||
1,
|
||||
"4F49237B9A32EB50682EDD83C48CE9CDB1D02A7CFDADCFF6EC8C1FAADB358879"
|
||||
],
|
||||
"power": 1,
|
||||
"name": "mach4"
|
||||
}
|
||||
],
|
||||
"app_hash": "15005165891224E721CB664D15CB972240F5703F"
|
||||
}
|
33
docs/specification/light-client-protocol.rst
Normal file
33
docs/specification/light-client-protocol.rst
Normal file
@@ -0,0 +1,33 @@
|
||||
Light Client Protocol
|
||||
=====================
|
||||
|
||||
Light clients are an important part of the complete blockchain system
|
||||
for most applications. Tendermint provides unique speed and security
|
||||
properties for light client applications.
|
||||
|
||||
See our developing `light-client
|
||||
repository <https://github.com/tendermint/light-client>`__.
|
||||
|
||||
Overview
|
||||
--------
|
||||
|
||||
The objective of the light client protocol is to get a
|
||||
`commit <./validators.html#committing-a-block>`__ for a recent
|
||||
`block hash <./block-structure.html#block-hash>`__ where the commit
|
||||
includes a majority of signatures from the last known validator set.
|
||||
From there, all the application state is verifiable with `merkle
|
||||
proofs <./merkle.html#iavl-tree>`__.
|
||||
|
||||
Properties
|
||||
----------
|
||||
|
||||
- You get the full collateralized security benefits of Tendermint; No
|
||||
need to wait for confirmations.
|
||||
- You get the full speed benefits of Tendermint; transactions commit
|
||||
instantly.
|
||||
- You can get the most recent version of the application state
|
||||
non-interactively (without committing anything to the blockchain).
|
||||
For example, this means that you can get the most recent value of a
|
||||
name from the name-registry without worrying about fork censorship
|
||||
attacks, without posting a commit and waiting for confirmations. It's
|
||||
fast, secure, and free!
|
88
docs/specification/merkle.rst
Normal file
88
docs/specification/merkle.rst
Normal file
@@ -0,0 +1,88 @@
|
||||
Merkle
|
||||
======
|
||||
|
||||
For an overview of Merkle trees, see
|
||||
`wikipedia <https://en.wikipedia.org/wiki/Merkle_tree>`__.
|
||||
|
||||
There are two types of Merkle trees used in Tendermint.
|
||||
|
||||
- ```IAVL+ Tree`` <#iavl-tree>`__: An immutable self-balancing binary
|
||||
tree for persistent application state
|
||||
- ```Simple Tree`` <#simple-tree>`__: A simple compact binary tree for
|
||||
a static list of items
|
||||
|
||||
IAVL+ Tree
|
||||
----------
|
||||
|
||||
The purpose of this data structure is to provide persistent storage for
|
||||
key-value pairs (e.g. account state, name-registrar data, and
|
||||
per-contract data) such that a deterministic merkle root hash can be
|
||||
computed. The tree is balanced using a variant of the `AVL
|
||||
algorithm <http://en.wikipedia.org/wiki/AVL_tree>`__ so all operations
|
||||
are O(log(n)).
|
||||
|
||||
Nodes of this tree are immutable and indexed by its hash. Thus any node
|
||||
serves as an immutable snapshot which lets us stage uncommitted
|
||||
transactions from the mempool cheaply, and we can instantly roll back to
|
||||
the last committed state to process transactions of a newly committed
|
||||
block (which may not be the same set of transactions as those from the
|
||||
mempool).
|
||||
|
||||
In an AVL tree, the heights of the two child subtrees of any node differ
|
||||
by at most one. Whenever this condition is violated upon an update, the
|
||||
tree is rebalanced by creating O(log(n)) new nodes that point to
|
||||
unmodified nodes of the old tree. In the original AVL algorithm, inner
|
||||
nodes can also hold key-value pairs. The AVL+ algorithm (note the plus)
|
||||
modifies the AVL algorithm to keep all values on leaf nodes, while only
|
||||
using branch-nodes to store keys. This simplifies the algorithm while
|
||||
minimizing the size of merkle proofs.
|
||||
|
||||
In Ethereum, the analog is the `Patricia
|
||||
trie <http://en.wikipedia.org/wiki/Radix_tree>`__. There are tradeoffs.
|
||||
Keys do not need to be hashed prior to insertion in IAVL+ trees, so this
|
||||
provides faster iteration in the key space which may benefit some
|
||||
applications. The logic is simpler to implement, requiring only two
|
||||
types of nodes -- inner nodes and leaf nodes. The IAVL+ tree is a binary
|
||||
tree, so merkle proofs are much shorter than the base 16 Patricia trie.
|
||||
On the other hand, while IAVL+ trees provide a deterministic merkle root
|
||||
hash, it depends on the order of updates. In practice this shouldn't be
|
||||
a problem, since you can efficiently encode the tree structure when
|
||||
serializing the tree contents.
|
||||
|
||||
Simple Tree
|
||||
-----------
|
||||
|
||||
For merkelizing smaller static lists, use the Simple Tree. The
|
||||
transactions and validation signatures of a block are hashed using this
|
||||
simple merkle tree logic.
|
||||
|
||||
If the number of items is not a power of two, the tree will not be full
|
||||
and some leaf nodes will be at different levels. Simple Tree tries to
|
||||
keep both sides of the tree the same size, but the left side may be one
|
||||
greater.
|
||||
|
||||
::
|
||||
|
||||
Simple Tree with 6 items Simple Tree with 7 items
|
||||
|
||||
* *
|
||||
/ \ / \
|
||||
/ \ / \
|
||||
/ \ / \
|
||||
/ \ / \
|
||||
* * * *
|
||||
/ \ / \ / \ / \
|
||||
/ \ / \ / \ / \
|
||||
/ \ / \ / \ / \
|
||||
* h2 * h5 * * * h6
|
||||
/ \ / \ / \ / \ / \
|
||||
h0 h1 h3 h4 h0 h1 h2 h3 h4 h5
|
||||
|
||||
Simple Tree with Dictionaries
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
The Simple Tree is used to merkelize a list of items, so to merkelize a
|
||||
(short) dictionary of key-value pairs, encode the dictionary as an
|
||||
ordered list of ``KVPair`` structs. The block hash is such a hash
|
||||
derived from all the fields of the block ``Header``. The state hash is
|
||||
similarly derived.
|
188
docs/specification/rpc.rst
Normal file
188
docs/specification/rpc.rst
Normal file
@@ -0,0 +1,188 @@
|
||||
RPC
|
||||
===
|
||||
|
||||
Coming soon: RPC docs powered by `slate <https://github.com/lord/slate>`__. Until then, read on.
|
||||
|
||||
Tendermint supports the following RPC protocols:
|
||||
|
||||
- URI over HTTP
|
||||
- JSONRPC over HTTP
|
||||
- JSONRPC over websockets
|
||||
|
||||
Tendermint RPC is built using `our own RPC
|
||||
library <https://github.com/tendermint/tendermint/tree/master/rpc/lib>`__.
|
||||
Documentation and tests for that library can be found in the
|
||||
``tendermint/rpc/lib`` directory.
|
||||
|
||||
Configuration
|
||||
~~~~~~~~~~~~~
|
||||
|
||||
Set the ``laddr`` config parameter under ``[rpc]`` table in the
|
||||
$TMHOME/config.toml file or the ``--rpc.laddr`` command-line flag to the
|
||||
desired protocol://host:port setting. Default: ``tcp://0.0.0.0:46657``.
|
||||
|
||||
Arguments
|
||||
~~~~~~~~~
|
||||
|
||||
Arguments which expect strings or byte arrays may be passed as quoted
|
||||
strings, like ``"abc"`` or as ``0x``-prefixed strings, like
|
||||
``0x616263``.
|
||||
|
||||
URI/HTTP
|
||||
~~~~~~~~
|
||||
|
||||
Example request:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
curl -s 'http://localhost:46657/broadcast_tx_sync?tx="abc"' | jq .
|
||||
|
||||
Response:
|
||||
|
||||
.. code:: json
|
||||
|
||||
{
|
||||
"error": "",
|
||||
"result": {
|
||||
"hash": "2B8EC32BA2579B3B8606E42C06DE2F7AFA2556EF",
|
||||
"log": "",
|
||||
"data": "",
|
||||
"code": 0
|
||||
},
|
||||
"id": "",
|
||||
"jsonrpc": "2.0"
|
||||
}
|
||||
|
||||
The first entry in the result-array (``96``) is the method this response
|
||||
correlates with. ``96`` refers to "ResultTypeBroadcastTx", see
|
||||
`responses.go <https://github.com/tendermint/tendermint/blob/master/rpc/core/types/responses.go>`__
|
||||
for a complete overview.
|
||||
|
||||
JSONRPC/HTTP
|
||||
~~~~~~~~~~~~
|
||||
|
||||
JSONRPC requests can be POST'd to the root RPC endpoint via HTTP (e.g.
|
||||
``http://localhost:46657/``).
|
||||
|
||||
Example request:
|
||||
|
||||
.. code:: json
|
||||
|
||||
{
|
||||
"method": "broadcast_tx_sync",
|
||||
"jsonrpc": "2.0",
|
||||
"params": [ "abc" ],
|
||||
"id": "dontcare"
|
||||
}
|
||||
|
||||
JSONRPC/websockets
|
||||
~~~~~~~~~~~~~~~~~~
|
||||
|
||||
JSONRPC requests can be made via websocket. The websocket endpoint is at
|
||||
``/websocket``, e.g. ``http://localhost:46657/websocket``. Asynchronous
|
||||
RPC functions like event ``subscribe`` and ``unsubscribe`` are only
|
||||
available via websockets.
|
||||
|
||||
Endpoints
|
||||
~~~~~~~~~
|
||||
|
||||
An HTTP Get request to the root RPC endpoint (e.g.
|
||||
``http://localhost:46657``) shows a list of available endpoints.
|
||||
|
||||
::
|
||||
|
||||
Available endpoints:
|
||||
http://localhost:46657/abci_info
|
||||
http://localhost:46657/dump_consensus_state
|
||||
http://localhost:46657/genesis
|
||||
http://localhost:46657/net_info
|
||||
http://localhost:46657/num_unconfirmed_txs
|
||||
http://localhost:46657/status
|
||||
http://localhost:46657/unconfirmed_txs
|
||||
http://localhost:46657/unsafe_flush_mempool
|
||||
http://localhost:46657/unsafe_stop_cpu_profiler
|
||||
http://localhost:46657/validators
|
||||
|
||||
Endpoints that require arguments:
|
||||
http://localhost:46657/abci_query?path=_&data=_&prove=_
|
||||
http://localhost:46657/block?height=_
|
||||
http://localhost:46657/blockchain?minHeight=_&maxHeight=_
|
||||
http://localhost:46657/broadcast_tx_async?tx=_
|
||||
http://localhost:46657/broadcast_tx_commit?tx=_
|
||||
http://localhost:46657/broadcast_tx_sync?tx=_
|
||||
http://localhost:46657/commit?height=_
|
||||
http://localhost:46657/dial_seeds?seeds=_
|
||||
http://localhost:46657/subscribe?event=_
|
||||
http://localhost:46657/tx?hash=_&prove=_
|
||||
http://localhost:46657/unsafe_start_cpu_profiler?filename=_
|
||||
http://localhost:46657/unsafe_write_heap_profile?filename=_
|
||||
http://localhost:46657/unsubscribe?event=_
|
||||
|
||||
tx
|
||||
~~
|
||||
|
||||
Returns a transaction matching the given transaction hash.
|
||||
|
||||
**Parameters**
|
||||
|
||||
1. hash - the transaction hash
|
||||
2. prove - include a proof of the transaction inclusion in the block in
|
||||
the result (optional, default: false)
|
||||
|
||||
**Returns**
|
||||
|
||||
- ``proof``: the ``types.TxProof`` object
|
||||
- ``tx``: ``[]byte`` - the transaction
|
||||
- ``tx_result``: the ``abci.Result`` object
|
||||
- ``index``: ``int`` - index of the transaction
|
||||
- ``height``: ``int`` - height of the block where this transaction was
|
||||
in
|
||||
|
||||
**Example**
|
||||
|
||||
.. code:: bash
|
||||
|
||||
curl -s 'http://localhost:46657/broadcast_tx_commit?tx="abc"' | jq .
|
||||
# {
|
||||
# "error": "",
|
||||
# "result": {
|
||||
# "hash": "2B8EC32BA2579B3B8606E42C06DE2F7AFA2556EF",
|
||||
# "log": "",
|
||||
# "data": "",
|
||||
# "code": 0
|
||||
# },
|
||||
# "id": "",
|
||||
# "jsonrpc": "2.0"
|
||||
# }
|
||||
|
||||
curl -s 'http://localhost:46657/tx?hash=0x2B8EC32BA2579B3B8606E42C06DE2F7AFA2556EF' | jq .
|
||||
# {
|
||||
# "error": "",
|
||||
# "result": {
|
||||
# "proof": {
|
||||
# "Proof": {
|
||||
# "aunts": []
|
||||
# },
|
||||
# "Data": "YWJjZA==",
|
||||
# "RootHash": "2B8EC32BA2579B3B8606E42C06DE2F7AFA2556EF",
|
||||
# "Total": 1,
|
||||
# "Index": 0
|
||||
# },
|
||||
# "tx": "YWJjZA==",
|
||||
# "tx_result": {
|
||||
# "log": "",
|
||||
# "data": "",
|
||||
# "code": 0
|
||||
# },
|
||||
# "index": 0,
|
||||
# "height": 52
|
||||
# },
|
||||
# "id": "",
|
||||
# "jsonrpc": "2.0"
|
||||
# }
|
||||
|
||||
More Examples
|
||||
~~~~~~~~~~~~~
|
||||
|
||||
See the various bash tests using curl in ``test/``, and examples using
|
||||
the ``Go`` API in ``rpc/client/``.
|
73
docs/specification/secure-p2p.rst
Normal file
73
docs/specification/secure-p2p.rst
Normal file
@@ -0,0 +1,73 @@
|
||||
Secure P2P
|
||||
==========
|
||||
|
||||
The Tendermint p2p protocol uses an authenticated encryption scheme
|
||||
based on the `Station-to-Station
|
||||
Protocol <https://en.wikipedia.org/wiki/Station-to-Station_protocol>`__.
|
||||
The implementation uses
|
||||
`golang's <https://godoc.org/golang.org/x/crypto/nacl/box>`__ `nacl
|
||||
box <http://nacl.cr.yp.to/box.html>`__ for the actual authenticated
|
||||
encryption algorithm.
|
||||
|
||||
Each peer generates an ED25519 key-pair to use as a persistent
|
||||
(long-term) id.
|
||||
|
||||
When two peers establish a TCP connection, they first each generate an
|
||||
ephemeral ED25519 key-pair to use for this session, and send each other
|
||||
their respective ephemeral public keys. This happens in the clear.
|
||||
|
||||
They then each compute the shared secret. The shared secret is the
|
||||
multiplication of the peer's ephemeral private key by the other peer's
|
||||
ephemeral public key. The result is the same for both peers by the magic
|
||||
of `elliptic
|
||||
curves <https://en.wikipedia.org/wiki/Elliptic_curve_cryptography>`__.
|
||||
The shared secret is used as the symmetric key for the encryption
|
||||
algorithm.
|
||||
|
||||
The two ephemeral public keys are sorted to establish a canonical order.
|
||||
Then a 24-byte nonce is generated by concatenating the public keys and
|
||||
hashing them with Ripemd160. Note Ripemd160 produces 20-byte hashes, so
|
||||
the nonce ends with four 0s.
|
||||
|
||||
The nonce is used to seed the encryption - it is critical that the same
|
||||
nonce never be used twice with the same private key. For convenience,
|
||||
the last bit of the nonce is flipped, giving us two nonces: one for
|
||||
encrypting our own messages, one for decrypting our peer's. Which ever
|
||||
peer has the higher public key uses the "bit-flipped" nonce for
|
||||
encryption.
|
||||
|
||||
Now, a challenge is generated by concatenating the ephemeral public keys
|
||||
and taking the SHA256 hash.
|
||||
|
||||
Each peer signs the challenge with their persistent private key, and
|
||||
sends the other peer an AuthSigMsg, containing their persistent public
|
||||
key and the signature. On receiving an AuthSigMsg, the peer verifies the
|
||||
signature.
|
||||
|
||||
The peers are now authenticated.
|
||||
|
||||
All future communications can now be encrypted using the shared secret
|
||||
and the generated nonces, where each nonce is incremented by one each
|
||||
time it is used. The communications maintain Perfect Forward Secrecy, as
|
||||
the persistent key pair was not used for generating secrets - only for
|
||||
authenticating.
|
||||
|
||||
Caveat
|
||||
------
|
||||
|
||||
This system is still vulnerable to a Man-In-The-Middle attack if the
|
||||
persistent public key of the remote node is not known in advance. The
|
||||
only way to mitigate this is with a public key authentication system,
|
||||
such as the Web-of-Trust or Certificate Authorities. In our case, we can
|
||||
use the blockchain itself as a certificate authority to ensure that we
|
||||
are connected to at least one validator.
|
||||
|
||||
Additional Reading
|
||||
------------------
|
||||
|
||||
- `Implementation <https://github.com/tendermint/go-p2p/blob/master/secret_connection.go#L49>`__
|
||||
- `Original STS paper by Whitfield Diffie, Paul C. van Oorschot and
|
||||
Michael J.
|
||||
Wiener <http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.216.6107&rep=rep1&type=pdf>`__
|
||||
- `Further work on secret
|
||||
handshakes <https://dominictarr.github.io/secret-handshake-paper/shs.pdf>`__
|
44
docs/specification/validators.rst
Normal file
44
docs/specification/validators.rst
Normal file
@@ -0,0 +1,44 @@
|
||||
Validators
|
||||
==========
|
||||
|
||||
Validators are responsible for committing new blocks in the blockchain.
|
||||
These validators participate in the consensus protocol by broadcasting
|
||||
*votes* which contain cryptographic signatures signed by each
|
||||
validator's public key.
|
||||
|
||||
Some Proof-of-Stake consensus algorithms aim to create a "completely"
|
||||
decentralized system where all stakeholders (even those who are not
|
||||
always available online) participate in the committing of blocks.
|
||||
Tendermint has a different approach to block creation. Validators are
|
||||
expected to be online, and the set of validators is permissioned/curated
|
||||
by some external process. Proof-of-stake is not required, but can be
|
||||
implemented on top of Tendermint consensus. That is, validators may be
|
||||
required to post collateral on-chain, off-chain, or may not be required
|
||||
to post any collateral at all.
|
||||
|
||||
Validators have a cryptographic key-pair and an associated amount of
|
||||
"voting power". Voting power need not be the same.
|
||||
|
||||
Becoming a Validator
|
||||
--------------------
|
||||
|
||||
There are two ways to become a validator.
|
||||
|
||||
1. They can be pre-established in the `genesis
|
||||
state <./genesis.html>`__
|
||||
2. The `ABCI app responds to the EndBlock
|
||||
message <https://github.com/tendermint/abci>`__ with changes to the
|
||||
existing validator set.
|
||||
|
||||
Committing a Block
|
||||
------------------
|
||||
|
||||
*+2/3 is short for "more than 2/3"*
|
||||
|
||||
A block is committed when +2/3 of the validator set sign `precommit
|
||||
votes <./block-structure.html#vote>`__ for that block at the same
|
||||
``round``. The +2/3 set of precommit votes is
|
||||
called a `*commit* <./block-structure.html#commit>`__. While any
|
||||
+2/3 set of precommits for the same block at the same height&round can
|
||||
serve as validation, the canonical commit is included in the next block
|
||||
(see `LastCommit <./block-structure.html>`__).
|
172
docs/specification/wire-protocol.rst
Normal file
172
docs/specification/wire-protocol.rst
Normal file
@@ -0,0 +1,172 @@
|
||||
Wire Protocol
|
||||
=============
|
||||
|
||||
The `Tendermint wire protocol <https://github.com/tendermint/go-wire>`__
|
||||
encodes data in `c-style binary <#binary>`__ and `JSON <#json>`__ form.
|
||||
|
||||
Supported types
|
||||
---------------
|
||||
|
||||
- Primitive types
|
||||
- ``uint8`` (aka ``byte``), ``uint16``, ``uint32``, ``uint64``
|
||||
- ``int8``, ``int16``, ``int32``, ``int64``
|
||||
- ``uint``, ``int``: variable length (un)signed integers
|
||||
- ``string``, ``[]byte``
|
||||
- ``time``
|
||||
- Derived types
|
||||
- structs
|
||||
- var-length arrays of a particular type
|
||||
- fixed-length arrays of a particular type
|
||||
- interfaces: registered union types preceded by a ``type byte``
|
||||
- pointers
|
||||
|
||||
Binary
|
||||
------
|
||||
|
||||
**Fixed-length primitive types** are encoded with 1, 2, 4, or 8 big-endian
|
||||
bytes. - ``uint8`` (aka ``byte``), ``uint16``, ``uint32``, ``uint64``:
|
||||
takes 1, 2, 4, and 8 bytes respectively - ``int8``, ``int16``, ``int32``,
|
||||
``int64``: takes 1, 2, 4, and 8 bytes respectively - ``time``: ``int64``
|
||||
representation of nanoseconds since epoch
|
||||
|
||||
**Variable-length integers** are encoded with a single leading byte
|
||||
representing the length of the following big-endian bytes. For signed
|
||||
negative integers, the most significant bit of the leading byte is a 1.
|
||||
|
||||
- ``uint``: 1-byte length prefixed variable-size (0 ~ 255 bytes)
|
||||
unsigned integers
|
||||
- ``int``: 1-byte length prefixed variable-size (0 ~ 127 bytes) signed
|
||||
integers
|
||||
|
||||
NOTE: While the number 0 (zero) is encoded with a single byte ``x00``,
|
||||
the number 1 (one) takes two bytes to represent: ``x0101``. This isn't
|
||||
the most efficient representation, but the rules are easier to remember.
|
||||
|
||||
+---------------+----------------+----------------+
|
||||
| number | binary | binary ``int`` |
|
||||
| | ``uint`` | |
|
||||
+===============+================+================+
|
||||
| 0 | ``x00`` | ``x00`` |
|
||||
+---------------+----------------+----------------+
|
||||
| 1 | ``x0101`` | ``x0101`` |
|
||||
+---------------+----------------+----------------+
|
||||
| 2 | ``x0102`` | ``x0102`` |
|
||||
+---------------+----------------+----------------+
|
||||
| 256 | ``x020100`` | ``x020100`` |
|
||||
+---------------+----------------+----------------+
|
||||
| 2^(127*8)-1   | ``x7FFFFF...`` | ``x7FFFFF...`` |
+---------------+----------------+----------------+
| 2^(127*8)     | ``x800100...`` | overflow       |
+---------------+----------------+----------------+
| 2^(255*8)-1   | ``xFFFFFF...`` | overflow       |
+---------------+----------------+----------------+
| -1            | n/a            | ``x8101``      |
+---------------+----------------+----------------+
| -2            | n/a            | ``x8102``      |
+---------------+----------------+----------------+
| -256          | n/a            | ``x820100``    |
|
||||
|
||||
**Structures** are encoded by encoding the field values in order of
|
||||
declaration.
|
||||
|
||||
.. code:: go
|
||||
|
||||
type Foo struct {
|
||||
MyString string
|
||||
MyUint32 uint32
|
||||
}
|
||||
var foo = Foo{"bar", math.MaxUint32}
|
||||
|
||||
/* The binary representation of foo:
|
||||
0103626172FFFFFFFF
|
||||
0103: `int` encoded length of string, here 3
|
||||
626172: 3 bytes of string "bar"
|
||||
FFFFFFFF: 4 bytes of uint32 MaxUint32
|
||||
*/
|
||||
|
||||
**Variable-length arrays** are encoded with a leading ``int`` denoting
|
||||
the length of the array followed by the binary representation of the
|
||||
items. **Fixed-length arrays** are similar but aren't preceded by the
|
||||
leading ``int``.
|
||||
|
||||
.. code:: go
|
||||
|
||||
foos := []Foo{foo, foo}
|
||||
|
||||
/* The binary representation of foos:
|
||||
01020103626172FFFFFFFF0103626172FFFFFFFF
|
||||
0102: `int` encoded length of array, here 2
|
||||
0103626172FFFFFFFF: the first `foo`
|
||||
0103626172FFFFFFFF: the second `foo`
|
||||
*/
|
||||
|
||||
foos := [2]Foo{foo, foo} // fixed-length array
|
||||
|
||||
/* The binary representation of foos:
|
||||
0103626172FFFFFFFF0103626172FFFFFFFF
|
||||
0103626172FFFFFFFF: the first `foo`
|
||||
0103626172FFFFFFFF: the second `foo`
|
||||
*/
|
||||
|
||||
**Interfaces** can represent one of any number of concrete types. The
|
||||
concrete types of an interface must first be declared with their
|
||||
corresponding ``type byte``. An interface is then encoded with the
|
||||
leading ``type byte``, then the binary encoding of the underlying
|
||||
concrete type.
|
||||
|
||||
NOTE: The byte ``x00`` is reserved for the ``nil`` interface value and
|
||||
``nil`` pointer values.
|
||||
|
||||
.. code:: go
|
||||
|
||||
type Animal interface{}
|
||||
type Dog uint32
|
||||
type Cat string
|
||||
|
||||
RegisterInterface(
|
||||
struct{ Animal }{}, // Convenience for referencing the 'Animal' interface
|
||||
ConcreteType{Dog(0), 0x01}, // Register the byte 0x01 to denote a Dog
|
||||
ConcreteType{Cat(""), 0x02}, // Register the byte 0x02 to denote a Cat
|
||||
)
|
||||
|
||||
var animal Animal = Dog(02)
|
||||
|
||||
/* The binary representation of animal:
|
||||
010102
|
||||
01: the type byte for a `Dog`
|
||||
0102: the bytes of Dog(02)
|
||||
*/
|
||||
|
||||
**Pointers** are encoded with a single leading byte ``x00`` for ``nil``
|
||||
pointers, otherwise encoded with a leading byte ``x01`` followed by the
|
||||
binary encoding of the value pointed to.
|
||||
|
||||
NOTE: It's easy to convert pointer types into interface types, since the
|
||||
``type byte`` ``x00`` is always ``nil``.
|
||||
|
||||
JSON
|
||||
----
|
||||
|
||||
The JSON codec is compatible with the ```binary`` <#binary>`__ codec,
|
||||
and is fairly intuitive if you're already familiar with golang's JSON
|
||||
encoding. Some quirks are noted below:
|
||||
|
||||
- variable-length and fixed-length bytes are encoded as uppercase
|
||||
hexadecimal strings
|
||||
- interface values are encoded as an array of two items:
|
||||
``[type_byte, concrete_value]``
|
||||
- times are encoded as rfc2822 strings
|
356
docs/using-tendermint.rst
Normal file
356
docs/using-tendermint.rst
Normal file
@@ -0,0 +1,356 @@
|
||||
Using Tendermint
|
||||
================
|
||||
|
||||
This is a guide to using the ``tendermint`` program from the command
|
||||
line. It assumes only that you have the ``tendermint`` binary installed
|
||||
and have some rudimentary idea of what Tendermint and ABCI are.
|
||||
|
||||
You can see the help menu with ``tendermint --help``, and the version
|
||||
number with ``tendermint version``.
|
||||
|
||||
Directory Root
|
||||
--------------
|
||||
|
||||
The default directory for blockchain data is ``~/.tendermint``. Override
|
||||
this by setting the ``TMROOT`` environment variable.
|
||||
|
||||
Initialize
|
||||
----------
|
||||
|
||||
Initialize the root directory by running:
|
||||
|
||||
::
|
||||
|
||||
tendermint init
|
||||
|
||||
This will create a new private key (``priv_validator.json``), and a
|
||||
genesis file (``genesis.json``) containing the associated public key.
|
||||
This is all that's necessary to run a local testnet with one validator.
|
||||
|
||||
For more elaborate initialization, see our `testnet deployment
|
||||
tool <https://github.com/tendermint/tools/tree/master/mintnet-kubernetes>`__.
|
||||
|
||||
Run
|
||||
---
|
||||
|
||||
To run a tendermint node, use
|
||||
|
||||
::
|
||||
|
||||
tendermint node
|
||||
|
||||
By default, Tendermint will try to connect to an ABCI application on
|
||||
`127.0.0.1:46658 <127.0.0.1:46658>`__. If you have the ``dummy`` ABCI
|
||||
app installed, run it in another window. If you don't, kill tendermint
|
||||
and run an in-process version with
|
||||
|
||||
::
|
||||
|
||||
tendermint node --proxy_app=dummy
|
||||
|
||||
After a few seconds you should see blocks start streaming in. Note that
|
||||
blocks are produced regularly, even if there are no transactions. This
|
||||
changes `with this pull
|
||||
request <https://github.com/tendermint/tendermint/pull/584>`__.
|
||||
|
||||
Tendermint supports in-process versions of the dummy, counter, and nil
|
||||
apps that ship as examples in the `ABCI
|
||||
repository <https://github.com/tendermint/abci>`__. It's easy to compile
|
||||
your own app in-process with tendermint if it's written in Go. If your
|
||||
app is not written in Go, simply run it in another process, and use the
|
||||
``--proxy_app`` flag to specify the address of the socket it is
|
||||
listening on, for instance
|
||||
|
||||
::
|
||||
|
||||
tendermint node --proxy_app=/var/run/abci.sock
|
||||
|
||||
Transactions
|
||||
------------
|
||||
|
||||
To send a transaction, use ``curl`` to make requests to the Tendermint
|
||||
RPC server:
|
||||
|
||||
::
|
||||
|
||||
curl http://localhost:46657/broadcast_tx_commit?tx=\"abcd\"
|
||||
|
||||
For handling responses, we recommend you `install the jsonpp
|
||||
tool <http://jmhodges.github.io/jsonpp/>`__ to pretty print the JSON.
|
||||
|
||||
We can see the chain's status at the ``/status`` end-point:
|
||||
|
||||
::
|
||||
|
||||
curl http://localhost:46657/status | jsonpp
|
||||
|
||||
and the ``latest_app_hash`` in particular:
|
||||
|
||||
::
|
||||
|
||||
curl http://localhost:46657/status | jsonpp | grep app_hash
|
||||
|
||||
Visit http://localhost:46657 in your browser to see the list of other
|
||||
endpoints. Some take no arguments (like ``/status``), while others
|
||||
specify the argument name and use ``_`` as a placeholder.
|
||||
|
||||
Reset
|
||||
-----
|
||||
|
||||
**WARNING: UNSAFE** Only do this in development and only if you can
|
||||
afford to lose all blockchain data!
|
||||
|
||||
To reset a blockchain, stop the node, remove the ``~/.tendermint/data``
|
||||
directory and run
|
||||
|
||||
::
|
||||
|
||||
tendermint unsafe_reset_priv_validator
|
||||
|
||||
This final step is necessary to reset the ``priv_validator.json``, which
|
||||
otherwise prevents you from making conflicting votes in the consensus
|
||||
(something that could get you in trouble if you do it on a real
|
||||
blockchain). If you don't reset the ``priv_validator.json``, your fresh
|
||||
new blockchain will not make any blocks.
|
||||
|
||||
Configuration
|
||||
-------------
|
||||
|
||||
Tendermint uses a ``config.toml`` for configuration. For details, see
|
||||
`the documentation <./specification/configuration.html>`__.
|
||||
|
||||
Notable options include the socket address of the application
|
||||
(``proxy_app``), the listening address of the tendermint peer
|
||||
(``p2p.laddr``), and the listening address of the rpc server
|
||||
(``rpc.laddr``).
|
||||
|
||||
Some fields from the config file can be overwritten with flags.
|
||||
|
||||
Broadcast API
|
||||
-------------
|
||||
|
||||
Earlier, we used the ``broadcast_tx_commit`` endpoint to send a
|
||||
transaction. When a transaction is sent to a tendermint node, it will
|
||||
run via ``CheckTx`` against the application. If it passes ``CheckTx``,
|
||||
it will be included in the mempool, broadcast to other peers, and
|
||||
eventually included in a block.
|
||||
|
||||
Since there are multiple phases to processing a transaction, we offer
|
||||
multiple endpoints to broadcast a transaction:
|
||||
|
||||
::
|
||||
|
||||
/broadcast_tx_async
|
||||
/broadcast_tx_sync
|
||||
/broadcast_tx_commit
|
||||
|
||||
These correspond to no-processing, processing through the mempool, and
|
||||
processing through a block, respectively. That is,
|
||||
``broadcast_tx_async`` will return right away without waiting to hear
|
||||
if the transaction is even valid, while ``broadcast_tx_sync`` will
|
||||
return with the result of running the transaction through ``CheckTx``.
|
||||
Using ``broadcast_tx_commit`` will wait until the transaction is
|
||||
committed in a block or until some timeout is reached, but will return
|
||||
right away if the transaction does not pass ``CheckTx``. The return
|
||||
value for ``broadcast_tx_commit`` includes two fields, ``check_tx`` and
|
||||
``deliver_tx``, pertaining to the result of running the transaction
|
||||
through those ABCI messages.
|
||||
|
||||
The benefit of using ``broadcast_tx_commit`` is that the request returns
|
||||
after the transaction is committed (ie. included in a block), but that
|
||||
can take on the order of a second. For a quick result, use
|
||||
``broadcast_tx_sync``, but the transaction will not be committed until
|
||||
later, and by that point its effect on the state may change.
|
||||
|
||||
Tendermint Networks
|
||||
-------------------
|
||||
|
||||
When ``tendermint init`` is run, both a ``genesis.json`` and
|
||||
``priv_validator.json`` are created in ``~/.tendermint``. The
|
||||
``genesis.json`` might look like:
|
||||
|
||||
::
|
||||
|
||||
{
|
||||
"app_hash": "",
|
||||
"chain_id": "test-chain-HZw6TB",
|
||||
"genesis_time": "0001-01-01T00:00:00.000Z",
|
||||
"validators": [
|
||||
{
|
||||
"power": 10,
|
||||
"name": "",
|
||||
"pub_key": [
|
||||
1,
|
||||
"5770B4DD55B3E08B7F5711C48B516347D8C33F47C30C226315D21AA64E0DFF2E"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
And the ``priv_validator.json``:
|
||||
|
||||
::
|
||||
|
||||
{
|
||||
"address": "4F4D895F882A18E1D1FC608D102601DA8D3570E5",
|
||||
"last_height": 0,
|
||||
"last_round": 0,
|
||||
"last_signature": null,
|
||||
"last_signbytes": "",
|
||||
"last_step": 0,
|
||||
"priv_key": [
|
||||
1,
|
||||
"F9FA3CD435BDAE54D0BCA8F1BC289D718C23D855C6DB21E8543F5E4F457E62805770B4DD55B3E08B7F5711C48B516347D8C33F47C30C226315D21AA64E0DFF2E"
|
||||
],
|
||||
"pub_key": [
|
||||
1,
|
||||
"5770B4DD55B3E08B7F5711C48B516347D8C33F47C30C226315D21AA64E0DFF2E"
|
||||
]
|
||||
}
|
||||
|
||||
The ``priv_validator.json`` actually contains a private key, and should
|
||||
thus be kept absolutely secret; for now we work with the plain text.
|
||||
Note the ``last_`` fields, which are used to prevent us from signing
|
||||
conflicting messages.
|
||||
|
||||
Note also that the ``pub_key`` (the public key) in the
|
||||
``priv_validator.json`` is also present in the ``genesis.json``.
|
||||
|
||||
The genesis file contains the list of public keys which may participate
|
||||
in the consensus, and their corresponding voting power. Greater than 2/3
|
||||
of the voting power must be active (ie. the corresponding private keys
|
||||
must be producing signatures) for the consensus to make progress. In our
|
||||
case, the genesis file contains the public key of our
|
||||
``priv_validator.json``, so a tendermint node started with the default
|
||||
root directory will be able to make new blocks, as we've already seen.
|
||||
|
||||
If we want to add more nodes to the network, we have two choices: we can
|
||||
add a new validator node, who will also participate in the consensus by
|
||||
proposing blocks and voting on them, or we can add a new non-validator
|
||||
node, who will not participate directly, but will verify and keep up
|
||||
with the consensus protocol.
|
||||
|
||||
Peers
|
||||
~~~~~
|
||||
|
||||
To connect to peers on start-up, specify them in the ``config.toml`` or
|
||||
on the command line.
|
||||
|
||||
For instance,
|
||||
|
||||
::
|
||||
|
||||
tendermint node --p2p.seeds "1.2.3.4:46656,5.6.7.8:46656"
|
||||
|
||||
Alternatively, you can use the ``/dial_seeds`` endpoint of the RPC to
|
||||
specify peers for a running node to connect to:
|
||||
|
||||
::
|
||||
|
||||
curl --data-urlencode "seeds=[\"1.2.3.4:46656\",\"5.6.7.8:46656\"]" localhost:46657/dial_seeds
|
||||
|
||||
Additionally, the peer-exchange protocol can be enabled using the
|
||||
``--pex`` flag, though this feature is `still under
|
||||
development <https://github.com/tendermint/tendermint/issues/598>`__. If
|
||||
``--pex`` is enabled, peers will gossip about known peers and form a
|
||||
more resilient network.
|
||||
|
||||
Adding a Non-Validator
|
||||
~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Adding a non-validator is simple. Just copy the original
|
||||
``genesis.json`` to ``~/.tendermint`` on the new machine and start the
|
||||
node, specifying seeds as necessary. If no seeds are specified, the node
|
||||
won't make any blocks, because it's not a validator, and it won't hear
|
||||
about any blocks, because it's not connected to the other peer.
|
||||
|
||||
Adding a Validator
|
||||
~~~~~~~~~~~~~~~~~~
|
||||
|
||||
The easiest way to add new validators is to do it in the
|
||||
``genesis.json``, before starting the network. For instance, we could
|
||||
make a new ``priv_validator.json``, and copy its ``pub_key`` into the
|
||||
above genesis.
|
||||
|
||||
We can generate a new ``priv_validator.json`` with the command:
|
||||
|
||||
::
|
||||
|
||||
tendermint gen_validator
|
||||
|
||||
Now we can update our genesis file. For instance, if the new
|
||||
``priv_validator.json`` looks like:
|
||||
|
||||
::
|
||||
|
||||
{
|
||||
"address": "AC379688105901436A34A65F185C115B8BB277A1",
|
||||
"last_height": 0,
|
||||
"last_round": 0,
|
||||
"last_signature": null,
|
||||
"last_signbytes": "",
|
||||
"last_step": 0,
|
||||
"priv_key": [
|
||||
1,
|
||||
"0D2ED337D748ADF79BE28559B9E59EBE1ABBA0BAFE6D65FCB9797985329B950C8F2B5AACAACC9FCE41881349743B0CFDE190DF0177744568D4E82A18F0B7DF94"
|
||||
],
|
||||
"pub_key": [
|
||||
1,
|
||||
"8F2B5AACAACC9FCE41881349743B0CFDE190DF0177744568D4E82A18F0B7DF94"
|
||||
]
|
||||
}
|
||||
|
||||
then the new ``genesis.json`` will be:
|
||||
|
||||
::
|
||||
|
||||
{
|
||||
"app_hash": "",
|
||||
"chain_id": "test-chain-HZw6TB",
|
||||
"genesis_time": "0001-01-01T00:00:00.000Z",
|
||||
"validators": [
|
||||
{
|
||||
"power": 10,
|
||||
"name": "",
|
||||
"pub_key": [
|
||||
1,
|
||||
"5770B4DD55B3E08B7F5711C48B516347D8C33F47C30C226315D21AA64E0DFF2E"
|
||||
]
|
||||
},
|
||||
{
|
||||
"power": 10,
|
||||
"name": "",
|
||||
"pub_key": [
|
||||
1,
|
||||
"8F2B5AACAACC9FCE41881349743B0CFDE190DF0177744568D4E82A18F0B7DF94"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
Update the ``genesis.json`` in ``~/.tendermint``. Copy the genesis file
|
||||
and the new ``priv_validator.json`` to the ``~/.tendermint`` on a new
|
||||
machine.
|
||||
|
||||
Now run ``tendermint node`` on both machines, and use either
|
||||
``--p2p.seeds`` or the ``/dial_seeds`` to get them to peer up. They
|
||||
should start making blocks, and will only continue to do so as long as
|
||||
both of them are online.
|
||||
|
||||
To make a Tendermint network that can tolerate one of the validators
|
||||
failing, you need at least four validator nodes (> 2/3).
|
||||
|
||||
Updating validators in a live network is supported but must be
|
||||
explicitly programmed by the application developer. See the `application
|
||||
developers guide <./app-development.html>`__ for more
|
||||
details.
|
||||
|
||||
Local Network
|
||||
~~~~~~~~~~~~~
|
||||
|
||||
To run a network locally, say on a single machine, you must change the
|
||||
``_laddr`` fields in the ``config.toml`` (or using the flags) so that
|
||||
the listening addresses of the various sockets don't conflict.
|
||||
Additionally, you must set ``addrbook_strict=false`` in the
|
||||
``config.toml``, otherwise Tendermint's p2p library will deny making
|
||||
connections to peers with the same IP address.
|
87
glide.lock
generated
87
glide.lock
generated
@@ -1,41 +1,48 @@
|
||||
hash: 2c988aae9517b386ee911e4da5deb9f5034359b7e2ccf448952a3ddb9771222d
|
||||
updated: 2017-06-28T13:04:20.907047164+02:00
|
||||
hash: e3649cac7b1b9a23c024a9d1bbebd5a147861d55da2bca77c95129b6021850b4
|
||||
updated: 2017-09-22T13:24:29.443800586-04:00
|
||||
imports:
|
||||
- name: github.com/btcsuite/btcd
|
||||
version: b8df516b4b267acf2de46be593a9d948d1d2c420
|
||||
version: 4803a8291c92a1d2d41041b942a9a9e37deab065
|
||||
subpackages:
|
||||
- btcec
|
||||
- name: github.com/btcsuite/fastsha256
|
||||
version: 637e656429416087660c84436a2a035d69d54e2e
|
||||
- name: github.com/ebuchman/fail-test
|
||||
version: 95f809107225be108efcf10a3509e4ea6ceef3c4
|
||||
- name: github.com/fsnotify/fsnotify
|
||||
version: 4da3e2cfbabc9f751898f250b49f2439785783a1
|
||||
- name: github.com/go-kit/kit
|
||||
version: d67bb4c202e3b91377d1079b110a6c9ce23ab2f8
|
||||
version: 0d313fb5fb3a94d87d61e6434785264e87a5d740
|
||||
subpackages:
|
||||
- log
|
||||
- log/level
|
||||
- log/term
|
||||
- name: github.com/go-logfmt/logfmt
|
||||
version: 390ab7935ee28ec6b286364bba9b4dd6410cb3d5
|
||||
- name: github.com/go-playground/locales
|
||||
version: 1e5f1161c6416a5ff48840eb8724a394e48cc534
|
||||
subpackages:
|
||||
- currency
|
||||
- name: github.com/go-playground/universal-translator
|
||||
version: 71201497bace774495daed26a3874fd339e0b538
|
||||
- name: github.com/go-stack/stack
|
||||
version: 100eb0c0a9c5b306ca2fb4f165df21d80ada4b82
|
||||
version: 817915b46b97fd7bb80e8ab6b69f01a53ac3eebf
|
||||
- name: github.com/gogo/protobuf
|
||||
version: 9df9efe4c742f1a2bfdedf1c3b6902fc6e814c6b
|
||||
version: 2adc21fd136931e0388e278825291678e1d98309
|
||||
subpackages:
|
||||
- proto
|
||||
- name: github.com/golang/protobuf
|
||||
version: 18c9bb3261723cd5401db4d0c9fbc5c3b6c70fe8
|
||||
version: 130e6b02ab059e7b717a096f397c5b60111cae74
|
||||
subpackages:
|
||||
- proto
|
||||
- ptypes
|
||||
- ptypes/any
|
||||
- ptypes/duration
|
||||
- ptypes/timestamp
|
||||
- name: github.com/golang/snappy
|
||||
version: 553a641470496b2327abcac10b36396bd98e45c9
|
||||
- name: github.com/gorilla/websocket
|
||||
version: a91eba7f97777409bc2c443f5534d41dd20c5720
|
||||
version: 6f34763140ed8887aed6a044912009832b4733d7
|
||||
- name: github.com/hashicorp/hcl
|
||||
version: 392dba7d905ed5d04a5794ba89f558b27e2ba1ca
|
||||
version: 68e816d1c783414e79bc65b3994d9ab6b0a722ab
|
||||
subpackages:
|
||||
- hcl/ast
|
||||
- hcl/parser
|
||||
@@ -52,31 +59,31 @@ imports:
|
||||
- name: github.com/kr/logfmt
|
||||
version: b84e30acd515aadc4b783ad4ff83aff3299bdfe0
|
||||
- name: github.com/magiconair/properties
|
||||
version: 51463bfca2576e06c62a8504b5c0f06d61312647
|
||||
version: 8d7837e64d3c1ee4e54a880c5a920ab4316fc90a
|
||||
- name: github.com/mitchellh/mapstructure
|
||||
version: cc8532a8e9a55ea36402aa21efdf403a60d34096
|
||||
- name: github.com/pelletier/go-buffruneio
|
||||
version: c37440a7cf42ac63b919c752ca73a85067e05992
|
||||
version: d0303fe809921458f417bcf828397a65db30a7e4
|
||||
- name: github.com/pelletier/go-toml
|
||||
version: 5ccdfb18c776b740aecaf085c4d9a2779199c279
|
||||
version: 1d6b12b7cb290426e27e6b4e38b89fcda3aeef03
|
||||
- name: github.com/pkg/errors
|
||||
version: 645ef00459ed84a119197bfb8d8205042c6df63d
|
||||
- name: github.com/rcrowley/go-metrics
|
||||
version: 1f30fe9094a513ce4c700b9a54458bbb0c96996c
|
||||
- name: github.com/spf13/afero
|
||||
version: 9be650865eab0c12963d8753212f4f9c66cdcf12
|
||||
version: ee1bd8ee15a1306d1f9201acc41ef39cd9f99a1b
|
||||
subpackages:
|
||||
- mem
|
||||
- name: github.com/spf13/cast
|
||||
version: acbeb36b902d72a7a4c18e8f3241075e7ab763e4
|
||||
- name: github.com/spf13/cobra
|
||||
version: 4cdb38c072b86bf795d2c81de50784d9fdd6eb77
|
||||
version: b78744579491c1ceeaaa3b40205e56b0591b93a3
|
||||
- name: github.com/spf13/jwalterweatherman
|
||||
version: 8f07c835e5cc1450c082fe3a439cf87b0cbb2d99
|
||||
version: 12bd96e66386c1960ab0f74ced1362f66f552f7b
|
||||
- name: github.com/spf13/pflag
|
||||
version: e57e3eeb33f795204c1ca35f56c44f83227c6e66
|
||||
version: 7aff26db30c1be810f9de5038ec5ef96ac41fd7c
|
||||
- name: github.com/spf13/viper
|
||||
version: 0967fc9aceab2ce9da34061253ac10fb99bba5b2
|
||||
version: 25b30aa063fc18e48662b86996252eabdcf2f0c7
|
||||
- name: github.com/syndtr/goleveldb
|
||||
version: 8c81ea47d4c41a385645e133e15510fc6a2a74b4
|
||||
version: b89cc31ef7977104127d34c1bd31ebd1a9db2199
|
||||
subpackages:
|
||||
- leveldb
|
||||
- leveldb/cache
|
||||
@@ -91,7 +98,7 @@ imports:
|
||||
- leveldb/table
|
||||
- leveldb/util
|
||||
- name: github.com/tendermint/abci
|
||||
version: 864d1f80b36b440bde030a5c18d8ac3aa8c2949d
|
||||
version: 191c4b6d176169ffc7f9972d490fa362a3b7d940
|
||||
subpackages:
|
||||
- client
|
||||
- example/counter
|
||||
@@ -104,21 +111,18 @@ imports:
|
||||
- edwards25519
|
||||
- extra25519
|
||||
- name: github.com/tendermint/go-crypto
|
||||
version: 95b7c9e09c49b91bfbb71bb63dd514eb55450f16
|
||||
version: 311e8c1bf00fa5868daad4f8ea56dcad539182c0
|
||||
- name: github.com/tendermint/go-wire
|
||||
version: 5f88da3dbc1a72844e6dfaf274ce87f851d488eb
|
||||
subpackages:
|
||||
- data
|
||||
- data/base58
|
||||
- name: github.com/tendermint/merkleeyes
|
||||
version: 102aaf5a8ffda1846413fb22805a94def2045b9f
|
||||
version: 2a93256d2c6fbcc3b55673c0d2b96a7e32c6238b
|
||||
subpackages:
|
||||
- app
|
||||
- client
|
||||
- iavl
|
||||
- testutil
|
||||
- name: github.com/tendermint/tmlibs
|
||||
version: 7ce4da1eee6004d627e780c8fe91e96d9b99e459
|
||||
version: 9997e3a3b46db1d2f88aa9816ed0e7915dad6ac1
|
||||
subpackages:
|
||||
- autofile
|
||||
- cli
|
||||
@@ -132,7 +136,7 @@ imports:
|
||||
- merkle
|
||||
- test
|
||||
- name: golang.org/x/crypto
|
||||
version: c7af5bf2638a1164f2eb5467c39c6cffbd13a02e
|
||||
version: 7d9177d70076375b9a59c8fde23d52d9c4a7ecd5
|
||||
subpackages:
|
||||
- curve25519
|
||||
- nacl/box
|
||||
@@ -143,7 +147,7 @@ imports:
|
||||
- ripemd160
|
||||
- salsa20/salsa
|
||||
- name: golang.org/x/net
|
||||
version: feeb485667d1fdabe727840fe00adc22431bc86e
|
||||
version: 0744d001aa8470aaa53df28d32e5ceeb8af9bd70
|
||||
subpackages:
|
||||
- context
|
||||
- http2
|
||||
@@ -153,41 +157,46 @@ imports:
|
||||
- lex/httplex
|
||||
- trace
|
||||
- name: golang.org/x/sys
|
||||
version: e62c3de784db939836898e5c19ffd41bece347da
|
||||
version: 429f518978ab01db8bb6f44b66785088e7fba58b
|
||||
subpackages:
|
||||
- unix
|
||||
- name: golang.org/x/text
|
||||
version: 470f45bf29f4147d6fbd7dfd0a02a848e49f5bf4
|
||||
version: 1cbadb444a806fd9430d14ad08967ed91da4fa0a
|
||||
subpackages:
|
||||
- secure/bidirule
|
||||
- transform
|
||||
- unicode/bidi
|
||||
- unicode/norm
|
||||
- name: google.golang.org/genproto
|
||||
version: 411e09b969b1170a9f0c467558eb4c4c110d9c77
|
||||
version: 1e559d0a00eef8a9a43151db4665280bd8dd5886
|
||||
subpackages:
|
||||
- googleapis/rpc/status
|
||||
- name: google.golang.org/grpc
|
||||
version: 844f573616520565fdc6fb4db242321b5456fd6d
|
||||
version: d4b75ebd4f9f8c4a2b1cdadbdbe0d7920431ccca
|
||||
subpackages:
|
||||
- balancer
|
||||
- codes
|
||||
- connectivity
|
||||
- credentials
|
||||
- grpclb/grpc_lb_v1
|
||||
- grpclb/grpc_lb_v1/messages
|
||||
- grpclog
|
||||
- internal
|
||||
- keepalive
|
||||
- metadata
|
||||
- naming
|
||||
- peer
|
||||
- resolver
|
||||
- stats
|
||||
- status
|
||||
- tap
|
||||
- transport
|
||||
- name: gopkg.in/go-playground/validator.v9
|
||||
version: a021b2ec9a8a8bb970f3f15bc42617cb520e8a64
|
||||
- name: gopkg.in/yaml.v2
|
||||
version: cd8b52f8269e0feb286dfeef29f8fe4d5b397e0b
|
||||
version: eb3733d160e74a9c7e442f435eb3bea458e1d19f
|
||||
testImports:
|
||||
- name: github.com/davecgh/go-spew
|
||||
version: 6d212800a42e8ab5c146b8ace3490ee17e5225f9
|
||||
version: 04cdfd42973bb9c8589fd6a731800cf222fde1a9
|
||||
subpackages:
|
||||
- spew
|
||||
- name: github.com/pmezard/go-difflib
|
||||
@@ -195,7 +204,7 @@ testImports:
|
||||
subpackages:
|
||||
- difflib
|
||||
- name: github.com/stretchr/testify
|
||||
version: 69483b4bd14f5845b5a1e55bca19e954e827f1d0
|
||||
version: 890a5c3458b43e6104ff5da8dfa139d013d77544
|
||||
subpackages:
|
||||
- assert
|
||||
- require
|
||||
|
18
glide.yaml
18
glide.yaml
@@ -7,40 +7,37 @@ import:
|
||||
- package: github.com/golang/protobuf
|
||||
subpackages:
|
||||
- proto
|
||||
- package: github.com/pelletier/go-toml
|
||||
version: ^1.0.0
|
||||
- package: github.com/gorilla/websocket
|
||||
- package: github.com/pkg/errors
|
||||
version: ~0.8.0
|
||||
- package: github.com/rcrowley/go-metrics
|
||||
- package: github.com/spf13/cobra
|
||||
- package: github.com/spf13/viper
|
||||
- package: github.com/tendermint/abci
|
||||
version: v0.5.0
|
||||
version: ~0.6.0
|
||||
subpackages:
|
||||
- client
|
||||
- example/dummy
|
||||
- types
|
||||
- package: github.com/tendermint/go-crypto
|
||||
version: ~0.2.2
|
||||
version: ~0.3.0
|
||||
- package: github.com/tendermint/go-wire
|
||||
version: ~0.6.2
|
||||
subpackages:
|
||||
- data
|
||||
- package: github.com/tendermint/merkleeyes
|
||||
version: ~0.2.4
|
||||
version: master
|
||||
subpackages:
|
||||
- app
|
||||
- iavl
|
||||
- testutil
|
||||
- package: github.com/tendermint/tmlibs
|
||||
version: ~0.2.2
|
||||
version: ~0.3.1
|
||||
subpackages:
|
||||
- autofile
|
||||
- cli
|
||||
- cli/flags
|
||||
- clist
|
||||
- common
|
||||
- db
|
||||
- events
|
||||
- flowrate
|
||||
- log
|
||||
- merkle
|
||||
@@ -54,6 +51,9 @@ import:
|
||||
- context
|
||||
- package: google.golang.org/grpc
|
||||
testImport:
|
||||
- package: github.com/go-kit/kit
|
||||
subpackages:
|
||||
- log/term
|
||||
- package: github.com/stretchr/testify
|
||||
subpackages:
|
||||
- assert
|
||||
|
@@ -50,17 +50,22 @@ TODO: Better handle abci client errors. (make it automatically handle connection
|
||||
|
||||
const cacheSize = 100000
|
||||
|
||||
// Mempool is an ordered in-memory pool for transactions before they are proposed in a consensus round.
|
||||
// Transaction validity is checked using the CheckTx abci message before the transaction is added to the pool.
|
||||
// The Mempool uses a concurrent list structure for storing transactions that can be efficiently accessed by multiple concurrent readers.
|
||||
type Mempool struct {
|
||||
config *cfg.MempoolConfig
|
||||
|
||||
proxyMtx sync.Mutex
|
||||
proxyAppConn proxy.AppConnMempool
|
||||
txs *clist.CList // concurrent linked-list of good txs
|
||||
counter int64 // simple incrementing counter
|
||||
height int // the last block Update()'d to
|
||||
rechecking int32 // for re-checking filtered txs on Update()
|
||||
recheckCursor *clist.CElement // next expected response
|
||||
recheckEnd *clist.CElement // re-checking stops here
|
||||
proxyMtx sync.Mutex
|
||||
proxyAppConn proxy.AppConnMempool
|
||||
txs *clist.CList // concurrent linked-list of good txs
|
||||
counter int64 // simple incrementing counter
|
||||
height int // the last block Update()'d to
|
||||
rechecking int32 // for re-checking filtered txs on Update()
|
||||
recheckCursor *clist.CElement // next expected response
|
||||
recheckEnd *clist.CElement // re-checking stops here
|
||||
notifiedTxsAvailable bool // true if fired on txsAvailable for this height
|
||||
txsAvailable chan int // fires the next height once for each height, when the mempool is not empty
|
||||
|
||||
// Keep a cache of already-seen txs.
|
||||
// This reduces the pressure on the proxyApp.
|
||||
@@ -72,13 +77,14 @@ type Mempool struct {
|
||||
logger log.Logger
|
||||
}
|
||||
|
||||
func NewMempool(config *cfg.MempoolConfig, proxyAppConn proxy.AppConnMempool) *Mempool {
|
||||
// NewMempool returns a new Mempool with the given configuration and connection to an application.
|
||||
func NewMempool(config *cfg.MempoolConfig, proxyAppConn proxy.AppConnMempool, height int) *Mempool {
|
||||
mempool := &Mempool{
|
||||
config: config,
|
||||
proxyAppConn: proxyAppConn,
|
||||
txs: clist.New(),
|
||||
counter: 0,
|
||||
height: 0,
|
||||
height: height,
|
||||
rechecking: 0,
|
||||
recheckCursor: nil,
|
||||
recheckEnd: nil,
|
||||
@@ -90,7 +96,14 @@ func NewMempool(config *cfg.MempoolConfig, proxyAppConn proxy.AppConnMempool) *M
|
||||
return mempool
|
||||
}
|
||||
|
||||
// SetLogger allows you to set your own Logger.
|
||||
// EnableTxsAvailable initializes the TxsAvailable channel,
|
||||
// ensuring it will trigger once every height when transactions are available.
|
||||
// NOTE: not thread safe - should only be called once, on startup
|
||||
func (mem *Mempool) EnableTxsAvailable() {
|
||||
mem.txsAvailable = make(chan int, 1)
|
||||
}
|
||||
|
||||
// SetLogger sets the Logger.
|
||||
func (mem *Mempool) SetLogger(l log.Logger) {
|
||||
mem.logger = l
|
||||
}
|
||||
@@ -110,21 +123,22 @@ func (mem *Mempool) initWAL() {
|
||||
}
|
||||
}
|
||||
|
||||
// consensus must be able to hold lock to safely update
|
||||
// Lock locks the mempool. The consensus must be able to hold lock to safely update.
|
||||
func (mem *Mempool) Lock() {
|
||||
mem.proxyMtx.Lock()
|
||||
}
|
||||
|
||||
// Unlock unlocks the mempool.
|
||||
func (mem *Mempool) Unlock() {
|
||||
mem.proxyMtx.Unlock()
|
||||
}
|
||||
|
||||
// Number of transactions in the mempool clist
|
||||
// Size returns the number of transactions in the mempool.
|
||||
func (mem *Mempool) Size() int {
|
||||
return mem.txs.Len()
|
||||
}
|
||||
|
||||
// Remove all transactions from mempool and cache
|
||||
// Flush removes all transactions from the mempool and cache
|
||||
func (mem *Mempool) Flush() {
|
||||
mem.proxyMtx.Lock()
|
||||
defer mem.proxyMtx.Unlock()
|
||||
@@ -137,14 +151,15 @@ func (mem *Mempool) Flush() {
|
||||
}
|
||||
}
|
||||
|
||||
// Return the first element of mem.txs for peer goroutines to call .NextWait() on.
|
||||
// Blocks until txs has elements.
|
||||
// TxsFrontWait returns the first transaction in the ordered list for peer goroutines to call .NextWait() on.
|
||||
// It blocks until the mempool is not empty (ie. until the internal `mem.txs` has at least one element)
|
||||
func (mem *Mempool) TxsFrontWait() *clist.CElement {
|
||||
return mem.txs.FrontWait()
|
||||
}
|
||||
|
||||
// Try a new transaction in the mempool.
|
||||
// Potentially blocking if we're blocking on Update() or Reap().
|
||||
// CheckTx executes a new transaction against the application to determine its validity
|
||||
// and whether it should be added to the mempool.
|
||||
// It blocks if we're waiting on Update() or Reap().
|
||||
// cb: A callback from the CheckTx command.
|
||||
// It gets called from another goroutine.
|
||||
// CONTRACT: Either cb will get called, or err returned.
|
||||
@@ -164,7 +179,7 @@ func (mem *Mempool) CheckTx(tx types.Tx, cb func(*abci.Response)) (err error) {
|
||||
},
|
||||
})
|
||||
}
|
||||
return nil
|
||||
return nil // TODO: return an error (?)
|
||||
}
|
||||
mem.cache.Push(tx)
|
||||
// END CACHE
|
||||
@@ -201,20 +216,23 @@ func (mem *Mempool) resCb(req *abci.Request, res *abci.Response) {
|
||||
func (mem *Mempool) resCbNormal(req *abci.Request, res *abci.Response) {
|
||||
switch r := res.Value.(type) {
|
||||
case *abci.Response_CheckTx:
|
||||
tx := req.GetCheckTx().Tx
|
||||
if r.CheckTx.Code == abci.CodeType_OK {
|
||||
mem.counter++
|
||||
memTx := &mempoolTx{
|
||||
counter: mem.counter,
|
||||
height: int64(mem.height),
|
||||
tx: req.GetCheckTx().Tx,
|
||||
tx: tx,
|
||||
}
|
||||
mem.txs.PushBack(memTx)
|
||||
mem.logger.Info("Added good transaction", "tx", tx, "res", r)
|
||||
mem.notifyTxsAvailable()
|
||||
} else {
|
||||
// ignore bad transaction
|
||||
mem.logger.Info("Bad Transaction", "res", r)
|
||||
mem.logger.Info("Rejected bad transaction", "tx", tx, "res", r)
|
||||
|
||||
// remove from cache (it might be good later)
|
||||
mem.cache.Remove(req.GetCheckTx().Tx)
|
||||
mem.cache.Remove(tx)
|
||||
|
||||
// TODO: handle other retcodes
|
||||
}
|
||||
@@ -250,14 +268,35 @@ func (mem *Mempool) resCbRecheck(req *abci.Request, res *abci.Response) {
|
||||
// Done!
|
||||
atomic.StoreInt32(&mem.rechecking, 0)
|
||||
mem.logger.Info("Done rechecking txs")
|
||||
|
||||
mem.notifyTxsAvailable()
|
||||
}
|
||||
default:
|
||||
// ignore other messages
|
||||
}
|
||||
}
|
||||
|
||||
// Get the valid transactions remaining
|
||||
// If maxTxs is -1, there is no cap on returned transactions.
|
||||
// TxsAvailable returns a channel which fires once for every height,
|
||||
// and only when transactions are available in the mempool.
|
||||
// NOTE: the returned channel may be nil if EnableTxsAvailable was not called.
|
||||
func (mem *Mempool) TxsAvailable() <-chan int {
|
||||
return mem.txsAvailable
|
||||
}
|
||||
|
||||
func (mem *Mempool) notifyTxsAvailable() {
|
||||
if mem.Size() == 0 {
|
||||
panic("notified txs available but mempool is empty!")
|
||||
}
|
||||
if mem.txsAvailable != nil &&
|
||||
!mem.notifiedTxsAvailable {
|
||||
|
||||
mem.notifiedTxsAvailable = true
|
||||
mem.txsAvailable <- mem.height + 1
|
||||
}
|
||||
}
|
||||
|
||||
// Reap returns a list of transactions currently in the mempool.
|
||||
// If maxTxs is -1, there is no cap on the number of returned transactions.
|
||||
func (mem *Mempool) Reap(maxTxs int) types.Txs {
|
||||
mem.proxyMtx.Lock()
|
||||
defer mem.proxyMtx.Unlock()
|
||||
@@ -286,8 +325,7 @@ func (mem *Mempool) collectTxs(maxTxs int) types.Txs {
|
||||
return txs
|
||||
}
|
||||
|
||||
// Tell mempool that these txs were committed.
|
||||
// Mempool will discard these txs.
|
||||
// Update informs the mempool that the given txs were committed and can be discarded.
|
||||
// NOTE: this should be called *after* block is committed by consensus.
|
||||
// NOTE: unsafe; Lock/Unlock must be managed by caller
|
||||
func (mem *Mempool) Update(height int, txs types.Txs) {
|
||||
@@ -302,13 +340,15 @@ func (mem *Mempool) Update(height int, txs types.Txs) {
|
||||
|
||||
// Set height
|
||||
mem.height = height
|
||||
mem.notifiedTxsAvailable = false
|
||||
|
||||
// Remove transactions that are already in txs.
|
||||
goodTxs := mem.filterTxs(txsMap)
|
||||
// Recheck mempool txs if any txs were committed in the block
|
||||
// NOTE/XXX: in some apps a tx could be invalidated due to EndBlock,
|
||||
// so we really still do need to recheck, but this is for debugging
|
||||
if mem.config.Recheck && (mem.config.RecheckEmpty || len(txs) > 0) {
|
||||
mem.logger.Info("Recheck txs", "numtxs", len(goodTxs))
|
||||
mem.logger.Info("Recheck txs", "numtxs", len(goodTxs), "height", height)
|
||||
mem.recheckTxs(goodTxs)
|
||||
// At this point, mem.txs are being rechecked.
|
||||
// mem.recheckCursor re-scans mem.txs and possibly removes some txs.
|
||||
@@ -354,19 +394,21 @@ func (mem *Mempool) recheckTxs(goodTxs []types.Tx) {
|
||||
|
||||
//--------------------------------------------------------------------------------
|
||||
|
||||
// A transaction that successfully ran
|
||||
// mempoolTx is a transaction that successfully ran
|
||||
type mempoolTx struct {
|
||||
counter int64 // a simple incrementing counter
|
||||
height int64 // height that this tx had been validated in
|
||||
tx types.Tx //
|
||||
}
|
||||
|
||||
// Height returns the height for this transaction
|
||||
func (memTx *mempoolTx) Height() int {
|
||||
return int(atomic.LoadInt64(&memTx.height))
|
||||
}
|
||||
|
||||
//--------------------------------------------------------------------------------
|
||||
|
||||
// txCache maintains a cache of transactions.
|
||||
type txCache struct {
|
||||
mtx sync.Mutex
|
||||
size int
|
||||
@@ -374,6 +416,7 @@ type txCache struct {
|
||||
list *list.List // to remove oldest tx when cache gets too big
|
||||
}
|
||||
|
||||
// newTxCache returns a new txCache.
|
||||
func newTxCache(cacheSize int) *txCache {
|
||||
return &txCache{
|
||||
size: cacheSize,
|
||||
@@ -382,6 +425,7 @@ func newTxCache(cacheSize int) *txCache {
|
||||
}
|
||||
}
|
||||
|
||||
// Reset resets the txCache to empty.
|
||||
func (cache *txCache) Reset() {
|
||||
cache.mtx.Lock()
|
||||
cache.map_ = make(map[string]struct{}, cacheSize)
|
||||
@@ -389,6 +433,7 @@ func (cache *txCache) Reset() {
|
||||
cache.mtx.Unlock()
|
||||
}
|
||||
|
||||
// Exists returns true if the given tx is cached.
|
||||
func (cache *txCache) Exists(tx types.Tx) bool {
|
||||
cache.mtx.Lock()
|
||||
_, exists := cache.map_[string(tx)]
|
||||
@@ -396,7 +441,7 @@ func (cache *txCache) Exists(tx types.Tx) bool {
|
||||
return exists
|
||||
}
|
||||
|
||||
// Returns false if tx is in cache.
|
||||
// Push adds the given tx to the txCache. It returns false if tx is already in the cache.
|
||||
func (cache *txCache) Push(tx types.Tx) bool {
|
||||
cache.mtx.Lock()
|
||||
defer cache.mtx.Unlock()
|
||||
@@ -418,6 +463,7 @@ func (cache *txCache) Push(tx types.Tx) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Remove removes the given tx from the cache.
|
||||
func (cache *txCache) Remove(tx types.Tx) {
|
||||
cache.mtx.Lock()
|
||||
delete(cache.map_, string(tx))
|
||||
|
@@ -1,34 +1,113 @@
|
||||
package mempool
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"encoding/binary"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/tendermint/abci/example/counter"
|
||||
"github.com/tendermint/abci/example/dummy"
|
||||
"github.com/tendermint/tmlibs/log"
|
||||
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
"github.com/tendermint/tendermint/proxy"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
"github.com/tendermint/tmlibs/log"
|
||||
)
|
||||
|
||||
func TestSerialReap(t *testing.T) {
|
||||
func newMempoolWithApp(cc proxy.ClientCreator) *Mempool {
|
||||
config := cfg.ResetTestRoot("mempool_test")
|
||||
|
||||
appConnMem, _ := cc.NewABCIClient()
|
||||
appConnMem.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "mempool"))
|
||||
appConnMem.Start()
|
||||
mempool := NewMempool(config.Mempool, appConnMem, 0)
|
||||
mempool.SetLogger(log.TestingLogger())
|
||||
return mempool
|
||||
}
|
||||
|
||||
func ensureNoFire(t *testing.T, ch <-chan int, timeoutMS int) {
|
||||
timer := time.NewTimer(time.Duration(timeoutMS) * time.Millisecond)
|
||||
select {
|
||||
case <-ch:
|
||||
t.Fatal("Expected not to fire")
|
||||
case <-timer.C:
|
||||
}
|
||||
}
|
||||
|
||||
func ensureFire(t *testing.T, ch <-chan int, timeoutMS int) {
|
||||
timer := time.NewTimer(time.Duration(timeoutMS) * time.Millisecond)
|
||||
select {
|
||||
case <-ch:
|
||||
case <-timer.C:
|
||||
t.Fatal("Expected to fire")
|
||||
}
|
||||
}
|
||||
|
||||
func checkTxs(t *testing.T, mempool *Mempool, count int) types.Txs {
|
||||
txs := make(types.Txs, count)
|
||||
for i := 0; i < count; i++ {
|
||||
txBytes := make([]byte, 20)
|
||||
txs[i] = txBytes
|
||||
rand.Read(txBytes)
|
||||
err := mempool.CheckTx(txBytes, nil)
|
||||
if err != nil {
|
||||
t.Fatal("Error after CheckTx: %v", err)
|
||||
}
|
||||
}
|
||||
return txs
|
||||
}
|
||||
|
||||
func TestTxsAvailable(t *testing.T) {
|
||||
app := dummy.NewDummyApplication()
|
||||
cc := proxy.NewLocalClientCreator(app)
|
||||
mempool := newMempoolWithApp(cc)
|
||||
mempool.EnableTxsAvailable()
|
||||
|
||||
timeoutMS := 500
|
||||
|
||||
// with no txs, it shouldnt fire
|
||||
ensureNoFire(t, mempool.TxsAvailable(), timeoutMS)
|
||||
|
||||
// send a bunch of txs, it should only fire once
|
||||
txs := checkTxs(t, mempool, 100)
|
||||
ensureFire(t, mempool.TxsAvailable(), timeoutMS)
|
||||
ensureNoFire(t, mempool.TxsAvailable(), timeoutMS)
|
||||
|
||||
// call update with half the txs.
|
||||
// it should fire once now for the new height
|
||||
// since there are still txs left
|
||||
committedTxs, txs := txs[:50], txs[50:]
|
||||
mempool.Update(1, committedTxs)
|
||||
ensureFire(t, mempool.TxsAvailable(), timeoutMS)
|
||||
ensureNoFire(t, mempool.TxsAvailable(), timeoutMS)
|
||||
|
||||
// send a bunch more txs. we already fired for this height so it shouldnt fire again
|
||||
moreTxs := checkTxs(t, mempool, 50)
|
||||
ensureNoFire(t, mempool.TxsAvailable(), timeoutMS)
|
||||
|
||||
// now call update with all the txs. it should not fire as there are no txs left
|
||||
committedTxs = append(txs, moreTxs...)
|
||||
mempool.Update(2, committedTxs)
|
||||
ensureNoFire(t, mempool.TxsAvailable(), timeoutMS)
|
||||
|
||||
// send a bunch more txs, it should only fire once
|
||||
checkTxs(t, mempool, 100)
|
||||
ensureFire(t, mempool.TxsAvailable(), timeoutMS)
|
||||
ensureNoFire(t, mempool.TxsAvailable(), timeoutMS)
|
||||
}
|
||||
|
||||
func TestSerialReap(t *testing.T) {
|
||||
app := counter.NewCounterApplication(true)
|
||||
app.SetOption("serial", "on")
|
||||
cc := proxy.NewLocalClientCreator(app)
|
||||
appConnMem, _ := cc.NewABCIClient()
|
||||
appConnMem.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "mempool"))
|
||||
if _, err := appConnMem.Start(); err != nil {
|
||||
t.Fatalf("Error starting ABCI client: %v", err.Error())
|
||||
}
|
||||
|
||||
mempool := newMempoolWithApp(cc)
|
||||
appConnCon, _ := cc.NewABCIClient()
|
||||
appConnCon.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "consensus"))
|
||||
if _, err := appConnCon.Start(); err != nil {
|
||||
t.Fatalf("Error starting ABCI client: %v", err.Error())
|
||||
}
|
||||
mempool := NewMempool(config.Mempool, appConnMem)
|
||||
mempool.SetLogger(log.TestingLogger())
|
||||
|
||||
deliverTxsRange := func(start, end int) {
|
||||
// Deliver some txs.
|
||||
|
@@ -9,6 +9,7 @@ import (
|
||||
abci "github.com/tendermint/abci/types"
|
||||
wire "github.com/tendermint/go-wire"
|
||||
"github.com/tendermint/tmlibs/clist"
|
||||
"github.com/tendermint/tmlibs/log"
|
||||
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
@@ -30,6 +31,7 @@ type MempoolReactor struct {
|
||||
evsw types.EventSwitch
|
||||
}
|
||||
|
||||
// NewMempoolReactor returns a new MempoolReactor with the given config and mempool.
|
||||
func NewMempoolReactor(config *cfg.MempoolConfig, mempool *Mempool) *MempoolReactor {
|
||||
memR := &MempoolReactor{
|
||||
config: config,
|
||||
@@ -39,7 +41,14 @@ func NewMempoolReactor(config *cfg.MempoolConfig, mempool *Mempool) *MempoolReac
|
||||
return memR
|
||||
}
|
||||
|
||||
// Implements Reactor
|
||||
// SetLogger sets the Logger on the reactor and the underlying Mempool.
|
||||
func (memR *MempoolReactor) SetLogger(l log.Logger) {
|
||||
memR.Logger = l
|
||||
memR.Mempool.SetLogger(l)
|
||||
}
|
||||
|
||||
// GetChannels implements Reactor.
|
||||
// It returns the list of channels for this reactor.
|
||||
func (memR *MempoolReactor) GetChannels() []*p2p.ChannelDescriptor {
|
||||
return []*p2p.ChannelDescriptor{
|
||||
&p2p.ChannelDescriptor{
|
||||
@@ -49,18 +58,20 @@ func (memR *MempoolReactor) GetChannels() []*p2p.ChannelDescriptor {
|
||||
}
|
||||
}
|
||||
|
||||
// Implements Reactor
|
||||
func (memR *MempoolReactor) AddPeer(peer *p2p.Peer) {
|
||||
// AddPeer implements Reactor.
|
||||
// It starts a broadcast routine ensuring all txs are forwarded to the given peer.
|
||||
func (memR *MempoolReactor) AddPeer(peer p2p.Peer) {
|
||||
go memR.broadcastTxRoutine(peer)
|
||||
}
|
||||
|
||||
// Implements Reactor
|
||||
func (memR *MempoolReactor) RemovePeer(peer *p2p.Peer, reason interface{}) {
|
||||
// RemovePeer implements Reactor.
|
||||
func (memR *MempoolReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
|
||||
// broadcast routine checks if peer is gone and returns
|
||||
}
|
||||
|
||||
// Implements Reactor
|
||||
func (memR *MempoolReactor) Receive(chID byte, src *p2p.Peer, msgBytes []byte) {
|
||||
// Receive implements Reactor.
|
||||
// It adds any received transactions to the mempool.
|
||||
func (memR *MempoolReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
|
||||
_, msg, err := DecodeMessage(msgBytes)
|
||||
if err != nil {
|
||||
memR.Logger.Error("Error decoding message", "err", err)
|
||||
@@ -72,11 +83,7 @@ func (memR *MempoolReactor) Receive(chID byte, src *p2p.Peer, msgBytes []byte) {
|
||||
case *TxMessage:
|
||||
err := memR.Mempool.CheckTx(msg.Tx, nil)
|
||||
if err != nil {
|
||||
// Bad, seen, or conflicting tx.
|
||||
memR.Logger.Info("Could not add tx", "tx", msg.Tx)
|
||||
return
|
||||
} else {
|
||||
memR.Logger.Info("Added valid tx", "tx", msg.Tx)
|
||||
memR.Logger.Info("Could not check tx", "tx", msg.Tx, "err", err)
|
||||
}
|
||||
// broadcasting happens from go routines per peer
|
||||
default:
|
||||
@@ -84,15 +91,17 @@ func (memR *MempoolReactor) Receive(chID byte, src *p2p.Peer, msgBytes []byte) {
|
||||
}
|
||||
}
|
||||
|
||||
// Just an alias for CheckTx since broadcasting happens in peer routines
|
||||
// BroadcastTx is an alias for Mempool.CheckTx. Broadcasting itself happens in peer routines.
|
||||
func (memR *MempoolReactor) BroadcastTx(tx types.Tx, cb func(*abci.Response)) error {
|
||||
return memR.Mempool.CheckTx(tx, cb)
|
||||
}
|
||||
|
||||
// PeerState describes the state of a peer.
|
||||
type PeerState interface {
|
||||
GetHeight() int
|
||||
}
|
||||
|
||||
// Peer describes a peer.
|
||||
type Peer interface {
|
||||
IsRunning() bool
|
||||
Send(byte, interface{}) bool
|
||||
@@ -141,7 +150,7 @@ func (memR *MempoolReactor) broadcastTxRoutine(peer Peer) {
|
||||
}
|
||||
}
|
||||
|
||||
// implements events.Eventable
|
||||
// SetEventSwitch implements events.Eventable.
|
||||
func (memR *MempoolReactor) SetEventSwitch(evsw types.EventSwitch) {
|
||||
memR.evsw = evsw
|
||||
}
|
||||
@@ -153,6 +162,7 @@ const (
|
||||
msgTypeTx = byte(0x01)
|
||||
)
|
||||
|
||||
// MempoolMessage is a message sent or received by the MempoolReactor.
|
||||
type MempoolMessage interface{}
|
||||
|
||||
var _ = wire.RegisterInterface(
|
||||
@@ -160,6 +170,7 @@ var _ = wire.RegisterInterface(
|
||||
wire.ConcreteType{&TxMessage{}, msgTypeTx},
|
||||
)
|
||||
|
||||
// DecodeMessage decodes a byte-array into a MempoolMessage.
|
||||
func DecodeMessage(bz []byte) (msgType byte, msg MempoolMessage, err error) {
|
||||
msgType = bz[0]
|
||||
n := new(int)
|
||||
@@ -170,10 +181,12 @@ func DecodeMessage(bz []byte) (msgType byte, msg MempoolMessage, err error) {
|
||||
|
||||
//-------------------------------------
|
||||
|
||||
// TxMessage is a MempoolMessage containing a transaction.
|
||||
type TxMessage struct {
|
||||
Tx types.Tx
|
||||
}
|
||||
|
||||
// String returns a string representation of the TxMessage.
|
||||
func (m *TxMessage) String() string {
|
||||
return fmt.Sprintf("[TxMessage %v]", m.Tx)
|
||||
}
|
||||
|
108
mempool/reactor_test.go
Normal file
108
mempool/reactor_test.go
Normal file
@@ -0,0 +1,108 @@
|
||||
package mempool
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/go-kit/kit/log/term"
|
||||
|
||||
"github.com/tendermint/abci/example/dummy"
|
||||
"github.com/tendermint/tmlibs/log"
|
||||
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
"github.com/tendermint/tendermint/proxy"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
// mempoolLogger is a TestingLogger which uses a different
|
||||
// color for each validator ("validator" key must exist).
|
||||
func mempoolLogger() log.Logger {
|
||||
return log.TestingLoggerWithColorFn(func(keyvals ...interface{}) term.FgBgColor {
|
||||
for i := 0; i < len(keyvals)-1; i += 2 {
|
||||
if keyvals[i] == "validator" {
|
||||
return term.FgBgColor{Fg: term.Color(uint8(keyvals[i+1].(int) + 1))}
|
||||
}
|
||||
}
|
||||
return term.FgBgColor{}
|
||||
})
|
||||
}
|
||||
|
||||
// connect N mempool reactors through N switches
|
||||
func makeAndConnectMempoolReactors(config *cfg.Config, N int) []*MempoolReactor {
|
||||
reactors := make([]*MempoolReactor, N)
|
||||
logger := mempoolLogger()
|
||||
for i := 0; i < N; i++ {
|
||||
app := dummy.NewDummyApplication()
|
||||
cc := proxy.NewLocalClientCreator(app)
|
||||
mempool := newMempoolWithApp(cc)
|
||||
|
||||
reactors[i] = NewMempoolReactor(config.Mempool, mempool) // so we dont start the consensus states
|
||||
reactors[i].SetLogger(logger.With("validator", i))
|
||||
}
|
||||
|
||||
p2p.MakeConnectedSwitches(config.P2P, N, func(i int, s *p2p.Switch) *p2p.Switch {
|
||||
s.AddReactor("MEMPOOL", reactors[i])
|
||||
return s
|
||||
|
||||
}, p2p.Connect2Switches)
|
||||
return reactors
|
||||
}
|
||||
|
||||
// wait for all txs on all reactors
|
||||
func waitForTxs(t *testing.T, txs types.Txs, reactors []*MempoolReactor) {
|
||||
// wait for the txs in all mempools
|
||||
wg := new(sync.WaitGroup)
|
||||
for i := 0; i < len(reactors); i++ {
|
||||
wg.Add(1)
|
||||
go _waitForTxs(t, wg, txs, i, reactors)
|
||||
}
|
||||
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
wg.Wait()
|
||||
close(done)
|
||||
}()
|
||||
|
||||
timer := time.After(TIMEOUT)
|
||||
select {
|
||||
case <-timer:
|
||||
t.Fatal("Timed out waiting for txs")
|
||||
case <-done:
|
||||
}
|
||||
}
|
||||
|
||||
// wait for all txs on a single mempool
|
||||
func _waitForTxs(t *testing.T, wg *sync.WaitGroup, txs types.Txs, reactorIdx int, reactors []*MempoolReactor) {
|
||||
|
||||
mempool := reactors[reactorIdx].Mempool
|
||||
for mempool.Size() != len(txs) {
|
||||
time.Sleep(time.Second)
|
||||
}
|
||||
|
||||
reapedTxs := mempool.Reap(len(txs))
|
||||
for i, tx := range txs {
|
||||
assert.Equal(t, tx, reapedTxs[i], fmt.Sprintf("txs at index %d on reactor %d don't match: %v vs %v", i, reactorIdx, tx, reapedTxs[i]))
|
||||
}
|
||||
wg.Done()
|
||||
}
|
||||
|
||||
var (
|
||||
NUM_TXS = 1000
|
||||
TIMEOUT = 120 * time.Second // ridiculously high because CircleCI is slow
|
||||
)
|
||||
|
||||
func TestReactorBroadcastTxMessage(t *testing.T) {
|
||||
config := cfg.TestConfig()
|
||||
N := 4
|
||||
reactors := makeAndConnectMempoolReactors(config, N)
|
||||
|
||||
// send a bunch of txs to the first reactor's mempool
|
||||
// and wait for them all to be received in the others
|
||||
txs := checkTxs(t, reactors[0].Mempool, NUM_TXS)
|
||||
waitForTxs(t, txs, reactors)
|
||||
}
|
168
node/node.go
168
node/node.go
@@ -3,6 +3,7 @@ package node
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"strings"
|
||||
@@ -10,11 +11,15 @@ import (
|
||||
abci "github.com/tendermint/abci/types"
|
||||
crypto "github.com/tendermint/go-crypto"
|
||||
wire "github.com/tendermint/go-wire"
|
||||
cmn "github.com/tendermint/tmlibs/common"
|
||||
dbm "github.com/tendermint/tmlibs/db"
|
||||
"github.com/tendermint/tmlibs/log"
|
||||
|
||||
bc "github.com/tendermint/tendermint/blockchain"
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
"github.com/tendermint/tendermint/consensus"
|
||||
mempl "github.com/tendermint/tendermint/mempool"
|
||||
p2p "github.com/tendermint/tendermint/p2p"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
"github.com/tendermint/tendermint/proxy"
|
||||
rpccore "github.com/tendermint/tendermint/rpc/core"
|
||||
grpccore "github.com/tendermint/tendermint/rpc/grpc"
|
||||
@@ -26,20 +31,66 @@ import (
|
||||
"github.com/tendermint/tendermint/state/txindex/null"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
"github.com/tendermint/tendermint/version"
|
||||
cmn "github.com/tendermint/tmlibs/common"
|
||||
dbm "github.com/tendermint/tmlibs/db"
|
||||
"github.com/tendermint/tmlibs/log"
|
||||
|
||||
_ "net/http/pprof"
|
||||
)
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
// DBContext specifies config information for loading a new DB.
|
||||
type DBContext struct {
|
||||
ID string
|
||||
Config *cfg.Config
|
||||
}
|
||||
|
||||
// DBProvider takes a DBContext and returns an instantiated DB.
|
||||
type DBProvider func(*DBContext) (dbm.DB, error)
|
||||
|
||||
// DefaultDBProvider returns a database using the DBBackend and DBDir
|
||||
// specified in the ctx.Config.
|
||||
func DefaultDBProvider(ctx *DBContext) (dbm.DB, error) {
|
||||
return dbm.NewDB(ctx.ID, ctx.Config.DBBackend, ctx.Config.DBDir()), nil
|
||||
}
|
||||
|
||||
// GenesisDocProvider returns a GenesisDoc.
|
||||
// It allows the GenesisDoc to be pulled from sources other than the
|
||||
// filesystem, for instance from a distributed key-value store cluster.
|
||||
type GenesisDocProvider func() (*types.GenesisDoc, error)
|
||||
|
||||
// DefaultGenesisDocProviderFunc returns a GenesisDocProvider that loads
|
||||
// the GenesisDoc from the config.GenesisFile() on the filesystem.
|
||||
func DefaultGenesisDocProviderFunc(config *cfg.Config) GenesisDocProvider {
|
||||
return func() (*types.GenesisDoc, error) {
|
||||
return types.GenesisDocFromFile(config.GenesisFile())
|
||||
}
|
||||
}
|
||||
|
||||
// NodeProvider takes a config and a logger and returns a ready to go Node.
|
||||
type NodeProvider func(*cfg.Config, log.Logger) (*Node, error)
|
||||
|
||||
// DefaultNewNode returns a Tendermint node with default settings for the
|
||||
// PrivValidator, ClientCreator, GenesisDoc, and DBProvider.
|
||||
// It implements NodeProvider.
|
||||
func DefaultNewNode(config *cfg.Config, logger log.Logger) (*Node, error) {
|
||||
return NewNode(config,
|
||||
types.LoadOrGenPrivValidatorFS(config.PrivValidatorFile()),
|
||||
proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()),
|
||||
DefaultGenesisDocProviderFunc(config),
|
||||
DefaultDBProvider,
|
||||
logger)
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
// Node is the highest level interface to a full Tendermint node.
|
||||
// It includes all configuration information and running services.
|
||||
type Node struct {
|
||||
cmn.BaseService
|
||||
|
||||
// config
|
||||
config *cfg.Config
|
||||
genesisDoc *types.GenesisDoc // initial validator set
|
||||
privValidator *types.PrivValidator // local node's validator key
|
||||
genesisDoc *types.GenesisDoc // initial validator set
|
||||
privValidator types.PrivValidator // local node's validator key
|
||||
|
||||
// network
|
||||
privKey crypto.PrivKeyEd25519 // local node's p2p key
|
||||
@@ -58,24 +109,42 @@ type Node struct {
|
||||
txIndexer txindex.TxIndexer
|
||||
}
|
||||
|
||||
func NewNodeDefault(config *cfg.Config, logger log.Logger) *Node {
|
||||
// Get PrivValidator
|
||||
privValidator := types.LoadOrGenPrivValidator(config.PrivValidatorFile(), logger)
|
||||
return NewNode(config, privValidator,
|
||||
proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()), logger)
|
||||
}
|
||||
// NewNode returns a new, ready to go, Tendermint Node.
|
||||
func NewNode(config *cfg.Config,
|
||||
privValidator types.PrivValidator,
|
||||
clientCreator proxy.ClientCreator,
|
||||
genesisDocProvider GenesisDocProvider,
|
||||
dbProvider DBProvider,
|
||||
logger log.Logger) (*Node, error) {
|
||||
|
||||
func NewNode(config *cfg.Config, privValidator *types.PrivValidator, clientCreator proxy.ClientCreator, logger log.Logger) *Node {
|
||||
// Get BlockStore
|
||||
blockStoreDB := dbm.NewDB("blockstore", config.DBBackend, config.DBDir())
|
||||
blockStoreDB, err := dbProvider(&DBContext{"blockstore", config})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
blockStore := bc.NewBlockStore(blockStoreDB)
|
||||
|
||||
consensusLogger := logger.With("module", "consensus")
|
||||
stateLogger := logger.With("module", "state")
|
||||
|
||||
// Get State
|
||||
stateDB := dbm.NewDB("state", config.DBBackend, config.DBDir())
|
||||
state := sm.GetState(stateDB, config.GenesisFile())
|
||||
stateDB, err := dbProvider(&DBContext{"state", config})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
state := sm.LoadState(stateDB)
|
||||
if state == nil {
|
||||
genDoc, err := genesisDocProvider()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
state, err = sm.MakeGenesisState(stateDB, genDoc)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
state.Save()
|
||||
}
|
||||
|
||||
state.SetLogger(stateLogger)
|
||||
|
||||
// Create the proxyApp, which manages connections (consensus, mempool, query)
|
||||
@@ -85,7 +154,7 @@ func NewNode(config *cfg.Config, privValidator *types.PrivValidator, clientCreat
|
||||
proxyApp := proxy.NewAppConns(clientCreator, handshaker)
|
||||
proxyApp.SetLogger(logger.With("module", "proxy"))
|
||||
if _, err := proxyApp.Start(); err != nil {
|
||||
cmn.Exit(cmn.Fmt("Error starting proxy app connections: %v", err))
|
||||
return nil, fmt.Errorf("Error starting proxy app connections: %v", err)
|
||||
}
|
||||
|
||||
// reload the state (it may have been updated by the handshake)
|
||||
@@ -96,7 +165,10 @@ func NewNode(config *cfg.Config, privValidator *types.PrivValidator, clientCreat
|
||||
var txIndexer txindex.TxIndexer
|
||||
switch config.TxIndex {
|
||||
case "kv":
|
||||
store := dbm.NewDB("tx_index", config.DBBackend, config.DBDir())
|
||||
store, err := dbProvider(&DBContext{"tx_index", config})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
txIndexer = kv.NewTxIndex(store)
|
||||
default:
|
||||
txIndexer = &null.TxIndex{}
|
||||
@@ -109,9 +181,8 @@ func NewNode(config *cfg.Config, privValidator *types.PrivValidator, clientCreat
|
||||
// Make event switch
|
||||
eventSwitch := types.NewEventSwitch()
|
||||
eventSwitch.SetLogger(logger.With("module", "types"))
|
||||
_, err := eventSwitch.Start()
|
||||
if err != nil {
|
||||
cmn.Exit(cmn.Fmt("Failed to start switch: %v", err))
|
||||
if _, err := eventSwitch.Start(); err != nil {
|
||||
return nil, fmt.Errorf("Failed to start switch: %v", err)
|
||||
}
|
||||
|
||||
// Decide whether to fast-sync or not
|
||||
@@ -119,13 +190,13 @@ func NewNode(config *cfg.Config, privValidator *types.PrivValidator, clientCreat
|
||||
fastSync := config.FastSync
|
||||
if state.Validators.Size() == 1 {
|
||||
addr, _ := state.Validators.GetByIndex(0)
|
||||
if bytes.Equal(privValidator.Address, addr) {
|
||||
if bytes.Equal(privValidator.GetAddress(), addr) {
|
||||
fastSync = false
|
||||
}
|
||||
}
|
||||
|
||||
// Log whether this node is a validator or an observer
|
||||
if state.Validators.HasAddress(privValidator.Address) {
|
||||
if state.Validators.HasAddress(privValidator.GetAddress()) {
|
||||
consensusLogger.Info("This node is a validator")
|
||||
} else {
|
||||
consensusLogger.Info("This node is not a validator")
|
||||
@@ -137,11 +208,15 @@ func NewNode(config *cfg.Config, privValidator *types.PrivValidator, clientCreat
|
||||
|
||||
// Make MempoolReactor
|
||||
mempoolLogger := logger.With("module", "mempool")
|
||||
mempool := mempl.NewMempool(config.Mempool, proxyApp.Mempool())
|
||||
mempool := mempl.NewMempool(config.Mempool, proxyApp.Mempool(), state.LastBlockHeight)
|
||||
mempool.SetLogger(mempoolLogger)
|
||||
mempoolReactor := mempl.NewMempoolReactor(config.Mempool, mempool)
|
||||
mempoolReactor.SetLogger(mempoolLogger)
|
||||
|
||||
if config.Consensus.WaitForTxs() {
|
||||
mempool.EnableTxsAvailable()
|
||||
}
|
||||
|
||||
// Make ConsensusReactor
|
||||
consensusState := consensus.NewConsensusState(config.Consensus, state.Copy(), proxyApp.Consensus(), blockStore, mempool)
|
||||
consensusState.SetLogger(consensusLogger)
|
||||
@@ -228,12 +303,13 @@ func NewNode(config *cfg.Config, privValidator *types.PrivValidator, clientCreat
|
||||
txIndexer: txIndexer,
|
||||
}
|
||||
node.BaseService = *cmn.NewBaseService(logger, "Node", node)
|
||||
return node
|
||||
return node, nil
|
||||
}
|
||||
|
||||
// OnStart starts the Node. It implements cmn.Service.
|
||||
func (n *Node) OnStart() error {
|
||||
// Create & add listener
|
||||
protocol, address := ProtocolAndAddress(n.config.P2P.ListenAddress)
|
||||
protocol, address := cmn.ProtocolAndAddress(n.config.P2P.ListenAddress)
|
||||
l := p2p.NewDefaultListener(protocol, address, n.config.P2P.SkipUPNP, n.Logger.With("module", "p2p"))
|
||||
n.sw.AddListener(l)
|
||||
|
||||
@@ -266,6 +342,7 @@ func (n *Node) OnStart() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// OnStop stops the Node. It implements cmn.Service.
|
||||
func (n *Node) OnStop() {
|
||||
n.BaseService.OnStop()
|
||||
|
||||
@@ -281,6 +358,7 @@ func (n *Node) OnStop() {
|
||||
}
|
||||
}
|
||||
|
||||
// RunForever waits for an interupt signal and stops the node.
|
||||
func (n *Node) RunForever() {
|
||||
// Sleep forever and then...
|
||||
cmn.TrapSignal(func() {
|
||||
@@ -288,15 +366,15 @@ func (n *Node) RunForever() {
|
||||
})
|
||||
}
|
||||
|
||||
// Add the event switch to reactors, mempool, etc.
|
||||
// SetEventSwitch adds the event switch to reactors, mempool, etc.
|
||||
func SetEventSwitch(evsw types.EventSwitch, eventables ...types.Eventable) {
|
||||
for _, e := range eventables {
|
||||
e.SetEventSwitch(evsw)
|
||||
}
|
||||
}
|
||||
|
||||
// Add a Listener to accept inbound peer connections.
|
||||
// Add listeners before starting the Node.
|
||||
// AddListener adds a listener to accept inbound peer connections.
|
||||
// It should be called before starting the Node.
|
||||
// The first listener is the primary listener (in NodeInfo)
|
||||
func (n *Node) AddListener(l p2p.Listener) {
|
||||
n.sw.AddListener(l)
|
||||
@@ -310,11 +388,12 @@ func (n *Node) ConfigureRPC() {
|
||||
rpccore.SetConsensusState(n.consensusState)
|
||||
rpccore.SetMempool(n.mempoolReactor.Mempool)
|
||||
rpccore.SetSwitch(n.sw)
|
||||
rpccore.SetPubKey(n.privValidator.PubKey)
|
||||
rpccore.SetPubKey(n.privValidator.GetPubKey())
|
||||
rpccore.SetGenesisDoc(n.genesisDoc)
|
||||
rpccore.SetAddrBook(n.addrBook)
|
||||
rpccore.SetProxyAppQuery(n.proxyApp.Query())
|
||||
rpccore.SetTxIndexer(n.txIndexer)
|
||||
rpccore.SetConsensusReactor(n.consensusReactor)
|
||||
rpccore.SetLogger(n.Logger.With("module", "rpc"))
|
||||
}
|
||||
|
||||
@@ -330,9 +409,9 @@ func (n *Node) startRPC() ([]net.Listener, error) {
|
||||
listeners := make([]net.Listener, len(listenAddrs))
|
||||
for i, listenAddr := range listenAddrs {
|
||||
mux := http.NewServeMux()
|
||||
wm := rpcserver.NewWebsocketManager(rpccore.Routes, n.evsw)
|
||||
rpcLogger := n.Logger.With("module", "rpc-server")
|
||||
wm.SetLogger(rpcLogger)
|
||||
wm := rpcserver.NewWebsocketManager(rpccore.Routes, n.evsw)
|
||||
wm.SetLogger(rpcLogger.With("protocol", "websocket"))
|
||||
mux.HandleFunc("/websocket", wm.WebsocketHandler)
|
||||
rpcserver.RegisterRPCFuncs(mux, rpccore.Routes, rpcLogger)
|
||||
listener, err := rpcserver.StartHTTPServer(listenAddr, mux, rpcLogger)
|
||||
@@ -355,39 +434,48 @@ func (n *Node) startRPC() ([]net.Listener, error) {
|
||||
return listeners, nil
|
||||
}
|
||||
|
||||
// Switch returns the Node's Switch.
|
||||
func (n *Node) Switch() *p2p.Switch {
|
||||
return n.sw
|
||||
}
|
||||
|
||||
// BlockStore returns the Node's BlockStore.
|
||||
func (n *Node) BlockStore() *bc.BlockStore {
|
||||
return n.blockStore
|
||||
}
|
||||
|
||||
// ConsensusState returns the Node's ConsensusState.
|
||||
func (n *Node) ConsensusState() *consensus.ConsensusState {
|
||||
return n.consensusState
|
||||
}
|
||||
|
||||
// ConsensusReactor returns the Node's ConsensusReactor.
|
||||
func (n *Node) ConsensusReactor() *consensus.ConsensusReactor {
|
||||
return n.consensusReactor
|
||||
}
|
||||
|
||||
// MempoolReactor returns the Node's MempoolReactor.
|
||||
func (n *Node) MempoolReactor() *mempl.MempoolReactor {
|
||||
return n.mempoolReactor
|
||||
}
|
||||
|
||||
// EventSwitch returns the Node's EventSwitch.
|
||||
func (n *Node) EventSwitch() types.EventSwitch {
|
||||
return n.evsw
|
||||
}
|
||||
|
||||
// XXX: for convenience
|
||||
func (n *Node) PrivValidator() *types.PrivValidator {
|
||||
// PrivValidator returns the Node's PrivValidator.
|
||||
// XXX: for convenience only!
|
||||
func (n *Node) PrivValidator() types.PrivValidator {
|
||||
return n.privValidator
|
||||
}
|
||||
|
||||
// GenesisDoc returns the Node's GenesisDoc.
|
||||
func (n *Node) GenesisDoc() *types.GenesisDoc {
|
||||
return n.genesisDoc
|
||||
}
|
||||
|
||||
// ProxyApp returns the Node's AppConns, representing its connections to the ABCI application.
|
||||
func (n *Node) ProxyApp() proxy.AppConns {
|
||||
return n.proxyApp
|
||||
}
|
||||
@@ -437,22 +525,14 @@ func (n *Node) makeNodeInfo() *p2p.NodeInfo {
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
// NodeInfo returns the Node's Info from the Switch.
|
||||
func (n *Node) NodeInfo() *p2p.NodeInfo {
|
||||
return n.sw.NodeInfo()
|
||||
}
|
||||
|
||||
// DialSeeds dials the given seeds on the Switch.
|
||||
func (n *Node) DialSeeds(seeds []string) error {
|
||||
return n.sw.DialSeeds(n.addrBook, seeds)
|
||||
}
|
||||
|
||||
// Defaults to tcp
|
||||
func ProtocolAndAddress(listenAddr string) (string, string) {
|
||||
protocol, address := "tcp", listenAddr
|
||||
parts := strings.SplitN(address, "://", 2)
|
||||
if len(parts) == 2 {
|
||||
protocol, address = parts[0], parts[1]
|
||||
}
|
||||
return protocol, address
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
@@ -4,15 +4,19 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/tendermint/tmlibs/log"
|
||||
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
)
|
||||
|
||||
func TestNodeStartStop(t *testing.T) {
|
||||
config := cfg.ResetTestRoot("node_node_test")
|
||||
|
||||
// Create & start node
|
||||
n := NewNodeDefault(config, log.TestingLogger())
|
||||
n, err := DefaultNewNode(config, log.TestingLogger())
|
||||
assert.NoError(t, err, "expected no err on DefaultNewNode")
|
||||
n.Start()
|
||||
t.Logf("Started node %v", n.sw.NodeInfo())
|
||||
|
||||
|
@@ -21,12 +21,16 @@ const (
|
||||
minWriteBufferSize = 65536
|
||||
updateState = 2 * time.Second
|
||||
pingTimeout = 40 * time.Second
|
||||
flushThrottle = 100 * time.Millisecond
|
||||
|
||||
// some of these defaults are written in the user config
|
||||
// flushThrottle, sendRate, recvRate
|
||||
// TODO: remove values present in config
|
||||
defaultFlushThrottle = 100 * time.Millisecond
|
||||
|
||||
defaultSendQueueCapacity = 1
|
||||
defaultSendRate = int64(512000) // 500KB/s
|
||||
defaultRecvBufferCapacity = 4096
|
||||
defaultRecvMessageCapacity = 22020096 // 21MB
|
||||
defaultSendRate = int64(512000) // 500KB/s
|
||||
defaultRecvRate = int64(512000) // 500KB/s
|
||||
defaultSendTimeout = 10 * time.Second
|
||||
)
|
||||
@@ -89,13 +93,23 @@ type MConnection struct {
|
||||
type MConnConfig struct {
|
||||
SendRate int64 `mapstructure:"send_rate"`
|
||||
RecvRate int64 `mapstructure:"recv_rate"`
|
||||
|
||||
maxMsgPacketPayloadSize int
|
||||
|
||||
flushThrottle time.Duration
|
||||
}
|
||||
|
||||
func (cfg *MConnConfig) maxMsgPacketTotalSize() int {
|
||||
return cfg.maxMsgPacketPayloadSize + maxMsgPacketOverheadSize
|
||||
}
|
||||
|
||||
// DefaultMConnConfig returns the default config.
|
||||
func DefaultMConnConfig() *MConnConfig {
|
||||
return &MConnConfig{
|
||||
SendRate: defaultSendRate,
|
||||
RecvRate: defaultRecvRate,
|
||||
SendRate: defaultSendRate,
|
||||
RecvRate: defaultRecvRate,
|
||||
maxMsgPacketPayloadSize: defaultMaxMsgPacketPayloadSize,
|
||||
flushThrottle: defaultFlushThrottle,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -145,10 +159,11 @@ func NewMConnectionWithConfig(conn net.Conn, chDescs []*ChannelDescriptor, onRec
|
||||
return mconn
|
||||
}
|
||||
|
||||
// OnStart implements BaseService
|
||||
func (c *MConnection) OnStart() error {
|
||||
c.BaseService.OnStart()
|
||||
c.quit = make(chan struct{})
|
||||
c.flushTimer = cmn.NewThrottleTimer("flush", flushThrottle)
|
||||
c.flushTimer = cmn.NewThrottleTimer("flush", c.config.flushThrottle)
|
||||
c.pingTimer = cmn.NewRepeatTimer("ping", pingTimeout)
|
||||
c.chStatsTimer = cmn.NewRepeatTimer("chStats", updateState)
|
||||
go c.sendRoutine()
|
||||
@@ -156,6 +171,7 @@ func (c *MConnection) OnStart() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// OnStop implements BaseService
|
||||
func (c *MConnection) OnStop() {
|
||||
c.BaseService.OnStop()
|
||||
c.flushTimer.Stop()
|
||||
@@ -333,7 +349,7 @@ func (c *MConnection) sendSomeMsgPackets() bool {
|
||||
// Block until .sendMonitor says we can write.
|
||||
// Once we're ready we send more than we asked for,
|
||||
// but amortized it should even out.
|
||||
c.sendMonitor.Limit(maxMsgPacketTotalSize, atomic.LoadInt64(&c.config.SendRate), true)
|
||||
c.sendMonitor.Limit(c.config.maxMsgPacketTotalSize(), atomic.LoadInt64(&c.config.SendRate), true)
|
||||
|
||||
// Now send some msgPackets.
|
||||
for i := 0; i < numBatchMsgPackets; i++ {
|
||||
@@ -391,7 +407,7 @@ func (c *MConnection) recvRoutine() {
|
||||
FOR_LOOP:
|
||||
for {
|
||||
// Block until .recvMonitor says we can read.
|
||||
c.recvMonitor.Limit(maxMsgPacketTotalSize, atomic.LoadInt64(&c.config.RecvRate), true)
|
||||
c.recvMonitor.Limit(c.config.maxMsgPacketTotalSize(), atomic.LoadInt64(&c.config.RecvRate), true)
|
||||
|
||||
/*
|
||||
// Peek into bufReader for debugging
|
||||
@@ -432,7 +448,7 @@ FOR_LOOP:
|
||||
c.Logger.Debug("Receive Pong")
|
||||
case packetTypeMsg:
|
||||
pkt, n, err := msgPacket{}, int(0), error(nil)
|
||||
wire.ReadBinaryPtr(&pkt, c.bufReader, maxMsgPacketTotalSize, &n, &err)
|
||||
wire.ReadBinaryPtr(&pkt, c.bufReader, c.config.maxMsgPacketTotalSize(), &n, &err)
|
||||
c.recvMonitor.Update(int(n))
|
||||
if err != nil {
|
||||
if c.IsRunning() {
|
||||
@@ -455,6 +471,7 @@ FOR_LOOP:
|
||||
}
|
||||
if msgBytes != nil {
|
||||
c.Logger.Debug("Received bytes", "chID", pkt.ChannelID, "msgBytes", msgBytes)
|
||||
// NOTE: This means the reactor.Receive runs in the same thread as the p2p recv routine
|
||||
c.onReceive(pkt.ChannelID, msgBytes)
|
||||
}
|
||||
default:
|
||||
@@ -538,6 +555,8 @@ type Channel struct {
|
||||
sending []byte
|
||||
priority int
|
||||
recentlySent int64 // exponential moving average
|
||||
|
||||
maxMsgPacketPayloadSize int
|
||||
}
|
||||
|
||||
func newChannel(conn *MConnection, desc *ChannelDescriptor) *Channel {
|
||||
@@ -546,12 +565,13 @@ func newChannel(conn *MConnection, desc *ChannelDescriptor) *Channel {
|
||||
cmn.PanicSanity("Channel default priority must be a postive integer")
|
||||
}
|
||||
return &Channel{
|
||||
conn: conn,
|
||||
desc: desc,
|
||||
id: desc.ID,
|
||||
sendQueue: make(chan []byte, desc.SendQueueCapacity),
|
||||
recving: make([]byte, 0, desc.RecvBufferCapacity),
|
||||
priority: desc.Priority,
|
||||
conn: conn,
|
||||
desc: desc,
|
||||
id: desc.ID,
|
||||
sendQueue: make(chan []byte, desc.SendQueueCapacity),
|
||||
recving: make([]byte, 0, desc.RecvBufferCapacity),
|
||||
priority: desc.Priority,
|
||||
maxMsgPacketPayloadSize: conn.config.maxMsgPacketPayloadSize,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -610,14 +630,15 @@ func (ch *Channel) isSendPending() bool {
|
||||
func (ch *Channel) nextMsgPacket() msgPacket {
|
||||
packet := msgPacket{}
|
||||
packet.ChannelID = byte(ch.id)
|
||||
packet.Bytes = ch.sending[:cmn.MinInt(maxMsgPacketPayloadSize, len(ch.sending))]
|
||||
if len(ch.sending) <= maxMsgPacketPayloadSize {
|
||||
maxSize := ch.maxMsgPacketPayloadSize
|
||||
packet.Bytes = ch.sending[:cmn.MinInt(maxSize, len(ch.sending))]
|
||||
if len(ch.sending) <= maxSize {
|
||||
packet.EOF = byte(0x01)
|
||||
ch.sending = nil
|
||||
atomic.AddInt32(&ch.sendQueueSize, -1) // decrement sendQueueSize
|
||||
} else {
|
||||
packet.EOF = byte(0x00)
|
||||
ch.sending = ch.sending[cmn.MinInt(maxMsgPacketPayloadSize, len(ch.sending)):]
|
||||
ch.sending = ch.sending[cmn.MinInt(maxSize, len(ch.sending)):]
|
||||
}
|
||||
return packet
|
||||
}
|
||||
@@ -666,9 +687,9 @@ func (ch *Channel) updateStats() {
|
||||
//-----------------------------------------------------------------------------
|
||||
|
||||
const (
|
||||
maxMsgPacketPayloadSize = 1024
|
||||
defaultMaxMsgPacketPayloadSize = 1024
|
||||
|
||||
maxMsgPacketOverheadSize = 10 // It's actually lower but good enough
|
||||
maxMsgPacketTotalSize = maxMsgPacketPayloadSize + maxMsgPacketOverheadSize
|
||||
packetTypePing = byte(0x01)
|
||||
packetTypePong = byte(0x02)
|
||||
packetTypeMsg = byte(0x03)
|
||||
|
@@ -87,7 +87,7 @@ func NewDefaultListener(protocol string, lAddr string, skipUPNP bool, logger log
|
||||
}
|
||||
// Otherwise just use the local address...
|
||||
if extAddr == nil {
|
||||
extAddr = getNaiveExternalAddress(listenerPort)
|
||||
extAddr = getNaiveExternalAddress(listenerPort, false, logger)
|
||||
}
|
||||
if extAddr == nil {
|
||||
cmn.PanicCrisis("Could not determine external address!")
|
||||
@@ -197,7 +197,7 @@ func getUPNPExternalAddress(externalPort, internalPort int, logger log.Logger) *
|
||||
}
|
||||
|
||||
// TODO: use syscalls: http://pastebin.com/9exZG4rh
|
||||
func getNaiveExternalAddress(port int) *NetAddress {
|
||||
func getNaiveExternalAddress(port int, settleForLocal bool, logger log.Logger) *NetAddress {
|
||||
addrs, err := net.InterfaceAddrs()
|
||||
if err != nil {
|
||||
cmn.PanicCrisis(cmn.Fmt("Could not fetch interface addresses: %v", err))
|
||||
@@ -209,10 +209,13 @@ func getNaiveExternalAddress(port int) *NetAddress {
|
||||
continue
|
||||
}
|
||||
v4 := ipnet.IP.To4()
|
||||
if v4 == nil || v4[0] == 127 {
|
||||
if v4 == nil || (!settleForLocal && v4[0] == 127) {
|
||||
continue
|
||||
} // loopback
|
||||
return NewNetAddressIPPort(ipnet.IP, uint16(port))
|
||||
}
|
||||
return nil
|
||||
|
||||
// try again, but settle for local
|
||||
logger.Info("Node may not be connected to internet. Settling for local address")
|
||||
return getNaiveExternalAddress(port, true, logger)
|
||||
}
|
||||
|
114
p2p/peer.go
114
p2p/peer.go
@@ -12,12 +12,29 @@ import (
|
||||
cmn "github.com/tendermint/tmlibs/common"
|
||||
)
|
||||
|
||||
// Peer is an interface representing a peer connected on a reactor.
|
||||
type Peer interface {
|
||||
cmn.Service
|
||||
|
||||
Key() string
|
||||
IsOutbound() bool
|
||||
IsPersistent() bool
|
||||
NodeInfo() *NodeInfo
|
||||
Status() ConnectionStatus
|
||||
|
||||
Send(byte, interface{}) bool
|
||||
TrySend(byte, interface{}) bool
|
||||
|
||||
Set(string, interface{})
|
||||
Get(string) interface{}
|
||||
}
|
||||
|
||||
// Peer could be marked as persistent, in which case you can use
|
||||
// Redial function to reconnect. Note that inbound peers can't be
|
||||
// made persistent. They should be made persistent on the other end.
|
||||
//
|
||||
// Before using a peer, you will need to perform a handshake on connection.
|
||||
type Peer struct {
|
||||
type peer struct {
|
||||
cmn.BaseService
|
||||
|
||||
outbound bool
|
||||
@@ -28,9 +45,9 @@ type Peer struct {
|
||||
persistent bool
|
||||
config *PeerConfig
|
||||
|
||||
*NodeInfo
|
||||
Key string
|
||||
Data *cmn.CMap // User data.
|
||||
nodeInfo *NodeInfo
|
||||
key string
|
||||
Data *cmn.CMap // User data.
|
||||
}
|
||||
|
||||
// PeerConfig is a Peer configuration.
|
||||
@@ -59,11 +76,9 @@ func DefaultPeerConfig() *PeerConfig {
|
||||
}
|
||||
}
|
||||
|
||||
func newOutboundPeer(addr *NetAddress, reactorsByCh map[byte]Reactor, chDescs []*ChannelDescriptor, onPeerError func(*Peer, interface{}), ourNodePrivKey crypto.PrivKeyEd25519) (*Peer, error) {
|
||||
return newOutboundPeerWithConfig(addr, reactorsByCh, chDescs, onPeerError, ourNodePrivKey, DefaultPeerConfig())
|
||||
}
|
||||
func newOutboundPeer(addr *NetAddress, reactorsByCh map[byte]Reactor, chDescs []*ChannelDescriptor,
|
||||
onPeerError func(Peer, interface{}), ourNodePrivKey crypto.PrivKeyEd25519, config *PeerConfig) (*peer, error) {
|
||||
|
||||
func newOutboundPeerWithConfig(addr *NetAddress, reactorsByCh map[byte]Reactor, chDescs []*ChannelDescriptor, onPeerError func(*Peer, interface{}), ourNodePrivKey crypto.PrivKeyEd25519, config *PeerConfig) (*Peer, error) {
|
||||
conn, err := dial(addr, config)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Error creating peer")
|
||||
@@ -77,15 +92,15 @@ func newOutboundPeerWithConfig(addr *NetAddress, reactorsByCh map[byte]Reactor,
|
||||
return peer, nil
|
||||
}
|
||||
|
||||
func newInboundPeer(conn net.Conn, reactorsByCh map[byte]Reactor, chDescs []*ChannelDescriptor, onPeerError func(*Peer, interface{}), ourNodePrivKey crypto.PrivKeyEd25519) (*Peer, error) {
|
||||
return newInboundPeerWithConfig(conn, reactorsByCh, chDescs, onPeerError, ourNodePrivKey, DefaultPeerConfig())
|
||||
}
|
||||
func newInboundPeer(conn net.Conn, reactorsByCh map[byte]Reactor, chDescs []*ChannelDescriptor,
|
||||
onPeerError func(Peer, interface{}), ourNodePrivKey crypto.PrivKeyEd25519, config *PeerConfig) (*peer, error) {
|
||||
|
||||
func newInboundPeerWithConfig(conn net.Conn, reactorsByCh map[byte]Reactor, chDescs []*ChannelDescriptor, onPeerError func(*Peer, interface{}), ourNodePrivKey crypto.PrivKeyEd25519, config *PeerConfig) (*Peer, error) {
|
||||
return newPeerFromConnAndConfig(conn, false, reactorsByCh, chDescs, onPeerError, ourNodePrivKey, config)
|
||||
}
|
||||
|
||||
func newPeerFromConnAndConfig(rawConn net.Conn, outbound bool, reactorsByCh map[byte]Reactor, chDescs []*ChannelDescriptor, onPeerError func(*Peer, interface{}), ourNodePrivKey crypto.PrivKeyEd25519, config *PeerConfig) (*Peer, error) {
|
||||
func newPeerFromConnAndConfig(rawConn net.Conn, outbound bool, reactorsByCh map[byte]Reactor, chDescs []*ChannelDescriptor,
|
||||
onPeerError func(Peer, interface{}), ourNodePrivKey crypto.PrivKeyEd25519, config *PeerConfig) (*peer, error) {
|
||||
|
||||
conn := rawConn
|
||||
|
||||
// Fuzz connection
|
||||
@@ -106,7 +121,7 @@ func newPeerFromConnAndConfig(rawConn net.Conn, outbound bool, reactorsByCh map[
|
||||
}
|
||||
|
||||
// Key and NodeInfo are set after Handshake
|
||||
p := &Peer{
|
||||
p := &peer{
|
||||
outbound: outbound,
|
||||
conn: conn,
|
||||
config: config,
|
||||
@@ -121,12 +136,12 @@ func newPeerFromConnAndConfig(rawConn net.Conn, outbound bool, reactorsByCh map[
|
||||
}
|
||||
|
||||
// CloseConn should be used when the peer was created, but never started.
|
||||
func (p *Peer) CloseConn() {
|
||||
func (p *peer) CloseConn() {
|
||||
p.conn.Close()
|
||||
}
|
||||
|
||||
// makePersistent marks the peer as persistent.
|
||||
func (p *Peer) makePersistent() {
|
||||
func (p *peer) makePersistent() {
|
||||
if !p.outbound {
|
||||
panic("inbound peers can't be made persistent")
|
||||
}
|
||||
@@ -135,13 +150,13 @@ func (p *Peer) makePersistent() {
|
||||
}
|
||||
|
||||
// IsPersistent returns true if the peer is persitent, false otherwise.
|
||||
func (p *Peer) IsPersistent() bool {
|
||||
func (p *peer) IsPersistent() bool {
|
||||
return p.persistent
|
||||
}
|
||||
|
||||
// HandshakeTimeout performs a handshake between a given node and the peer.
|
||||
// NOTE: blocking
|
||||
func (p *Peer) HandshakeTimeout(ourNodeInfo *NodeInfo, timeout time.Duration) error {
|
||||
func (p *peer) HandshakeTimeout(ourNodeInfo *NodeInfo, timeout time.Duration) error {
|
||||
// Set deadline for handshake so we don't block forever on conn.ReadFull
|
||||
p.conn.SetDeadline(time.Now().Add(timeout))
|
||||
|
||||
@@ -178,19 +193,19 @@ func (p *Peer) HandshakeTimeout(ourNodeInfo *NodeInfo, timeout time.Duration) er
|
||||
|
||||
peerNodeInfo.RemoteAddr = p.Addr().String()
|
||||
|
||||
p.NodeInfo = peerNodeInfo
|
||||
p.Key = peerNodeInfo.PubKey.KeyString()
|
||||
p.nodeInfo = peerNodeInfo
|
||||
p.key = peerNodeInfo.PubKey.KeyString()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Addr returns peer's remote network address.
|
||||
func (p *Peer) Addr() net.Addr {
|
||||
func (p *peer) Addr() net.Addr {
|
||||
return p.conn.RemoteAddr()
|
||||
}
|
||||
|
||||
// PubKey returns peer's public key.
|
||||
func (p *Peer) PubKey() crypto.PubKeyEd25519 {
|
||||
func (p *peer) PubKey() crypto.PubKeyEd25519 {
|
||||
if p.config.AuthEnc {
|
||||
return p.conn.(*SecretConnection).RemotePubKey()
|
||||
}
|
||||
@@ -201,31 +216,31 @@ func (p *Peer) PubKey() crypto.PubKeyEd25519 {
|
||||
}
|
||||
|
||||
// OnStart implements BaseService.
|
||||
func (p *Peer) OnStart() error {
|
||||
func (p *peer) OnStart() error {
|
||||
p.BaseService.OnStart()
|
||||
_, err := p.mconn.Start()
|
||||
return err
|
||||
}
|
||||
|
||||
// OnStop implements BaseService.
|
||||
func (p *Peer) OnStop() {
|
||||
func (p *peer) OnStop() {
|
||||
p.BaseService.OnStop()
|
||||
p.mconn.Stop()
|
||||
}
|
||||
|
||||
// Connection returns underlying MConnection.
|
||||
func (p *Peer) Connection() *MConnection {
|
||||
func (p *peer) Connection() *MConnection {
|
||||
return p.mconn
|
||||
}
|
||||
|
||||
// IsOutbound returns true if the connection is outbound, false otherwise.
|
||||
func (p *Peer) IsOutbound() bool {
|
||||
func (p *peer) IsOutbound() bool {
|
||||
return p.outbound
|
||||
}
|
||||
|
||||
// Send msg to the channel identified by chID byte. Returns false if the send
|
||||
// queue is full after timeout, specified by MConnection.
|
||||
func (p *Peer) Send(chID byte, msg interface{}) bool {
|
||||
func (p *peer) Send(chID byte, msg interface{}) bool {
|
||||
if !p.IsRunning() {
|
||||
// see Switch#Broadcast, where we fetch the list of peers and loop over
|
||||
// them - while we're looping, one peer may be removed and stopped.
|
||||
@@ -236,7 +251,7 @@ func (p *Peer) Send(chID byte, msg interface{}) bool {
|
||||
|
||||
// TrySend msg to the channel identified by chID byte. Immediately returns
|
||||
// false if the send queue is full.
|
||||
func (p *Peer) TrySend(chID byte, msg interface{}) bool {
|
||||
func (p *peer) TrySend(chID byte, msg interface{}) bool {
|
||||
if !p.IsRunning() {
|
||||
return false
|
||||
}
|
||||
@@ -244,7 +259,7 @@ func (p *Peer) TrySend(chID byte, msg interface{}) bool {
|
||||
}
|
||||
|
||||
// CanSend returns true if the send queue is not full, false otherwise.
|
||||
func (p *Peer) CanSend(chID byte) bool {
|
||||
func (p *peer) CanSend(chID byte) bool {
|
||||
if !p.IsRunning() {
|
||||
return false
|
||||
}
|
||||
@@ -252,32 +267,53 @@ func (p *Peer) CanSend(chID byte) bool {
|
||||
}
|
||||
|
||||
// WriteTo writes the peer's public key to w.
|
||||
func (p *Peer) WriteTo(w io.Writer) (n int64, err error) {
|
||||
func (p *peer) WriteTo(w io.Writer) (n int64, err error) {
|
||||
var n_ int
|
||||
wire.WriteString(p.Key, w, &n_, &err)
|
||||
wire.WriteString(p.key, w, &n_, &err)
|
||||
n += int64(n_)
|
||||
return
|
||||
}
|
||||
|
||||
// String representation.
|
||||
func (p *Peer) String() string {
|
||||
func (p *peer) String() string {
|
||||
if p.outbound {
|
||||
return fmt.Sprintf("Peer{%v %v out}", p.mconn, p.Key[:12])
|
||||
return fmt.Sprintf("Peer{%v %v out}", p.mconn, p.key[:12])
|
||||
}
|
||||
|
||||
return fmt.Sprintf("Peer{%v %v in}", p.mconn, p.Key[:12])
|
||||
return fmt.Sprintf("Peer{%v %v in}", p.mconn, p.key[:12])
|
||||
}
|
||||
|
||||
// Equals reports whenever 2 peers are actually represent the same node.
|
||||
func (p *Peer) Equals(other *Peer) bool {
|
||||
return p.Key == other.Key
|
||||
func (p *peer) Equals(other Peer) bool {
|
||||
return p.key == other.Key()
|
||||
}
|
||||
|
||||
// Get the data for a given key.
|
||||
func (p *Peer) Get(key string) interface{} {
|
||||
func (p *peer) Get(key string) interface{} {
|
||||
return p.Data.Get(key)
|
||||
}
|
||||
|
||||
// Set sets the data for the given key.
|
||||
func (p *peer) Set(key string, data interface{}) {
|
||||
p.Data.Set(key, data)
|
||||
}
|
||||
|
||||
// Key returns the peer's id key.
|
||||
func (p *peer) Key() string {
|
||||
return p.key
|
||||
}
|
||||
|
||||
// NodeInfo returns a copy of the peer's NodeInfo.
|
||||
func (p *peer) NodeInfo() *NodeInfo {
|
||||
n := *p.nodeInfo // copy
|
||||
return &n
|
||||
}
|
||||
|
||||
// Status returns the peer's ConnectionStatus.
|
||||
func (p *peer) Status() ConnectionStatus {
|
||||
return p.mconn.Status()
|
||||
}
|
||||
|
||||
func dial(addr *NetAddress, config *PeerConfig) (net.Conn, error) {
|
||||
conn, err := addr.DialTimeout(config.DialTimeout * time.Second)
|
||||
if err != nil {
|
||||
@@ -286,7 +322,9 @@ func dial(addr *NetAddress, config *PeerConfig) (net.Conn, error) {
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
func createMConnection(conn net.Conn, p *Peer, reactorsByCh map[byte]Reactor, chDescs []*ChannelDescriptor, onPeerError func(*Peer, interface{}), config *MConnConfig) *MConnection {
|
||||
func createMConnection(conn net.Conn, p *peer, reactorsByCh map[byte]Reactor, chDescs []*ChannelDescriptor,
|
||||
onPeerError func(Peer, interface{}), config *MConnConfig) *MConnection {
|
||||
|
||||
onReceive := func(chID byte, msgBytes []byte) {
|
||||
reactor := reactorsByCh[chID]
|
||||
if reactor == nil {
|
||||
|
@@ -7,8 +7,8 @@ import (
|
||||
// IPeerSet has a (immutable) subset of the methods of PeerSet.
|
||||
type IPeerSet interface {
|
||||
Has(key string) bool
|
||||
Get(key string) *Peer
|
||||
List() []*Peer
|
||||
Get(key string) Peer
|
||||
List() []Peer
|
||||
Size() int
|
||||
}
|
||||
|
||||
@@ -19,26 +19,28 @@ type IPeerSet interface {
|
||||
type PeerSet struct {
|
||||
mtx sync.Mutex
|
||||
lookup map[string]*peerSetItem
|
||||
list []*Peer
|
||||
list []Peer
|
||||
}
|
||||
|
||||
type peerSetItem struct {
|
||||
peer *Peer
|
||||
peer Peer
|
||||
index int
|
||||
}
|
||||
|
||||
// NewPeerSet creates a new peerSet with a list of initial capacity of 256 items.
|
||||
func NewPeerSet() *PeerSet {
|
||||
return &PeerSet{
|
||||
lookup: make(map[string]*peerSetItem),
|
||||
list: make([]*Peer, 0, 256),
|
||||
list: make([]Peer, 0, 256),
|
||||
}
|
||||
}
|
||||
|
||||
// Returns false if peer with key (PubKeyEd25519) is already set
|
||||
func (ps *PeerSet) Add(peer *Peer) error {
|
||||
// Add adds the peer to the PeerSet.
|
||||
// It returns ErrSwitchDuplicatePeer if the peer is already present.
|
||||
func (ps *PeerSet) Add(peer Peer) error {
|
||||
ps.mtx.Lock()
|
||||
defer ps.mtx.Unlock()
|
||||
if ps.lookup[peer.Key] != nil {
|
||||
if ps.lookup[peer.Key()] != nil {
|
||||
return ErrSwitchDuplicatePeer
|
||||
}
|
||||
|
||||
@@ -46,18 +48,21 @@ func (ps *PeerSet) Add(peer *Peer) error {
|
||||
// Appending is safe even with other goroutines
|
||||
// iterating over the ps.list slice.
|
||||
ps.list = append(ps.list, peer)
|
||||
ps.lookup[peer.Key] = &peerSetItem{peer, index}
|
||||
ps.lookup[peer.Key()] = &peerSetItem{peer, index}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Has returns true iff the PeerSet contains
|
||||
// the peer referred to by this peerKey.
|
||||
func (ps *PeerSet) Has(peerKey string) bool {
|
||||
ps.mtx.Lock()
|
||||
defer ps.mtx.Unlock()
|
||||
_, ok := ps.lookup[peerKey]
|
||||
ps.mtx.Unlock()
|
||||
return ok
|
||||
}
|
||||
|
||||
func (ps *PeerSet) Get(peerKey string) *Peer {
|
||||
// Get looks up a peer by the provided peerKey.
|
||||
func (ps *PeerSet) Get(peerKey string) Peer {
|
||||
ps.mtx.Lock()
|
||||
defer ps.mtx.Unlock()
|
||||
item, ok := ps.lookup[peerKey]
|
||||
@@ -68,45 +73,46 @@ func (ps *PeerSet) Get(peerKey string) *Peer {
|
||||
}
|
||||
}
|
||||
|
||||
func (ps *PeerSet) Remove(peer *Peer) {
|
||||
// Remove discards peer by its Key, if the peer was previously memoized.
|
||||
func (ps *PeerSet) Remove(peer Peer) {
|
||||
ps.mtx.Lock()
|
||||
defer ps.mtx.Unlock()
|
||||
item := ps.lookup[peer.Key]
|
||||
item := ps.lookup[peer.Key()]
|
||||
if item == nil {
|
||||
return
|
||||
}
|
||||
|
||||
index := item.index
|
||||
// Copy the list but without the last element.
|
||||
// (we must copy because we're mutating the list)
|
||||
newList := make([]*Peer, len(ps.list)-1)
|
||||
// Create a new copy of the list but with one less item.
|
||||
// (we must copy because we'll be mutating the list).
|
||||
newList := make([]Peer, len(ps.list)-1)
|
||||
copy(newList, ps.list)
|
||||
// If it's the last peer, that's an easy special case.
|
||||
if index == len(ps.list)-1 {
|
||||
ps.list = newList
|
||||
delete(ps.lookup, peer.Key)
|
||||
delete(ps.lookup, peer.Key())
|
||||
return
|
||||
}
|
||||
|
||||
// Move the last item from ps.list to "index" in list.
|
||||
// Replace the popped item with the last item in the old list.
|
||||
lastPeer := ps.list[len(ps.list)-1]
|
||||
lastPeerKey := lastPeer.Key
|
||||
lastPeerKey := lastPeer.Key()
|
||||
lastPeerItem := ps.lookup[lastPeerKey]
|
||||
newList[index] = lastPeer
|
||||
lastPeerItem.index = index
|
||||
ps.list = newList
|
||||
delete(ps.lookup, peer.Key)
|
||||
|
||||
delete(ps.lookup, peer.Key())
|
||||
}
|
||||
|
||||
// Size returns the number of unique items in the peerSet.
|
||||
func (ps *PeerSet) Size() int {
|
||||
ps.mtx.Lock()
|
||||
defer ps.mtx.Unlock()
|
||||
return len(ps.list)
|
||||
}
|
||||
|
||||
// threadsafe list of peers.
|
||||
func (ps *PeerSet) List() []*Peer {
|
||||
// List returns the threadsafe list of peers.
|
||||
func (ps *PeerSet) List() []Peer {
|
||||
ps.mtx.Lock()
|
||||
defer ps.mtx.Unlock()
|
||||
return ps.list
|
||||
|
@@ -2,47 +2,69 @@ package p2p
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
cmn "github.com/tendermint/tmlibs/common"
|
||||
)
|
||||
|
||||
// Returns an empty dummy peer
|
||||
func randPeer() *Peer {
|
||||
return &Peer{
|
||||
Key: cmn.RandStr(12),
|
||||
NodeInfo: &NodeInfo{
|
||||
func randPeer() *peer {
|
||||
return &peer{
|
||||
key: cmn.RandStr(12),
|
||||
nodeInfo: &NodeInfo{
|
||||
RemoteAddr: cmn.Fmt("%v.%v.%v.%v:46656", rand.Int()%256, rand.Int()%256, rand.Int()%256, rand.Int()%256),
|
||||
ListenAddr: cmn.Fmt("%v.%v.%v.%v:46656", rand.Int()%256, rand.Int()%256, rand.Int()%256, rand.Int()%256),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func TestAddRemoveOne(t *testing.T) {
|
||||
func TestPeerSetAddRemoveOne(t *testing.T) {
|
||||
t.Parallel()
|
||||
peerSet := NewPeerSet()
|
||||
|
||||
peer := randPeer()
|
||||
err := peerSet.Add(peer)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to add new peer")
|
||||
}
|
||||
if peerSet.Size() != 1 {
|
||||
t.Errorf("Failed to add new peer and increment size")
|
||||
var peerList []Peer
|
||||
for i := 0; i < 5; i++ {
|
||||
p := randPeer()
|
||||
peerSet.Add(p)
|
||||
peerList = append(peerList, p)
|
||||
}
|
||||
|
||||
peerSet.Remove(peer)
|
||||
if peerSet.Has(peer.Key) {
|
||||
t.Errorf("Failed to remove peer")
|
||||
n := len(peerList)
|
||||
// 1. Test removing from the front
|
||||
for i, peerAtFront := range peerList {
|
||||
peerSet.Remove(peerAtFront)
|
||||
wantSize := n - i - 1
|
||||
for j := 0; j < 2; j++ {
|
||||
assert.Equal(t, false, peerSet.Has(peerAtFront.Key()), "#%d Run #%d: failed to remove peer", i, j)
|
||||
assert.Equal(t, wantSize, peerSet.Size(), "#%d Run #%d: failed to remove peer and decrement size", i, j)
|
||||
// Test the route of removing the now non-existent element
|
||||
peerSet.Remove(peerAtFront)
|
||||
}
|
||||
}
|
||||
if peerSet.Size() != 0 {
|
||||
t.Errorf("Failed to remove peer and decrement size")
|
||||
|
||||
// 2. Next we are testing removing the peer at the end
|
||||
// a) Replenish the peerSet
|
||||
for _, peer := range peerList {
|
||||
peerSet.Add(peer)
|
||||
}
|
||||
|
||||
// b) In reverse, remove each element
|
||||
for i := n - 1; i >= 0; i-- {
|
||||
peerAtEnd := peerList[i]
|
||||
peerSet.Remove(peerAtEnd)
|
||||
assert.Equal(t, false, peerSet.Has(peerAtEnd.Key()), "#%d: failed to remove item at end", i)
|
||||
assert.Equal(t, i, peerSet.Size(), "#%d: differing sizes after peerSet.Remove(atEndPeer)", i)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAddRemoveMany(t *testing.T) {
|
||||
func TestPeerSetAddRemoveMany(t *testing.T) {
|
||||
t.Parallel()
|
||||
peerSet := NewPeerSet()
|
||||
|
||||
peers := []*Peer{}
|
||||
peers := []Peer{}
|
||||
N := 100
|
||||
for i := 0; i < N; i++ {
|
||||
peer := randPeer()
|
||||
@@ -57,7 +79,7 @@ func TestAddRemoveMany(t *testing.T) {
|
||||
|
||||
for i, peer := range peers {
|
||||
peerSet.Remove(peer)
|
||||
if peerSet.Has(peer.Key) {
|
||||
if peerSet.Has(peer.Key()) {
|
||||
t.Errorf("Failed to remove peer")
|
||||
}
|
||||
if peerSet.Size() != len(peers)-i-1 {
|
||||
@@ -65,3 +87,61 @@ func TestAddRemoveMany(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestPeerSetAddDuplicate(t *testing.T) {
|
||||
t.Parallel()
|
||||
peerSet := NewPeerSet()
|
||||
peer := randPeer()
|
||||
|
||||
n := 20
|
||||
errsChan := make(chan error)
|
||||
// Add the same asynchronously to test the
|
||||
// concurrent guarantees of our APIs, and
|
||||
// our expectation in the end is that only
|
||||
// one addition succeeded, but the rest are
|
||||
// instances of ErrSwitchDuplicatePeer.
|
||||
for i := 0; i < n; i++ {
|
||||
go func() {
|
||||
errsChan <- peerSet.Add(peer)
|
||||
}()
|
||||
}
|
||||
|
||||
// Now collect and tally the results
|
||||
errsTally := make(map[error]int)
|
||||
for i := 0; i < n; i++ {
|
||||
err := <-errsChan
|
||||
errsTally[err] += 1
|
||||
}
|
||||
|
||||
// Our next procedure is to ensure that only one addition
|
||||
// succeeded and that the rest are each ErrSwitchDuplicatePeer.
|
||||
wantErrCount, gotErrCount := n-1, errsTally[ErrSwitchDuplicatePeer]
|
||||
assert.Equal(t, wantErrCount, gotErrCount, "invalid ErrSwitchDuplicatePeer count")
|
||||
|
||||
wantNilErrCount, gotNilErrCount := 1, errsTally[nil]
|
||||
assert.Equal(t, wantNilErrCount, gotNilErrCount, "invalid nil errCount")
|
||||
}
|
||||
|
||||
func TestPeerSetGet(t *testing.T) {
|
||||
t.Parallel()
|
||||
peerSet := NewPeerSet()
|
||||
peer := randPeer()
|
||||
assert.Nil(t, peerSet.Get(peer.Key()), "expecting a nil lookup, before .Add")
|
||||
|
||||
if err := peerSet.Add(peer); err != nil {
|
||||
t.Fatalf("Failed to add new peer: %v", err)
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
for i := 0; i < 10; i++ {
|
||||
// Add them asynchronously to test the
|
||||
// concurrent guarantees of our APIs.
|
||||
wg.Add(1)
|
||||
go func(i int) {
|
||||
defer wg.Done()
|
||||
got, want := peerSet.Get(peer.Key()), peer
|
||||
assert.Equal(t, got, want, "#%d: got=%v want=%v", i, got, want)
|
||||
}(i)
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
|
@@ -76,13 +76,13 @@ func TestPeerSend(t *testing.T) {
|
||||
assert.True(p.Send(0x01, "Asylum"))
|
||||
}
|
||||
|
||||
func createOutboundPeerAndPerformHandshake(addr *NetAddress, config *PeerConfig) (*Peer, error) {
|
||||
func createOutboundPeerAndPerformHandshake(addr *NetAddress, config *PeerConfig) (*peer, error) {
|
||||
chDescs := []*ChannelDescriptor{
|
||||
&ChannelDescriptor{ID: 0x01, Priority: 1},
|
||||
}
|
||||
reactorsByCh := map[byte]Reactor{0x01: NewTestReactor(chDescs, true)}
|
||||
pk := crypto.GenPrivKeyEd25519()
|
||||
p, err := newOutboundPeerWithConfig(addr, reactorsByCh, chDescs, func(p *Peer, r interface{}) {}, pk, config)
|
||||
p, err := newOutboundPeer(addr, reactorsByCh, chDescs, func(p Peer, r interface{}) {}, pk, config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -133,7 +133,7 @@ func (p *remotePeer) accept(l net.Listener) {
|
||||
if err != nil {
|
||||
golog.Fatalf("Failed to accept conn: %+v", err)
|
||||
}
|
||||
peer, err := newInboundPeerWithConfig(conn, make(map[byte]Reactor), make([]*ChannelDescriptor, 0), func(p *Peer, r interface{}) {}, p.PrivKey, p.Config)
|
||||
peer, err := newInboundPeer(conn, make(map[byte]Reactor), make([]*ChannelDescriptor, 0), func(p Peer, r interface{}) {}, p.PrivKey, p.Config)
|
||||
if err != nil {
|
||||
golog.Fatalf("Failed to create a peer: %+v", err)
|
||||
}
|
||||
|
@@ -92,7 +92,7 @@ func (r *PEXReactor) GetChannels() []*ChannelDescriptor {
|
||||
|
||||
// AddPeer implements Reactor by adding peer to the address book (if inbound)
|
||||
// or by requesting more addresses (if outbound).
|
||||
func (r *PEXReactor) AddPeer(p *Peer) {
|
||||
func (r *PEXReactor) AddPeer(p Peer) {
|
||||
if p.IsOutbound() {
|
||||
// For outbound peers, the address is already in the books.
|
||||
// Either it was added in DialSeeds or when we
|
||||
@@ -101,10 +101,10 @@ func (r *PEXReactor) AddPeer(p *Peer) {
|
||||
r.RequestPEX(p)
|
||||
}
|
||||
} else { // For inbound connections, the peer is its own source
|
||||
addr, err := NewNetAddressString(p.ListenAddr)
|
||||
addr, err := NewNetAddressString(p.NodeInfo().ListenAddr)
|
||||
if err != nil {
|
||||
// this should never happen
|
||||
r.Logger.Error("Error in AddPeer: invalid peer address", "addr", p.ListenAddr, "err", err)
|
||||
r.Logger.Error("Error in AddPeer: invalid peer address", "addr", p.NodeInfo().ListenAddr, "err", err)
|
||||
return
|
||||
}
|
||||
r.book.AddAddress(addr, addr)
|
||||
@@ -112,15 +112,15 @@ func (r *PEXReactor) AddPeer(p *Peer) {
|
||||
}
|
||||
|
||||
// RemovePeer implements Reactor.
|
||||
func (r *PEXReactor) RemovePeer(p *Peer, reason interface{}) {
|
||||
func (r *PEXReactor) RemovePeer(p Peer, reason interface{}) {
|
||||
// If we aren't keeping track of local temp data for each peer here, then we
|
||||
// don't have to do anything.
|
||||
}
|
||||
|
||||
// Receive implements Reactor by handling incoming PEX messages.
|
||||
func (r *PEXReactor) Receive(chID byte, src *Peer, msgBytes []byte) {
|
||||
srcAddr := src.Connection().RemoteAddress
|
||||
srcAddrStr := srcAddr.String()
|
||||
func (r *PEXReactor) Receive(chID byte, src Peer, msgBytes []byte) {
|
||||
srcAddrStr := src.NodeInfo().RemoteAddr
|
||||
srcAddr, _ := NewNetAddressString(srcAddrStr)
|
||||
|
||||
r.IncrementMsgCountForPeer(srcAddrStr)
|
||||
if r.ReachedMaxMsgCountForPeer(srcAddrStr) {
|
||||
@@ -154,12 +154,12 @@ func (r *PEXReactor) Receive(chID byte, src *Peer, msgBytes []byte) {
|
||||
}
|
||||
|
||||
// RequestPEX asks peer for more addresses.
|
||||
func (r *PEXReactor) RequestPEX(p *Peer) {
|
||||
func (r *PEXReactor) RequestPEX(p Peer) {
|
||||
p.Send(PexChannel, struct{ PexMessage }{&pexRequestMessage{}})
|
||||
}
|
||||
|
||||
// SendAddrs sends addrs to the peer.
|
||||
func (r *PEXReactor) SendAddrs(p *Peer, addrs []*NetAddress) {
|
||||
func (r *PEXReactor) SendAddrs(p Peer, addrs []*NetAddress) {
|
||||
p.Send(PexChannel, struct{ PexMessage }{&pexAddrsMessage{Addrs: addrs}})
|
||||
}
|
||||
|
||||
|
@@ -120,7 +120,7 @@ func TestPEXReactorReceive(t *testing.T) {
|
||||
dir, err := ioutil.TempDir("", "pex_reactor")
|
||||
require.Nil(err)
|
||||
defer os.RemoveAll(dir)
|
||||
book := NewAddrBook(dir+"addrbook.json", true)
|
||||
book := NewAddrBook(dir+"addrbook.json", false)
|
||||
book.SetLogger(log.TestingLogger())
|
||||
|
||||
r := NewPEXReactor(book)
|
||||
@@ -129,7 +129,7 @@ func TestPEXReactorReceive(t *testing.T) {
|
||||
peer := createRandomPeer(false)
|
||||
|
||||
size := book.Size()
|
||||
netAddr, _ := NewNetAddressString(peer.ListenAddr)
|
||||
netAddr, _ := NewNetAddressString(peer.NodeInfo().ListenAddr)
|
||||
addrs := []*NetAddress{netAddr}
|
||||
msg := wire.BinaryBytes(struct{ PexMessage }{&pexAddrsMessage{Addrs: addrs}})
|
||||
r.Receive(PexChannel, peer, msg)
|
||||
@@ -159,16 +159,17 @@ func TestPEXReactorAbuseFromPeer(t *testing.T) {
|
||||
r.Receive(PexChannel, peer, msg)
|
||||
}
|
||||
|
||||
assert.True(r.ReachedMaxMsgCountForPeer(peer.ListenAddr))
|
||||
assert.True(r.ReachedMaxMsgCountForPeer(peer.NodeInfo().ListenAddr))
|
||||
}
|
||||
|
||||
func createRandomPeer(outbound bool) *Peer {
|
||||
func createRandomPeer(outbound bool) *peer {
|
||||
addr := cmn.Fmt("%v.%v.%v.%v:46656", rand.Int()%256, rand.Int()%256, rand.Int()%256, rand.Int()%256)
|
||||
netAddr, _ := NewNetAddressString(addr)
|
||||
p := &Peer{
|
||||
Key: cmn.RandStr(12),
|
||||
NodeInfo: &NodeInfo{
|
||||
p := &peer{
|
||||
key: cmn.RandStr(12),
|
||||
nodeInfo: &NodeInfo{
|
||||
ListenAddr: addr,
|
||||
RemoteAddr: netAddr.String(),
|
||||
},
|
||||
outbound: outbound,
|
||||
mconn: &MConnection{RemoteAddress: netAddr},
|
||||
|
123
p2p/switch.go
123
p2p/switch.go
@@ -22,9 +22,9 @@ type Reactor interface {
|
||||
|
||||
SetSwitch(*Switch)
|
||||
GetChannels() []*ChannelDescriptor
|
||||
AddPeer(peer *Peer)
|
||||
RemovePeer(peer *Peer, reason interface{})
|
||||
Receive(chID byte, peer *Peer, msgBytes []byte)
|
||||
AddPeer(peer Peer)
|
||||
RemovePeer(peer Peer, reason interface{})
|
||||
Receive(chID byte, peer Peer, msgBytes []byte)
|
||||
}
|
||||
|
||||
//--------------------------------------
|
||||
@@ -44,10 +44,10 @@ func NewBaseReactor(name string, impl Reactor) *BaseReactor {
|
||||
func (br *BaseReactor) SetSwitch(sw *Switch) {
|
||||
br.Switch = sw
|
||||
}
|
||||
func (_ *BaseReactor) GetChannels() []*ChannelDescriptor { return nil }
|
||||
func (_ *BaseReactor) AddPeer(peer *Peer) {}
|
||||
func (_ *BaseReactor) RemovePeer(peer *Peer, reason interface{}) {}
|
||||
func (_ *BaseReactor) Receive(chID byte, peer *Peer, msgBytes []byte) {}
|
||||
func (_ *BaseReactor) GetChannels() []*ChannelDescriptor { return nil }
|
||||
func (_ *BaseReactor) AddPeer(peer Peer) {}
|
||||
func (_ *BaseReactor) RemovePeer(peer Peer, reason interface{}) {}
|
||||
func (_ *BaseReactor) Receive(chID byte, peer Peer, msgBytes []byte) {}
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
|
||||
@@ -90,11 +90,19 @@ func NewSwitch(config *cfg.P2PConfig) *Switch {
|
||||
dialing: cmn.NewCMap(),
|
||||
nodeInfo: nil,
|
||||
}
|
||||
|
||||
// TODO: collapse the peerConfig into the config ?
|
||||
sw.peerConfig.MConfig.flushThrottle = time.Duration(config.FlushThrottleTimeout) * time.Millisecond
|
||||
sw.peerConfig.MConfig.SendRate = config.SendRate
|
||||
sw.peerConfig.MConfig.RecvRate = config.RecvRate
|
||||
sw.peerConfig.MConfig.maxMsgPacketPayloadSize = config.MaxMsgPacketPayloadSize
|
||||
|
||||
sw.BaseService = *cmn.NewBaseService(nil, "P2P Switch", sw)
|
||||
return sw
|
||||
}
|
||||
|
||||
// Not goroutine safe.
|
||||
// AddReactor adds the given reactor to the switch.
|
||||
// NOTE: Not goroutine safe.
|
||||
func (sw *Switch) AddReactor(name string, reactor Reactor) Reactor {
|
||||
// Validate the reactor.
|
||||
// No two reactors can share the same channel.
|
||||
@@ -112,43 +120,51 @@ func (sw *Switch) AddReactor(name string, reactor Reactor) Reactor {
|
||||
return reactor
|
||||
}
|
||||
|
||||
// Not goroutine safe.
|
||||
// Reactors returns a map of reactors registered on the switch.
|
||||
// NOTE: Not goroutine safe.
|
||||
func (sw *Switch) Reactors() map[string]Reactor {
|
||||
return sw.reactors
|
||||
}
|
||||
|
||||
// Not goroutine safe.
|
||||
// Reactor returns the reactor with the given name.
|
||||
// NOTE: Not goroutine safe.
|
||||
func (sw *Switch) Reactor(name string) Reactor {
|
||||
return sw.reactors[name]
|
||||
}
|
||||
|
||||
// Not goroutine safe.
|
||||
// AddListener adds the given listener to the switch for listening to incoming peer connections.
|
||||
// NOTE: Not goroutine safe.
|
||||
func (sw *Switch) AddListener(l Listener) {
|
||||
sw.listeners = append(sw.listeners, l)
|
||||
}
|
||||
|
||||
// Not goroutine safe.
|
||||
// Listeners returns the list of listeners the switch listens on.
|
||||
// NOTE: Not goroutine safe.
|
||||
func (sw *Switch) Listeners() []Listener {
|
||||
return sw.listeners
|
||||
}
|
||||
|
||||
// Not goroutine safe.
|
||||
// IsListening returns true if the switch has at least one listener.
|
||||
// NOTE: Not goroutine safe.
|
||||
func (sw *Switch) IsListening() bool {
|
||||
return len(sw.listeners) > 0
|
||||
}
|
||||
|
||||
// Not goroutine safe.
|
||||
// SetNodeInfo sets the switch's NodeInfo for checking compatibility and handshaking with other nodes.
|
||||
// NOTE: Not goroutine safe.
|
||||
func (sw *Switch) SetNodeInfo(nodeInfo *NodeInfo) {
|
||||
sw.nodeInfo = nodeInfo
|
||||
}
|
||||
|
||||
// Not goroutine safe.
|
||||
// NodeInfo returns the switch's NodeInfo.
|
||||
// NOTE: Not goroutine safe.
|
||||
func (sw *Switch) NodeInfo() *NodeInfo {
|
||||
return sw.nodeInfo
|
||||
}
|
||||
|
||||
// Not goroutine safe.
|
||||
// NOTE: Overwrites sw.nodeInfo.PubKey
|
||||
// SetNodePrivKey sets the switche's private key for authenticated encryption.
|
||||
// NOTE: Overwrites sw.nodeInfo.PubKey.
|
||||
// NOTE: Not goroutine safe.
|
||||
func (sw *Switch) SetNodePrivKey(nodePrivKey crypto.PrivKeyEd25519) {
|
||||
sw.nodePrivKey = nodePrivKey
|
||||
if sw.nodeInfo != nil {
|
||||
@@ -156,7 +172,7 @@ func (sw *Switch) SetNodePrivKey(nodePrivKey crypto.PrivKeyEd25519) {
|
||||
}
|
||||
}
|
||||
|
||||
// Switch.Start() starts all the reactors, peers, and listeners.
|
||||
// OnStart implements BaseService. It starts all the reactors, peers, and listeners.
|
||||
func (sw *Switch) OnStart() error {
|
||||
sw.BaseService.OnStart()
|
||||
// Start reactors
|
||||
@@ -166,10 +182,7 @@ func (sw *Switch) OnStart() error {
|
||||
return err
|
||||
}
|
||||
}
|
||||
// Start peers
|
||||
for _, peer := range sw.peers.List() {
|
||||
sw.startInitPeer(peer)
|
||||
}
|
||||
|
||||
// Start listeners
|
||||
for _, listener := range sw.listeners {
|
||||
go sw.listenerRoutine(listener)
|
||||
@@ -177,6 +190,7 @@ func (sw *Switch) OnStart() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// OnStop implements BaseService. It stops all listeners, peers, and reactors.
|
||||
func (sw *Switch) OnStop() {
|
||||
sw.BaseService.OnStop()
|
||||
// Stop listeners
|
||||
@@ -195,9 +209,12 @@ func (sw *Switch) OnStop() {
|
||||
}
|
||||
}
|
||||
|
||||
// addPeer checks the given peer's validity, performs a handshake, and adds the peer to the switch
|
||||
// and to all registered reactors.
|
||||
// NOTE: This performs a blocking handshake before the peer is added.
|
||||
// CONTRACT: If error is returned, peer is nil, and conn is immediately closed.
|
||||
func (sw *Switch) AddPeer(peer *Peer) error {
|
||||
func (sw *Switch) addPeer(peer *peer) error {
|
||||
|
||||
if err := sw.FilterConnByAddr(peer.Addr()); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -216,12 +233,12 @@ func (sw *Switch) AddPeer(peer *Peer) error {
|
||||
}
|
||||
|
||||
// Check version, chain id
|
||||
if err := sw.nodeInfo.CompatibleWith(peer.NodeInfo); err != nil {
|
||||
if err := sw.nodeInfo.CompatibleWith(peer.NodeInfo()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Check for duplicate peer
|
||||
if sw.peers.Has(peer.Key) {
|
||||
if sw.peers.Has(peer.Key()) {
|
||||
return ErrSwitchDuplicatePeer
|
||||
|
||||
}
|
||||
@@ -242,6 +259,7 @@ func (sw *Switch) AddPeer(peer *Peer) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// FilterConnByAddr returns an error if connecting to the given address is forbidden.
|
||||
func (sw *Switch) FilterConnByAddr(addr net.Addr) error {
|
||||
if sw.filterConnByAddr != nil {
|
||||
return sw.filterConnByAddr(addr)
|
||||
@@ -249,6 +267,7 @@ func (sw *Switch) FilterConnByAddr(addr net.Addr) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// FilterConnByPubKey returns an error if connecting to the given public key is forbidden.
|
||||
func (sw *Switch) FilterConnByPubKey(pubkey crypto.PubKeyEd25519) error {
|
||||
if sw.filterConnByPubKey != nil {
|
||||
return sw.filterConnByPubKey(pubkey)
|
||||
@@ -257,22 +276,24 @@ func (sw *Switch) FilterConnByPubKey(pubkey crypto.PubKeyEd25519) error {
|
||||
|
||||
}
|
||||
|
||||
// SetAddrFilter sets the function for filtering connections by address.
|
||||
func (sw *Switch) SetAddrFilter(f func(net.Addr) error) {
|
||||
sw.filterConnByAddr = f
|
||||
}
|
||||
|
||||
// SetPubKeyFilter sets the function for filtering connections by public key.
|
||||
func (sw *Switch) SetPubKeyFilter(f func(crypto.PubKeyEd25519) error) {
|
||||
sw.filterConnByPubKey = f
|
||||
}
|
||||
|
||||
func (sw *Switch) startInitPeer(peer *Peer) {
|
||||
func (sw *Switch) startInitPeer(peer *peer) {
|
||||
peer.Start() // spawn send/recv routines
|
||||
for _, reactor := range sw.reactors {
|
||||
reactor.AddPeer(peer)
|
||||
}
|
||||
}
|
||||
|
||||
// Dial a list of seeds asynchronously in random order
|
||||
// DialSeeds dials a list of seeds asynchronously in random order
|
||||
func (sw *Switch) DialSeeds(addrBook *AddrBook, seeds []string) error {
|
||||
|
||||
netAddrs, err := NewNetAddressStrings(seeds)
|
||||
@@ -315,12 +336,14 @@ func (sw *Switch) dialSeed(addr *NetAddress) {
|
||||
}
|
||||
}
|
||||
|
||||
func (sw *Switch) DialPeerWithAddress(addr *NetAddress, persistent bool) (*Peer, error) {
|
||||
// DialPeerWithAddress dials the given peer and runs sw.addPeer if it connects successfully.
|
||||
// If `persistent == true`, the switch will always try to reconnect to this peer if the connection ever fails.
|
||||
func (sw *Switch) DialPeerWithAddress(addr *NetAddress, persistent bool) (Peer, error) {
|
||||
sw.dialing.Set(addr.IP.String(), addr)
|
||||
defer sw.dialing.Delete(addr.IP.String())
|
||||
|
||||
sw.Logger.Info("Dialing peer", "address", addr)
|
||||
peer, err := newOutboundPeerWithConfig(addr, sw.reactorsByCh, sw.chDescs, sw.StopPeerForError, sw.nodePrivKey, sw.peerConfig)
|
||||
peer, err := newOutboundPeer(addr, sw.reactorsByCh, sw.chDescs, sw.StopPeerForError, sw.nodePrivKey, sw.peerConfig)
|
||||
if err != nil {
|
||||
sw.Logger.Error("Failed to dial peer", "address", addr, "err", err)
|
||||
return nil, err
|
||||
@@ -329,7 +352,7 @@ func (sw *Switch) DialPeerWithAddress(addr *NetAddress, persistent bool) (*Peer,
|
||||
if persistent {
|
||||
peer.makePersistent()
|
||||
}
|
||||
err = sw.AddPeer(peer)
|
||||
err = sw.addPeer(peer)
|
||||
if err != nil {
|
||||
sw.Logger.Error("Failed to add peer", "address", addr, "err", err)
|
||||
peer.CloseConn()
|
||||
@@ -339,6 +362,7 @@ func (sw *Switch) DialPeerWithAddress(addr *NetAddress, persistent bool) (*Peer,
|
||||
return peer, nil
|
||||
}
|
||||
|
||||
// IsDialing returns true if the switch is currently dialing the given address.
|
||||
func (sw *Switch) IsDialing(addr *NetAddress) bool {
|
||||
return sw.dialing.Has(addr.IP.String())
|
||||
}
|
||||
@@ -347,11 +371,12 @@ func (sw *Switch) IsDialing(addr *NetAddress) bool {
|
||||
// trying to send for defaultSendTimeoutSeconds. Returns a channel
|
||||
// which receives success values for each attempted send (false if times out)
|
||||
// NOTE: Broadcast uses goroutines, so order of broadcast may not be preserved.
|
||||
// TODO: Something more intelligent.
|
||||
func (sw *Switch) Broadcast(chID byte, msg interface{}) chan bool {
|
||||
successChan := make(chan bool, len(sw.peers.List()))
|
||||
sw.Logger.Debug("Broadcast", "channel", chID, "msg", msg)
|
||||
for _, peer := range sw.peers.List() {
|
||||
go func(peer *Peer) {
|
||||
go func(peer Peer) {
|
||||
success := peer.Send(chID, msg)
|
||||
successChan <- success
|
||||
}(peer)
|
||||
@@ -359,11 +384,11 @@ func (sw *Switch) Broadcast(chID byte, msg interface{}) chan bool {
|
||||
return successChan
|
||||
}
|
||||
|
||||
// Returns the count of outbound/inbound and outbound-dialing peers.
|
||||
// NumPeers returns the count of outbound/inbound and outbound-dialing peers.
|
||||
func (sw *Switch) NumPeers() (outbound, inbound, dialing int) {
|
||||
peers := sw.peers.List()
|
||||
for _, peer := range peers {
|
||||
if peer.outbound {
|
||||
if peer.IsOutbound() {
|
||||
outbound++
|
||||
} else {
|
||||
inbound++
|
||||
@@ -373,14 +398,16 @@ func (sw *Switch) NumPeers() (outbound, inbound, dialing int) {
|
||||
return
|
||||
}
|
||||
|
||||
// Peers returns the set of peers the switch is connected to.
|
||||
func (sw *Switch) Peers() IPeerSet {
|
||||
return sw.peers
|
||||
}
|
||||
|
||||
// Disconnect from a peer due to external error, retry if it is a persistent peer.
|
||||
// StopPeerForError disconnects from a peer due to external error.
|
||||
// If the peer is persistent, it will attempt to reconnect.
|
||||
// TODO: make record depending on reason.
|
||||
func (sw *Switch) StopPeerForError(peer *Peer, reason interface{}) {
|
||||
addr := NewNetAddress(peer.Addr())
|
||||
func (sw *Switch) StopPeerForError(peer Peer, reason interface{}) {
|
||||
addr, _ := NewNetAddressString(peer.NodeInfo().RemoteAddr)
|
||||
sw.Logger.Error("Stopping peer for error", "peer", peer, "err", reason)
|
||||
sw.stopAndRemovePeer(peer, reason)
|
||||
|
||||
@@ -410,14 +437,14 @@ func (sw *Switch) StopPeerForError(peer *Peer, reason interface{}) {
|
||||
}
|
||||
}
|
||||
|
||||
// Disconnect from a peer gracefully.
|
||||
// StopPeerGracefully disconnects from a peer gracefully.
|
||||
// TODO: handle graceful disconnects.
|
||||
func (sw *Switch) StopPeerGracefully(peer *Peer) {
|
||||
func (sw *Switch) StopPeerGracefully(peer Peer) {
|
||||
sw.Logger.Info("Stopping peer gracefully")
|
||||
sw.stopAndRemovePeer(peer, nil)
|
||||
}
|
||||
|
||||
func (sw *Switch) stopAndRemovePeer(peer *Peer, reason interface{}) {
|
||||
func (sw *Switch) stopAndRemovePeer(peer Peer, reason interface{}) {
|
||||
sw.peers.Remove(peer)
|
||||
peer.Stop()
|
||||
for _, reactor := range sw.reactors {
|
||||
@@ -457,18 +484,18 @@ func (sw *Switch) listenerRoutine(l Listener) {
|
||||
//-----------------------------------------------------------------------------
|
||||
|
||||
type SwitchEventNewPeer struct {
|
||||
Peer *Peer
|
||||
Peer Peer
|
||||
}
|
||||
|
||||
type SwitchEventDonePeer struct {
|
||||
Peer *Peer
|
||||
Peer Peer
|
||||
Error interface{}
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------
|
||||
// Switches connected via arbitrary net.Conn; useful for testing
|
||||
|
||||
// Returns n switches, connected according to the connect func.
|
||||
// MakeConnectedSwitches returns n switches, connected according to the connect func.
|
||||
// If connect==Connect2Switches, the switches will be fully connected.
|
||||
// initSwitch defines how the ith switch should be initialized (ie. with what reactors).
|
||||
// NOTE: panics if any switch fails to start.
|
||||
@@ -493,7 +520,7 @@ func MakeConnectedSwitches(cfg *cfg.P2PConfig, n int, initSwitch func(int, *Swit
|
||||
|
||||
var PanicOnAddPeerErr = false
|
||||
|
||||
// Will connect switches i and j via net.Pipe()
|
||||
// Connect2Switches will connect switches i and j via net.Pipe()
|
||||
// Blocks until a conection is established.
|
||||
// NOTE: caller ensures i and j are within bounds
|
||||
func Connect2Switches(switches []*Switch, i, j int) {
|
||||
@@ -519,6 +546,8 @@ func Connect2Switches(switches []*Switch, i, j int) {
|
||||
<-doneCh
|
||||
}
|
||||
|
||||
// StartSwitches calls sw.Start() for each given switch.
|
||||
// It returns the first encountered error.
|
||||
func StartSwitches(switches []*Switch) error {
|
||||
for _, s := range switches {
|
||||
_, err := s.Start() // start switch and reactors
|
||||
@@ -547,13 +576,13 @@ func makeSwitch(cfg *cfg.P2PConfig, i int, network, version string, initSwitch f
|
||||
}
|
||||
|
||||
func (sw *Switch) addPeerWithConnection(conn net.Conn) error {
|
||||
peer, err := newInboundPeer(conn, sw.reactorsByCh, sw.chDescs, sw.StopPeerForError, sw.nodePrivKey)
|
||||
peer, err := newInboundPeer(conn, sw.reactorsByCh, sw.chDescs, sw.StopPeerForError, sw.nodePrivKey, sw.peerConfig)
|
||||
if err != nil {
|
||||
conn.Close()
|
||||
return err
|
||||
}
|
||||
peer.SetLogger(sw.Logger.With("peer", conn.RemoteAddr()))
|
||||
if err = sw.AddPeer(peer); err != nil {
|
||||
if err = sw.addPeer(peer); err != nil {
|
||||
conn.Close()
|
||||
return err
|
||||
}
|
||||
@@ -562,13 +591,13 @@ func (sw *Switch) addPeerWithConnection(conn net.Conn) error {
|
||||
}
|
||||
|
||||
func (sw *Switch) addPeerWithConnectionAndConfig(conn net.Conn, config *PeerConfig) error {
|
||||
peer, err := newInboundPeerWithConfig(conn, sw.reactorsByCh, sw.chDescs, sw.StopPeerForError, sw.nodePrivKey, config)
|
||||
peer, err := newInboundPeer(conn, sw.reactorsByCh, sw.chDescs, sw.StopPeerForError, sw.nodePrivKey, config)
|
||||
if err != nil {
|
||||
conn.Close()
|
||||
return err
|
||||
}
|
||||
peer.SetLogger(sw.Logger.With("peer", conn.RemoteAddr()))
|
||||
if err = sw.AddPeer(peer); err != nil {
|
||||
if err = sw.addPeer(peer); err != nil {
|
||||
conn.Close()
|
||||
return err
|
||||
}
|
||||
|
@@ -37,8 +37,8 @@ type TestReactor struct {
|
||||
|
||||
mtx sync.Mutex
|
||||
channels []*ChannelDescriptor
|
||||
peersAdded []*Peer
|
||||
peersRemoved []*Peer
|
||||
peersAdded []Peer
|
||||
peersRemoved []Peer
|
||||
logMessages bool
|
||||
msgsCounter int
|
||||
msgsReceived map[byte][]PeerMessage
|
||||
@@ -59,24 +59,24 @@ func (tr *TestReactor) GetChannels() []*ChannelDescriptor {
|
||||
return tr.channels
|
||||
}
|
||||
|
||||
func (tr *TestReactor) AddPeer(peer *Peer) {
|
||||
func (tr *TestReactor) AddPeer(peer Peer) {
|
||||
tr.mtx.Lock()
|
||||
defer tr.mtx.Unlock()
|
||||
tr.peersAdded = append(tr.peersAdded, peer)
|
||||
}
|
||||
|
||||
func (tr *TestReactor) RemovePeer(peer *Peer, reason interface{}) {
|
||||
func (tr *TestReactor) RemovePeer(peer Peer, reason interface{}) {
|
||||
tr.mtx.Lock()
|
||||
defer tr.mtx.Unlock()
|
||||
tr.peersRemoved = append(tr.peersRemoved, peer)
|
||||
}
|
||||
|
||||
func (tr *TestReactor) Receive(chID byte, peer *Peer, msgBytes []byte) {
|
||||
func (tr *TestReactor) Receive(chID byte, peer Peer, msgBytes []byte) {
|
||||
if tr.logMessages {
|
||||
tr.mtx.Lock()
|
||||
defer tr.mtx.Unlock()
|
||||
//fmt.Printf("Received: %X, %X\n", chID, msgBytes)
|
||||
tr.msgsReceived[chID] = append(tr.msgsReceived[chID], PeerMessage{peer.Key, msgBytes, tr.msgsCounter})
|
||||
tr.msgsReceived[chID] = append(tr.msgsReceived[chID], PeerMessage{peer.Key(), msgBytes, tr.msgsCounter})
|
||||
tr.msgsCounter++
|
||||
}
|
||||
}
|
||||
@@ -244,9 +244,9 @@ func TestSwitchStopsNonPersistentPeerOnError(t *testing.T) {
|
||||
rp.Start()
|
||||
defer rp.Stop()
|
||||
|
||||
peer, err := newOutboundPeer(rp.Addr(), sw.reactorsByCh, sw.chDescs, sw.StopPeerForError, sw.nodePrivKey)
|
||||
peer, err := newOutboundPeer(rp.Addr(), sw.reactorsByCh, sw.chDescs, sw.StopPeerForError, sw.nodePrivKey, DefaultPeerConfig())
|
||||
require.Nil(err)
|
||||
err = sw.AddPeer(peer)
|
||||
err = sw.addPeer(peer)
|
||||
require.Nil(err)
|
||||
|
||||
// simulate failure by closing connection
|
||||
@@ -270,10 +270,10 @@ func TestSwitchReconnectsToPersistentPeer(t *testing.T) {
|
||||
rp.Start()
|
||||
defer rp.Stop()
|
||||
|
||||
peer, err := newOutboundPeer(rp.Addr(), sw.reactorsByCh, sw.chDescs, sw.StopPeerForError, sw.nodePrivKey)
|
||||
peer, err := newOutboundPeer(rp.Addr(), sw.reactorsByCh, sw.chDescs, sw.StopPeerForError, sw.nodePrivKey, DefaultPeerConfig())
|
||||
peer.makePersistent()
|
||||
require.Nil(err)
|
||||
err = sw.AddPeer(peer)
|
||||
err = sw.addPeer(peer)
|
||||
require.Nil(err)
|
||||
|
||||
// simulate failure by closing connection
|
||||
|
@@ -12,9 +12,9 @@ type AppConnConsensus interface {
|
||||
SetResponseCallback(abcicli.Callback)
|
||||
Error() error
|
||||
|
||||
InitChainSync(validators []*types.Validator) (err error)
|
||||
InitChainSync(types.RequestInitChain) (err error)
|
||||
|
||||
BeginBlockSync(hash []byte, header *types.Header) (err error)
|
||||
BeginBlockSync(types.RequestBeginBlock) (err error)
|
||||
DeliverTxAsync(tx []byte) *abcicli.ReqRes
|
||||
EndBlockSync(height uint64) (types.ResponseEndBlock, error)
|
||||
CommitSync() (res types.Result)
|
||||
@@ -34,8 +34,8 @@ type AppConnQuery interface {
|
||||
Error() error
|
||||
|
||||
EchoSync(string) (res types.Result)
|
||||
InfoSync() (resInfo types.ResponseInfo, err error)
|
||||
QuerySync(reqQuery types.RequestQuery) (resQuery types.ResponseQuery, err error)
|
||||
InfoSync(types.RequestInfo) (types.ResponseInfo, error)
|
||||
QuerySync(types.RequestQuery) (types.ResponseQuery, error)
|
||||
|
||||
// SetOptionSync(key string, value string) (res types.Result)
|
||||
}
|
||||
@@ -61,12 +61,12 @@ func (app *appConnConsensus) Error() error {
|
||||
return app.appConn.Error()
|
||||
}
|
||||
|
||||
func (app *appConnConsensus) InitChainSync(validators []*types.Validator) (err error) {
|
||||
return app.appConn.InitChainSync(validators)
|
||||
func (app *appConnConsensus) InitChainSync(req types.RequestInitChain) (err error) {
|
||||
return app.appConn.InitChainSync(req)
|
||||
}
|
||||
|
||||
func (app *appConnConsensus) BeginBlockSync(hash []byte, header *types.Header) (err error) {
|
||||
return app.appConn.BeginBlockSync(hash, header)
|
||||
func (app *appConnConsensus) BeginBlockSync(req types.RequestBeginBlock) (err error) {
|
||||
return app.appConn.BeginBlockSync(req)
|
||||
}
|
||||
|
||||
func (app *appConnConsensus) DeliverTxAsync(tx []byte) *abcicli.ReqRes {
|
||||
@@ -135,8 +135,8 @@ func (app *appConnQuery) EchoSync(msg string) (res types.Result) {
|
||||
return app.appConn.EchoSync(msg)
|
||||
}
|
||||
|
||||
func (app *appConnQuery) InfoSync() (types.ResponseInfo, error) {
|
||||
return app.appConn.InfoSync()
|
||||
func (app *appConnQuery) InfoSync(req types.RequestInfo) (types.ResponseInfo, error) {
|
||||
return app.appConn.InfoSync(req)
|
||||
}
|
||||
|
||||
func (app *appConnQuery) QuerySync(reqQuery types.RequestQuery) (types.ResponseQuery, error) {
|
||||
|
@@ -17,7 +17,7 @@ import (
|
||||
type AppConnTest interface {
|
||||
EchoAsync(string) *abcicli.ReqRes
|
||||
FlushSync() error
|
||||
InfoSync() (types.ResponseInfo, error)
|
||||
InfoSync(types.RequestInfo) (types.ResponseInfo, error)
|
||||
}
|
||||
|
||||
type appConnTest struct {
|
||||
@@ -36,8 +36,8 @@ func (app *appConnTest) FlushSync() error {
|
||||
return app.appConn.FlushSync()
|
||||
}
|
||||
|
||||
func (app *appConnTest) InfoSync() (types.ResponseInfo, error) {
|
||||
return app.appConn.InfoSync()
|
||||
func (app *appConnTest) InfoSync(req types.RequestInfo) (types.ResponseInfo, error) {
|
||||
return app.appConn.InfoSync(req)
|
||||
}
|
||||
|
||||
//----------------------------------------
|
||||
@@ -109,7 +109,7 @@ func BenchmarkEcho(b *testing.B) {
|
||||
proxy.FlushSync()
|
||||
|
||||
b.StopTimer()
|
||||
// info := proxy.InfoSync()
|
||||
// info := proxy.InfoSync(types.RequestInfo{""})
|
||||
//b.Log("N: ", b.N, info)
|
||||
}
|
||||
|
||||
@@ -138,7 +138,7 @@ func TestInfo(t *testing.T) {
|
||||
proxy := NewAppConnTest(cli)
|
||||
t.Log("Connected")
|
||||
|
||||
resInfo, err := proxy.InfoSync()
|
||||
resInfo, err := proxy.InfoSync(types.RequestInfo{""})
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error: %v", err)
|
||||
}
|
||||
|
@@ -5,11 +5,20 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
merktest "github.com/tendermint/merkleeyes/testutil"
|
||||
|
||||
cmn "github.com/tendermint/tmlibs/common"
|
||||
|
||||
"github.com/tendermint/tendermint/rpc/client"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
// MakeTxKV returns a text transaction, allong with expected key, value pair
|
||||
func MakeTxKV() ([]byte, []byte, []byte) {
|
||||
k := []byte(cmn.RandStr(8))
|
||||
v := []byte(cmn.RandStr(8))
|
||||
return k, v, append(k, append([]byte("="), v...)...)
|
||||
}
|
||||
|
||||
func TestHeaderEvents(t *testing.T) {
|
||||
require := require.New(t)
|
||||
for i, c := range GetClients() {
|
||||
@@ -76,7 +85,7 @@ func TestTxEventsSentWithBroadcastTxAsync(t *testing.T) {
|
||||
}
|
||||
|
||||
// make the tx
|
||||
_, _, tx := merktest.MakeTxKV()
|
||||
_, _, tx := MakeTxKV()
|
||||
evtTyp := types.EventStringTx(types.Tx(tx))
|
||||
|
||||
// send async
|
||||
@@ -109,7 +118,7 @@ func TestTxEventsSentWithBroadcastTxSync(t *testing.T) {
|
||||
}
|
||||
|
||||
// make the tx
|
||||
_, _, tx := merktest.MakeTxKV()
|
||||
_, _, tx := MakeTxKV()
|
||||
evtTyp := types.EventStringTx(types.Tx(tx))
|
||||
|
||||
// send async
|
||||
|
@@ -1,13 +1,14 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
data "github.com/tendermint/go-wire/data"
|
||||
ctypes "github.com/tendermint/tendermint/rpc/core/types"
|
||||
"github.com/tendermint/tendermint/rpc/lib/client"
|
||||
rpcclient "github.com/tendermint/tendermint/rpc/lib/client"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
events "github.com/tendermint/tmlibs/events"
|
||||
)
|
||||
@@ -142,7 +143,7 @@ func (c *HTTP) Genesis() (*ctypes.ResultGenesis, error) {
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (c *HTTP) Block(height int) (*ctypes.ResultBlock, error) {
|
||||
func (c *HTTP) Block(height *int) (*ctypes.ResultBlock, error) {
|
||||
result := new(ctypes.ResultBlock)
|
||||
_, err := c.rpc.Call("block", map[string]interface{}{"height": height}, result)
|
||||
if err != nil {
|
||||
@@ -151,7 +152,7 @@ func (c *HTTP) Block(height int) (*ctypes.ResultBlock, error) {
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (c *HTTP) Commit(height int) (*ctypes.ResultCommit, error) {
|
||||
func (c *HTTP) Commit(height *int) (*ctypes.ResultCommit, error) {
|
||||
result := new(ctypes.ResultCommit)
|
||||
_, err := c.rpc.Call("commit", map[string]interface{}{"height": height}, result)
|
||||
if err != nil {
|
||||
@@ -173,9 +174,9 @@ func (c *HTTP) Tx(hash []byte, prove bool) (*ctypes.ResultTx, error) {
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (c *HTTP) Validators() (*ctypes.ResultValidators, error) {
|
||||
func (c *HTTP) Validators(height *int) (*ctypes.ResultValidators, error) {
|
||||
result := new(ctypes.ResultValidators)
|
||||
_, err := c.rpc.Call("validators", map[string]interface{}{}, result)
|
||||
_, err := c.rpc.Call("validators", map[string]interface{}{"height": height}, result)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Validators")
|
||||
}
|
||||
@@ -349,14 +350,14 @@ func (w *WSEvents) parseEvent(data []byte) (err error) {
|
||||
// no way of exposing these failures, so we panic.
|
||||
// is this right? or silently ignore???
|
||||
func (w *WSEvents) subscribe(event string) {
|
||||
err := w.ws.Subscribe(event)
|
||||
err := w.ws.Subscribe(context.TODO(), event)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
func (w *WSEvents) unsubscribe(event string) {
|
||||
err := w.ws.Unsubscribe(event)
|
||||
err := w.ws.Unsubscribe(context.TODO(), event)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
@@ -42,9 +42,9 @@ type ABCIClient interface {
|
||||
// SignClient groups together the interfaces need to get valid
|
||||
// signatures and prove anything about the chain
|
||||
type SignClient interface {
|
||||
Block(height int) (*ctypes.ResultBlock, error)
|
||||
Commit(height int) (*ctypes.ResultCommit, error)
|
||||
Validators() (*ctypes.ResultValidators, error)
|
||||
Block(height *int) (*ctypes.ResultBlock, error)
|
||||
Commit(height *int) (*ctypes.ResultCommit, error)
|
||||
Validators(height *int) (*ctypes.ResultValidators, error)
|
||||
Tx(hash []byte, prove bool) (*ctypes.ResultTx, error)
|
||||
}
|
||||
|
||||
|
@@ -93,16 +93,16 @@ func (c Local) Genesis() (*ctypes.ResultGenesis, error) {
|
||||
return core.Genesis()
|
||||
}
|
||||
|
||||
func (c Local) Block(height int) (*ctypes.ResultBlock, error) {
|
||||
func (c Local) Block(height *int) (*ctypes.ResultBlock, error) {
|
||||
return core.Block(height)
|
||||
}
|
||||
|
||||
func (c Local) Commit(height int) (*ctypes.ResultCommit, error) {
|
||||
func (c Local) Commit(height *int) (*ctypes.ResultCommit, error) {
|
||||
return core.Commit(height)
|
||||
}
|
||||
|
||||
func (c Local) Validators() (*ctypes.ResultValidators, error) {
|
||||
return core.Validators()
|
||||
func (c Local) Validators(height *int) (*ctypes.ResultValidators, error) {
|
||||
return core.Validators(height)
|
||||
}
|
||||
|
||||
func (c Local) Tx(hash []byte, prove bool) (*ctypes.ResultTx, error) {
|
||||
|
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user