Mirror of https://github.com/fluencelabs/tendermint, synced 2025-07-16 04:41:59 +00:00

Compare commits: NonMalleab... → aditya/rev (57 commits)
Commit SHA1s:

4ac6c1defc, d8aaaf63fb, 7c11fc4116, 22e2484878, 7fe02a04db, 2e39418124,
c962567814, 8a282a5fee, 8dc39b69b7, d70135ec71, e179787d40, 0cf8812b17,
f7f034a8be, 4da3de79a7, f5b116c687, be06316c84, 8ba8497ac8, f9cce282da,
23fa2e1f1b, ba9cdeaed9, 8ed1400949, 14fa800773, f3ab967a46, a1eb2f6c6b,
41bf54a906, 88e0973f7d, 1e3364a014, a4e0a46b73, 3ee7c0bfba, af77077f3c,
4b9e8505cb, 98cb8c9783, 3a1f876802, 362729c2bb, 4d7cd8055b, 756440e0a2,
5398420103, c6daa48368, 657832a95a, 51b3428f5c, 9e4cd19878, 816dfce8fe,
dbf4062acd, 79e924b34f, 0c9a284f8d, 245e1c9ef7, 470f1efcc8, 4e3b6bfb26,
7a86e49312, 744d65f173, 1b69c6b56b, e5084a4787, 0787b79347, 823d916a11,
e8926867d8, 0f076e5fbe, ac232caef3
.gitignore (vendored, 2 changes)

```diff
@@ -43,3 +43,5 @@ terraform.tfstate.backup
 terraform.tfstate.d
+.vscode
+profile\.out
```
Linter configuration (2 hunks; each removes one entry from the disabled-linters list, reconstructed from the standard three-line diff context):

```diff
@@ -8,7 +8,6 @@ linters:
     - golint
     - maligned
     - errcheck
-    - staticcheck
     - interfacer
     - unconvert
     - goconst
@@ -16,7 +15,6 @@ linters:
     - nakedret
     - lll
     - gochecknoglobals
-    - gocritic
    - gochecknoinits
     - scopelint
     - stylecheck
```
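Enabling `gocritic` (dropped here from the disabled-linters list) drives several mechanical rewrites later in this compare: `if`/`else if` chains become tagless `switch` statements, and redundant function literals are removed. A minimal sketch of the `ifElseChain` rewrite the linter asks for (illustrative code, not from the repo):

```go
package main

import (
	"errors"
	"fmt"
)

// classify shows the tagless-switch form that gocritic's ifElseChain check
// prefers over a chain of if/else-if statements.
func classify(n int) (string, error) {
	switch {
	case n < 0:
		return "", errors.New("negative")
	case n == 0:
		return "zero", nil
	default:
		return "positive", nil
	}
}

func main() {
	s, _ := classify(3)
	fmt.Println(s) // positive
}
```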
CHANGELOG.md (42 changes)

```diff
@@ -1,10 +1,48 @@
 # Changelog
 
+## v0.32.2
+
+*July 31, 2019*
+
+Special thanks to external contributors on this release:
+@ruseinov, @bluele, @guagualvcha
+
+Friendly reminder, we have a [bug bounty
+program](https://hackerone.com/tendermint).
+
+### BREAKING CHANGES:
+
+- Go API
+  - [libs] [\#3811](https://github.com/tendermint/tendermint/issues/3811) Remove `db` from libs in favor of `https://github.com/tendermint/tm-db`
+
+### FEATURES:
+
+- [node] [\#3846](https://github.com/tendermint/tendermint/pull/3846) Allow replacing existing p2p.Reactor(s) using [`CustomReactors`
+  option](https://godoc.org/github.com/tendermint/tendermint/node#CustomReactors).
+  Warning: beware of accidental name clashes. Here is the list of existing
+  reactors: MEMPOOL, BLOCKCHAIN, CONSENSUS, EVIDENCE, PEX.
+- [p2p] [\#3834](https://github.com/tendermint/tendermint/issues/3834) Do not write 'Couldn't connect to any seeds' error log if there are no seeds in config file
+- [rpc] [\#3818](https://github.com/tendermint/tendermint/issues/3818) Make `max_body_bytes` and `max_header_bytes` configurable (@bluele)
+- [mempool] [\#3826](https://github.com/tendermint/tendermint/issues/3826) Make `max_msg_bytes` configurable (@bluele)
+- [blockchain] [\#3561](https://github.com/tendermint/tendermint/issues/3561) Add early version of the new blockchain reactor, which is supposed to be more modular and testable compared to the old version. To try it, you'll have to change `version` in the config file, [here](https://github.com/tendermint/tendermint/blob/master/config/toml.go#L303). NOTE: It's not ready for production yet. For further information, see [ADR-40](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-040-blockchain-reactor-refactor.md) & [ADR-43](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-043-blockchain-riri-org.md)
+
+### IMPROVEMENTS:
+
+- [abci] [\#3809](https://github.com/tendermint/tendermint/issues/3809) Recover from application panics in `server/socket_server.go` to allow socket cleanup (@ruseinov)
+- [rpc] [\#2252](https://github.com/tendermint/tendermint/issues/2252) Add `/broadcast_evidence` endpoint to submit double signing and other types of evidence
+- [p2p] [\#3664](https://github.com/tendermint/tendermint/issues/3664) p2p/conn: reuse buffer when write/read from secret connection (@guagualvcha)
+- [rpc] [\#3076](https://github.com/tendermint/tendermint/issues/3076) Improve transaction search performance
+
+### BUG FIXES:
+
+- [p2p] [\#3644](https://github.com/tendermint/tendermint/issues/3644) Fix error logging for connection stop (@defunctzombie)
+- [rpc] [\#3813](https://github.com/tendermint/tendermint/issues/3813) Return err if page is incorrect (less than 0 or greater than total pages)
+
 ## v0.32.1
 
 *July 15, 2019*
 
 Special thanks to external contributors on this release:
 @ParthDesai, @climber73, @jim380, @ashleyvega
 
 This release contains a minor enhancement to the ABCI and some breaking changes to our libs folder, namely:
@@ -26,7 +64,7 @@ program](https://hackerone.com/tendermint).
 
 ### FEATURES:
 
 - [node] Add variadic argument to `NewNode` to support functional options, allowing the Node to be more easily customized.
 - [node][\#3730](https://github.com/tendermint/tendermint/pull/3730) Add `CustomReactors` option to `NewNode` allowing caller to pass
   custom reactors to run inside Tendermint node (@ParthDesai)
 - [abci] [\#2127](https://github.com/tendermint/tendermint/issues/2127) `RequestCheckTx` has a new field, `CheckTxType`, which can take values of `CheckTxType_New` and `CheckTxType_Recheck`, indicating whether this is a new tx being checked for the first time or whether this tx is being rechecked after a block commit. This allows applications to skip certain expensive operations, like signature checking, if they've already been done once. See [docs](https://github.com/tendermint/tendermint/blob/eddb433d7c082efbeaf8974413a36641519ee895/docs/spec/abci/apps.md#mempool-connection)
```
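The `CustomReactors` entry above is the headline Go API addition in v0.32.2. A minimal sketch of how a caller might use it; `myReactor`, `buildNode`, and the dependency wiring below are illustrative assumptions, only `node.CustomReactors` itself comes from the changelog's godoc link:

```go
package main

import (
	cfg "github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/node"
	"github.com/tendermint/tendermint/p2p"
	"github.com/tendermint/tendermint/proxy"
	"github.com/tendermint/tendermint/types"
)

// buildNode wires a hypothetical custom reactor into the node. The reactor
// name must not clash with the built-ins: MEMPOOL, BLOCKCHAIN, CONSENSUS,
// EVIDENCE, PEX.
func buildNode(config *cfg.Config, privVal types.PrivValidator, nodeKey *p2p.NodeKey,
	myReactor p2p.Reactor, logger log.Logger) (*node.Node, error) {

	return node.NewNode(
		config,
		privVal,
		nodeKey,
		proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()),
		node.DefaultGenesisDocProviderFunc(config),
		node.DefaultDBProvider,
		node.DefaultMetricsProvider(config.Instrumentation),
		logger,
		node.CustomReactors(map[string]p2p.Reactor{"MYREACTOR": myReactor}),
	)
}
```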
Pending-changelog update (version bump plus new entries for the next release):

```diff
@@ -1,4 +1,4 @@
-## v0.32.2
+## v0.32.3
 
 \*\*
@@ -18,6 +18,12 @@ program](https://hackerone.com/tendermint).
 ### FEATURES:
 
 ### IMPROVEMENTS:
+- [abci] \#3809 Recover from application panics in `server/socket_server.go` to allow socket cleanup (@ruseinov)
+- [privval] \#3370 Refactors and simplifies validator/kms connection handling. Please refer to https://github.com/tendermint/tendermint/pull/3370#issue-257360971
+- [consensus] \#3839 Reduce "Error attempting to add vote" message severity (Error -> Info)
+- [mempool] \#3877 Make `max_tx_bytes` configurable instead of `max_msg_bytes`
 
 ### BUG FIXES:
 
+- [config] \#3868 Move misplaced `max_msg_bytes` into mempool section
+- [store] \#3893 Register block amino, not just crypto
```
README.md (40 changes; badge image markup was stripped by the mirror, so only the link targets remain)

```diff
@@ -1,23 +1,21 @@
 # Tendermint
 
 
 [Byzantine-Fault Tolerant](https://en.wikipedia.org/wiki/Byzantine_fault_tolerance)
 [State Machines](https://en.wikipedia.org/wiki/State_machine_replication).
-Or [Blockchain](https://en.wikipedia.org/wiki/Blockchain_(database)), for short.
+Or [Blockchain](<https://en.wikipedia.org/wiki/Blockchain_(database)>), for short.
 
 [](https://github.com/tendermint/tendermint/releases/latest)
 [](https://godoc.org/github.com/tendermint/tendermint)
 [](https://github.com/moovweb/gvm)
 [](https://riot.im/app/#/room/#tendermint:matrix.org)
 [](https://github.com/tendermint/tendermint/blob/master/LICENSE)
 [](https://github.com/tendermint/tendermint)
 
-Branch | Tests | Coverage
-----------|-------|----------
-master | [](https://circleci.com/gh/tendermint/tendermint/tree/master) | [](https://codecov.io/gh/tendermint/tendermint)
+| Branch | Tests | Coverage |
+| ------ | ----- | -------- |
+| master | [](https://circleci.com/gh/tendermint/tendermint/tree/master) | [](https://codecov.io/gh/tendermint/tendermint) |
 
 Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine - written in any programming language -
 and securely replicates it on many machines.
@@ -49,9 +47,9 @@ For examples of the kinds of bugs we're looking for, see [SECURITY.md](SECURITY.
 
 ## Minimum requirements
 
-Requirement|Notes
----|---
-Go version | Go1.11.4 or higher
+| Requirement | Notes              |
+| ----------- | ------------------ |
+| Go version  | Go1.11.4 or higher |
 
 ## Documentation
 
@@ -145,20 +143,20 @@ Additional documentation is found [here](/docs/tools).
 
 ### Sub-projects
 
-* [Amino](http://github.com/tendermint/go-amino), reflection-based proto3, with
+- [Amino](http://github.com/tendermint/go-amino), reflection-based proto3, with
   interfaces
-* [IAVL](http://github.com/tendermint/iavl), Merkleized IAVL+ Tree implementation
+- [IAVL](http://github.com/tendermint/iavl), Merkleized IAVL+ Tree implementation
+- [Tm-cmn](http://github.com/tendermint/tm-cmn), Commonly used libs across Tendermint & Cosmos repos
 
 ### Applications
 
-* [Cosmos SDK](http://github.com/cosmos/cosmos-sdk); a cryptocurrency application framework
-* [Ethermint](http://github.com/cosmos/ethermint); Ethereum on Tendermint
-* [Many more](https://tendermint.com/ecosystem)
+- [Cosmos SDK](http://github.com/cosmos/cosmos-sdk); a cryptocurrency application framework
+- [Ethermint](http://github.com/cosmos/ethermint); Ethereum on Tendermint
+- [Many more](https://tendermint.com/ecosystem)
 
 ### Research
 
-* [The latest gossip on BFT consensus](https://arxiv.org/abs/1807.04938)
-* [Master's Thesis on Tendermint](https://atrium.lib.uoguelph.ca/xmlui/handle/10214/9769)
-* [Original Whitepaper](https://tendermint.com/static/docs/tendermint.pdf)
-* [Blog](https://blog.cosmos.network/tendermint/home)
+- [The latest gossip on BFT consensus](https://arxiv.org/abs/1807.04938)
+- [Master's Thesis on Tendermint](https://atrium.lib.uoguelph.ca/xmlui/handle/10214/9769)
+- [Original Whitepaper](https://tendermint.com/static/docs/tendermint.pdf)
+- [Blog](https://blog.cosmos.network/tendermint/home)
```
gRPC client (grpcClient; the mirror dropped the file name):

```diff
@@ -6,8 +6,8 @@ import (
 	"sync"
 	"time"
 
-	context "golang.org/x/net/context"
-	grpc "google.golang.org/grpc"
+	"golang.org/x/net/context"
+	"google.golang.org/grpc"
 
 	"github.com/tendermint/tendermint/abci/types"
 	cmn "github.com/tendermint/tendermint/libs/common"
@@ -39,7 +39,7 @@ func NewGRPCClient(addr string, mustConnect bool) *grpcClient {
 	return cli
 }
 
-func dialerFunc(addr string, timeout time.Duration) (net.Conn, error) {
+func dialerFunc(ctx context.Context, addr string) (net.Conn, error) {
 	return cmn.Connect(addr)
 }
 
@@ -49,7 +49,7 @@ func (cli *grpcClient) OnStart() error {
 	}
 RETRY_LOOP:
 	for {
-		conn, err := grpc.Dial(cli.addr, grpc.WithInsecure(), grpc.WithDialer(dialerFunc))
+		conn, err := grpc.Dial(cli.addr, grpc.WithInsecure(), grpc.WithContextDialer(dialerFunc))
 		if err != nil {
 			if cli.mustConnect {
 				return err
@@ -65,7 +65,7 @@ RETRY_LOOP:
 
 ENSURE_CONNECTED:
 	for {
-		_, err := client.Echo(context.Background(), &types.RequestEcho{Message: "hello"}, grpc.FailFast(true))
+		_, err := client.Echo(context.Background(), &types.RequestEcho{Message: "hello"}, grpc.WaitForReady(true))
 		if err == nil {
 			break ENSURE_CONNECTED
 		}
@@ -125,7 +125,7 @@ func (cli *grpcClient) SetResponseCallback(resCb Callback) {
 
 func (cli *grpcClient) EchoAsync(msg string) *ReqRes {
 	req := types.ToRequestEcho(msg)
-	res, err := cli.client.Echo(context.Background(), req.GetEcho(), grpc.FailFast(true))
+	res, err := cli.client.Echo(context.Background(), req.GetEcho(), grpc.WaitForReady(true))
 	if err != nil {
 		cli.StopForError(err)
 	}
@@ -134,7 +134,7 @@ func (cli *grpcClient) EchoAsync(msg string) *ReqRes {
 
 func (cli *grpcClient) FlushAsync() *ReqRes {
 	req := types.ToRequestFlush()
-	res, err := cli.client.Flush(context.Background(), req.GetFlush(), grpc.FailFast(true))
+	res, err := cli.client.Flush(context.Background(), req.GetFlush(), grpc.WaitForReady(true))
 	if err != nil {
 		cli.StopForError(err)
 	}
@@ -143,7 +143,7 @@ func (cli *grpcClient) FlushAsync() *ReqRes {
 
 func (cli *grpcClient) InfoAsync(params types.RequestInfo) *ReqRes {
 	req := types.ToRequestInfo(params)
-	res, err := cli.client.Info(context.Background(), req.GetInfo(), grpc.FailFast(true))
+	res, err := cli.client.Info(context.Background(), req.GetInfo(), grpc.WaitForReady(true))
 	if err != nil {
 		cli.StopForError(err)
 	}
@@ -152,7 +152,7 @@ func (cli *grpcClient) InfoAsync(params types.RequestInfo) *ReqRes {
 
 func (cli *grpcClient) SetOptionAsync(params types.RequestSetOption) *ReqRes {
 	req := types.ToRequestSetOption(params)
-	res, err := cli.client.SetOption(context.Background(), req.GetSetOption(), grpc.FailFast(true))
+	res, err := cli.client.SetOption(context.Background(), req.GetSetOption(), grpc.WaitForReady(true))
 	if err != nil {
 		cli.StopForError(err)
 	}
@@ -161,7 +161,7 @@ func (cli *grpcClient) SetOptionAsync(params types.RequestSetOption) *ReqRes {
 
 func (cli *grpcClient) DeliverTxAsync(params types.RequestDeliverTx) *ReqRes {
 	req := types.ToRequestDeliverTx(params)
-	res, err := cli.client.DeliverTx(context.Background(), req.GetDeliverTx(), grpc.FailFast(true))
+	res, err := cli.client.DeliverTx(context.Background(), req.GetDeliverTx(), grpc.WaitForReady(true))
 	if err != nil {
 		cli.StopForError(err)
 	}
@@ -170,7 +170,7 @@ func (cli *grpcClient) DeliverTxAsync(params types.RequestDeliverTx) *ReqRes {
 
 func (cli *grpcClient) CheckTxAsync(params types.RequestCheckTx) *ReqRes {
 	req := types.ToRequestCheckTx(params)
-	res, err := cli.client.CheckTx(context.Background(), req.GetCheckTx(), grpc.FailFast(true))
+	res, err := cli.client.CheckTx(context.Background(), req.GetCheckTx(), grpc.WaitForReady(true))
 	if err != nil {
 		cli.StopForError(err)
 	}
@@ -179,7 +179,7 @@ func (cli *grpcClient) CheckTxAsync(params types.RequestCheckTx) *ReqRes {
 
 func (cli *grpcClient) QueryAsync(params types.RequestQuery) *ReqRes {
 	req := types.ToRequestQuery(params)
-	res, err := cli.client.Query(context.Background(), req.GetQuery(), grpc.FailFast(true))
+	res, err := cli.client.Query(context.Background(), req.GetQuery(), grpc.WaitForReady(true))
 	if err != nil {
 		cli.StopForError(err)
 	}
@@ -188,7 +188,7 @@ func (cli *grpcClient) QueryAsync(params types.RequestQuery) *ReqRes {
 
 func (cli *grpcClient) CommitAsync() *ReqRes {
 	req := types.ToRequestCommit()
-	res, err := cli.client.Commit(context.Background(), req.GetCommit(), grpc.FailFast(true))
+	res, err := cli.client.Commit(context.Background(), req.GetCommit(), grpc.WaitForReady(true))
 	if err != nil {
 		cli.StopForError(err)
 	}
@@ -197,7 +197,7 @@ func (cli *grpcClient) CommitAsync() *ReqRes {
 
 func (cli *grpcClient) InitChainAsync(params types.RequestInitChain) *ReqRes {
 	req := types.ToRequestInitChain(params)
-	res, err := cli.client.InitChain(context.Background(), req.GetInitChain(), grpc.FailFast(true))
+	res, err := cli.client.InitChain(context.Background(), req.GetInitChain(), grpc.WaitForReady(true))
 	if err != nil {
 		cli.StopForError(err)
 	}
@@ -206,7 +206,7 @@ func (cli *grpcClient) InitChainAsync(params types.RequestInitChain) *ReqRes {
 
 func (cli *grpcClient) BeginBlockAsync(params types.RequestBeginBlock) *ReqRes {
 	req := types.ToRequestBeginBlock(params)
-	res, err := cli.client.BeginBlock(context.Background(), req.GetBeginBlock(), grpc.FailFast(true))
+	res, err := cli.client.BeginBlock(context.Background(), req.GetBeginBlock(), grpc.WaitForReady(true))
 	if err != nil {
 		cli.StopForError(err)
 	}
@@ -215,7 +215,7 @@ func (cli *grpcClient) BeginBlockAsync(params types.RequestBeginBlock) *ReqRes {
 
 func (cli *grpcClient) EndBlockAsync(params types.RequestEndBlock) *ReqRes {
 	req := types.ToRequestEndBlock(params)
-	res, err := cli.client.EndBlock(context.Background(), req.GetEndBlock(), grpc.FailFast(true))
+	res, err := cli.client.EndBlock(context.Background(), req.GetEndBlock(), grpc.WaitForReady(true))
 	if err != nil {
 		cli.StopForError(err)
 	}
```
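These hunks are the standard migration off two deprecated grpc-go options: `grpc.WithDialer`, whose dialer received an explicit timeout, gives way to `grpc.WithContextDialer`, whose dialer receives a `context.Context`; and the per-call `grpc.FailFast(true)` is replaced with `grpc.WaitForReady(true)`. Note the second swap is not a pure rename: `WaitForReady(true)` corresponds to the old `FailFast(false)`, so these calls now block until the connection is ready instead of failing immediately. A self-contained sketch of the new style (illustrative names, not from the repo):

```go
package main

import (
	"context"
	"net"

	"google.golang.org/grpc"
)

// dialerFunc matches the signature grpc.WithContextDialer expects; any dial
// deadline now arrives through the context rather than a timeout argument.
func dialerFunc(ctx context.Context, addr string) (net.Conn, error) {
	var d net.Dialer
	return d.DialContext(ctx, "tcp", addr)
}

func dial(addr string) (*grpc.ClientConn, error) {
	return grpc.Dial(addr, grpc.WithInsecure(), grpc.WithContextDialer(dialerFunc))
}
```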
abci-cli command definitions (the mirror dropped the file name; every `RunE` wrapper closure is replaced by the handler itself, and `cmdBatch` is rewritten as a labeled switch):

```diff
@@ -174,9 +174,7 @@ where example.file looks something like:
 		info
 `,
 	Args: cobra.ExactArgs(0),
-	RunE: func(cmd *cobra.Command, args []string) error {
-		return cmdBatch(cmd, args)
-	},
+	RunE: cmdBatch,
 }
 
 var consoleCmd = &cobra.Command{
@@ -189,9 +187,7 @@ without opening a new connection each time
 `,
 	Args:      cobra.ExactArgs(0),
 	ValidArgs: []string{"echo", "info", "set_option", "deliver_tx", "check_tx", "commit", "query"},
-	RunE: func(cmd *cobra.Command, args []string) error {
-		return cmdConsole(cmd, args)
-	},
+	RunE: cmdConsole,
 }
 
 var echoCmd = &cobra.Command{
@@ -199,27 +195,21 @@ var echoCmd = &cobra.Command{
 	Short: "have the application echo a message",
 	Long:  "have the application echo a message",
 	Args:  cobra.ExactArgs(1),
-	RunE: func(cmd *cobra.Command, args []string) error {
-		return cmdEcho(cmd, args)
-	},
+	RunE: cmdEcho,
 }
 var infoCmd = &cobra.Command{
 	Use:   "info",
 	Short: "get some info about the application",
 	Long:  "get some info about the application",
 	Args:  cobra.ExactArgs(0),
-	RunE: func(cmd *cobra.Command, args []string) error {
-		return cmdInfo(cmd, args)
-	},
+	RunE: cmdInfo,
 }
 var setOptionCmd = &cobra.Command{
 	Use:   "set_option",
 	Short: "set an option on the application",
 	Long:  "set an option on the application",
 	Args:  cobra.ExactArgs(2),
-	RunE: func(cmd *cobra.Command, args []string) error {
-		return cmdSetOption(cmd, args)
-	},
+	RunE: cmdSetOption,
 }
 
 var deliverTxCmd = &cobra.Command{
@@ -227,9 +217,7 @@ var deliverTxCmd = &cobra.Command{
 	Short: "deliver a new transaction to the application",
 	Long:  "deliver a new transaction to the application",
 	Args:  cobra.ExactArgs(1),
-	RunE: func(cmd *cobra.Command, args []string) error {
-		return cmdDeliverTx(cmd, args)
-	},
+	RunE: cmdDeliverTx,
 }
 
 var checkTxCmd = &cobra.Command{
@@ -237,9 +225,7 @@ var checkTxCmd = &cobra.Command{
 	Short: "validate a transaction",
 	Long:  "validate a transaction",
 	Args:  cobra.ExactArgs(1),
-	RunE: func(cmd *cobra.Command, args []string) error {
-		return cmdCheckTx(cmd, args)
-	},
+	RunE: cmdCheckTx,
 }
 
 var commitCmd = &cobra.Command{
@@ -247,9 +233,7 @@ var commitCmd = &cobra.Command{
 	Short: "commit the application state and return the Merkle root hash",
 	Long:  "commit the application state and return the Merkle root hash",
 	Args:  cobra.ExactArgs(0),
-	RunE: func(cmd *cobra.Command, args []string) error {
-		return cmdCommit(cmd, args)
-	},
+	RunE: cmdCommit,
 }
 
 var versionCmd = &cobra.Command{
@@ -268,9 +252,7 @@ var queryCmd = &cobra.Command{
 	Short: "query the application state",
 	Long:  "query the application state",
 	Args:  cobra.ExactArgs(1),
-	RunE: func(cmd *cobra.Command, args []string) error {
-		return cmdQuery(cmd, args)
-	},
+	RunE: cmdQuery,
 }
 
 var counterCmd = &cobra.Command{
@@ -278,9 +260,7 @@ var counterCmd = &cobra.Command{
 	Short: "ABCI demo example",
 	Long:  "ABCI demo example",
 	Args:  cobra.ExactArgs(0),
-	RunE: func(cmd *cobra.Command, args []string) error {
-		return cmdCounter(cmd, args)
-	},
+	RunE: cmdCounter,
 }
 
 var kvstoreCmd = &cobra.Command{
@@ -288,9 +268,7 @@ var kvstoreCmd = &cobra.Command{
 	Short: "ABCI demo example",
 	Long:  "ABCI demo example",
 	Args:  cobra.ExactArgs(0),
-	RunE: func(cmd *cobra.Command, args []string) error {
-		return cmdKVStore(cmd, args)
-	},
+	RunE: cmdKVStore,
 }
 
 var testCmd = &cobra.Command{
@@ -298,9 +276,7 @@ var testCmd = &cobra.Command{
 	Short: "run integration tests",
 	Long:  "run integration tests",
 	Args:  cobra.ExactArgs(0),
-	RunE: func(cmd *cobra.Command, args []string) error {
-		return cmdTest(cmd, args)
-	},
+	RunE: cmdTest,
 }
 
 // Generates new Args array based off of previous call args to maintain flag persistence
@@ -356,16 +332,18 @@ func cmdTest(cmd *cobra.Command, args []string) error {
 
 func cmdBatch(cmd *cobra.Command, args []string) error {
 	bufReader := bufio.NewReader(os.Stdin)
+LOOP:
 	for {
 
 		line, more, err := bufReader.ReadLine()
-		if more {
+		switch {
+		case more:
 			return errors.New("Input line is too long")
-		} else if err == io.EOF {
-			break
-		} else if len(line) == 0 {
+		case err == io.EOF:
+			break LOOP
+		case len(line) == 0:
 			continue
-		} else if err != nil {
+		case err != nil:
 			return err
 		}
 
@@ -419,7 +397,7 @@ func muxOnCommands(cmd *cobra.Command, pArgs []string) error {
 		}
 
 		// otherwise, we need to skip the next one too
-		i += 1
+		i++
 		continue
 	}
```
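One subtlety in the `cmdBatch` rewrite: inside a `switch`, a bare `break` only exits the switch, so the loop gains a `LOOP` label and the EOF case uses `break LOOP` to leave the `for`. By contrast, `continue` still targets the loop directly. A minimal, runnable illustration of that behavior (not repo code):

```go
package main

import (
	"bufio"
	"fmt"
	"io"
	"strings"
)

func main() {
	r := bufio.NewReader(strings.NewReader("a\n\nb\n"))
LOOP:
	for {
		line, _, err := r.ReadLine()
		switch {
		case err == io.EOF:
			break LOOP // a bare break here would only exit the switch
		case err != nil:
			return
		case len(line) == 0:
			continue // continue is unaffected by the enclosing switch
		}
		fmt.Println(string(line))
	}
}
```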
The matching test helper gets the same dialer migration:

```diff
@@ -107,7 +107,7 @@ func testStream(t *testing.T, app types.Application) {
 //-------------------------
 // test grpc
 
-func dialerFunc(addr string, timeout time.Duration) (net.Conn, error) {
+func dialerFunc(ctx context.Context, addr string) (net.Conn, error) {
 	return cmn.Connect(addr)
 }
 
@@ -123,7 +123,7 @@ func testGRPCSync(t *testing.T, app *types.GRPCApplication) {
 	defer server.Stop()
 
 	// Connect to the socket
-	conn, err := grpc.Dial("unix://test.sock", grpc.WithInsecure(), grpc.WithDialer(dialerFunc))
+	conn, err := grpc.Dial("unix://test.sock", grpc.WithInsecure(), grpc.WithContextDialer(dialerFunc))
 	if err != nil {
 		t.Fatalf("Error dialing GRPC server: %v", err.Error())
 	}
```
KVStore example application (the `dbm` import moves from `libs/db` to the external `tm-db` module, and `Query` gets a doc comment):

```diff
@@ -9,8 +9,8 @@ import (
 	"github.com/tendermint/tendermint/abci/example/code"
 	"github.com/tendermint/tendermint/abci/types"
 	cmn "github.com/tendermint/tendermint/libs/common"
-	dbm "github.com/tendermint/tendermint/libs/db"
 	"github.com/tendermint/tendermint/version"
+	dbm "github.com/tendermint/tm-db"
 )
 
 var (
@@ -115,6 +115,7 @@ func (app *KVStoreApplication) Commit() types.ResponseCommit {
 	return types.ResponseCommit{Data: appHash}
 }
 
+// Returns an associated value or nil if missing.
 func (app *KVStoreApplication) Query(reqQuery types.RequestQuery) (resQuery types.ResponseQuery) {
 	if reqQuery.Prove {
 		value := app.state.db.Get(prefixKey(reqQuery.Data))
```
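The `libs/db` → `tm-db` move is the breaking change called out in the v0.32.2 changelog (\#3811); in these hunks only the import path changes, while the `dbm` API is used identically. A minimal sketch, assuming tm-db at the version this compare targets (where `Get` returns a plain `[]byte`):

```go
package main

import (
	"fmt"

	dbm "github.com/tendermint/tm-db" // formerly "github.com/tendermint/tendermint/libs/db"
)

func main() {
	db := dbm.NewMemDB() // same constructor name as the old libs/db package
	db.Set([]byte("k"), []byte("v"))
	fmt.Printf("%s\n", db.Get([]byte("k"))) // prints: v
}
```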
KVStore test (a deliberate slice-append pattern is exempted from the newly enabled gocritic check):

```diff
@@ -148,7 +148,7 @@ func TestValUpdates(t *testing.T) {
 
 	makeApplyBlock(t, kvstore, 2, diff, tx1, tx2, tx3)
 
-	vals1 = append(vals[:nInit-2], vals[nInit+1])
+	vals1 = append(vals[:nInit-2], vals[nInit+1]) // nolint: gocritic
 	vals2 = kvstore.Validators()
 	valsEqual(t, vals1, vals2)
```
Persistent KVStore application (tracks validator pubkeys by address, answers `/val` queries, and slashes duplicate-vote evidence in `BeginBlock`):

```diff
@@ -9,8 +9,10 @@ import (
 
 	"github.com/tendermint/tendermint/abci/example/code"
 	"github.com/tendermint/tendermint/abci/types"
-	dbm "github.com/tendermint/tendermint/libs/db"
+	"github.com/tendermint/tendermint/crypto/ed25519"
 	"github.com/tendermint/tendermint/libs/log"
+	tmtypes "github.com/tendermint/tendermint/types"
+	dbm "github.com/tendermint/tm-db"
 )
 
 const (
@@ -27,6 +29,8 @@ type PersistentKVStoreApplication struct {
 	// validator set
 	ValUpdates []types.ValidatorUpdate
 
+	valAddrToPubKeyMap map[string]types.PubKey
+
 	logger log.Logger
 }
 
@@ -40,8 +44,9 @@ func NewPersistentKVStoreApplication(dbDir string) *PersistentKVStoreApplication
 	state := loadState(db)
 
 	return &PersistentKVStoreApplication{
-		app:    &KVStoreApplication{state: state},
-		logger: log.NewNopLogger(),
+		app:                &KVStoreApplication{state: state},
+		valAddrToPubKeyMap: make(map[string]types.PubKey),
+		logger:             log.NewNopLogger(),
 	}
 }
 
@@ -83,8 +88,20 @@ func (app *PersistentKVStoreApplication) Commit() types.ResponseCommit {
 	return app.app.Commit()
 }
 
-func (app *PersistentKVStoreApplication) Query(reqQuery types.RequestQuery) types.ResponseQuery {
-	return app.app.Query(reqQuery)
+// When path=/val and data={validator address}, returns the validator update (types.ValidatorUpdate) varint encoded.
+// For any other path, returns an associated value or nil if missing.
+func (app *PersistentKVStoreApplication) Query(reqQuery types.RequestQuery) (resQuery types.ResponseQuery) {
+	switch reqQuery.Path {
+	case "/val":
+		key := []byte("val:" + string(reqQuery.Data))
+		value := app.app.state.db.Get(key)
+
+		resQuery.Key = reqQuery.Data
+		resQuery.Value = value
+		return
+	default:
+		return app.app.Query(reqQuery)
+	}
 }
 
 // Save the validators in the merkle tree
@@ -102,6 +119,19 @@ func (app *PersistentKVStoreApplication) InitChain(req types.RequestInitChain) t
 func (app *PersistentKVStoreApplication) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock {
 	// reset valset changes
 	app.ValUpdates = make([]types.ValidatorUpdate, 0)
+
+	for _, ev := range req.ByzantineValidators {
+		if ev.Type == tmtypes.ABCIEvidenceTypeDuplicateVote {
+			// decrease voting power by 1
+			if ev.TotalVotingPower == 0 {
+				continue
+			}
+			app.updateValidator(types.ValidatorUpdate{
+				PubKey: app.valAddrToPubKeyMap[string(ev.Validator.Address)],
+				Power:  ev.TotalVotingPower - 1,
+			})
+		}
+	}
 	return types.ResponseBeginBlock{}
 }
 
@@ -174,6 +204,10 @@ func (app *PersistentKVStoreApplication) execValidatorTx(tx []byte) types.Respon
 // add, update, or remove a validator
 func (app *PersistentKVStoreApplication) updateValidator(v types.ValidatorUpdate) types.ResponseDeliverTx {
 	key := []byte("val:" + string(v.PubKey.Data))
+
+	pubkey := ed25519.PubKeyEd25519{}
+	copy(pubkey[:], v.PubKey.Data)
+
 	if v.Power == 0 {
 		// remove validator
 		if !app.app.state.db.Has(key) {
@@ -183,6 +217,7 @@ func (app *PersistentKVStoreApplication) updateValidator(v types.ValidatorUpdate
 				Log:  fmt.Sprintf("Cannot remove non-existent validator %s", pubStr)}
 		}
 		app.app.state.db.Delete(key)
+		delete(app.valAddrToPubKeyMap, string(pubkey.Address()))
 	} else {
 		// add or update validator
 		value := bytes.NewBuffer(make([]byte, 0))
@@ -192,6 +227,7 @@ func (app *PersistentKVStoreApplication) updateValidator(v types.ValidatorUpdate
 				Log:  fmt.Sprintf("Error encoding validator: %v", err)}
 		}
 		app.app.state.db.Set(key, value.Bytes())
+		app.valAddrToPubKeyMap[string(pubkey.Address())] = v.PubKey
 	}
 
 	// we only update the changes array if we successfully updated the tree
```
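Given the new `/val` path above, a client can fetch a validator's stored update by address. A sketch using the ABCI socket client; the address, wiring, and `queryValidator` helper are illustrative assumptions, not part of this diff:

```go
package main

import (
	"fmt"

	abcicli "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/types"
)

func queryValidator(addr []byte) error {
	// Connect to a running ABCI app over a socket (address assumed).
	cli := abcicli.NewSocketClient("tcp://127.0.0.1:26658", true)
	if err := cli.Start(); err != nil {
		return err
	}
	defer cli.Stop()

	// Path "/val" hits the branch added above; Data is the validator address.
	res, err := cli.QuerySync(types.RequestQuery{Path: "/val", Data: addr})
	if err != nil {
		return err
	}
	// res.Value holds the varint-length-prefixed amino encoding of a
	// types.ValidatorUpdate, per the Query doc comment.
	fmt.Printf("key=%X value=%X\n", res.Key, res.Value)
	return nil
}
```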
ABCI socket server (another ifElseChain → switch rewrite):

```diff
@@ -127,11 +127,12 @@ func (s *SocketServer) acceptConnectionsRoutine() {
 
 func (s *SocketServer) waitForClose(closeConn chan error, connID int) {
 	err := <-closeConn
-	if err == io.EOF {
+	switch {
+	case err == io.EOF:
 		s.Logger.Error("Connection was closed by client")
-	} else if err != nil {
+	case err != nil:
 		s.Logger.Error("Connection error", "error", err)
-	} else {
+	default:
 		// never happens
 		s.Logger.Error("Connection was closed.")
 	}
```
Blockchain v0 codec (the old reactor moves into a versioned package):

```diff
@@ -1,4 +1,4 @@
-package blockchain
+package v0
 
 import (
 	amino "github.com/tendermint/go-amino"
```
Blockchain v0 block pool:

```diff
@@ -1,4 +1,4 @@
-package blockchain
+package v0
 
 import (
 	"errors"
@@ -59,6 +59,7 @@ var peerTimeout = 15 * time.Second // not const so we can override with tests
 	are not at peer limits, we can probably switch to consensus reactor
 */
 
+// BlockPool keeps track of the fast sync peers, block requests and block responses.
 type BlockPool struct {
 	cmn.BaseService
 	startTime time.Time
@@ -111,17 +112,18 @@ func (pool *BlockPool) makeRequestersRoutine() {
 		}
 
 		_, numPending, lenRequesters := pool.GetStatus()
-		if numPending >= maxPendingRequests {
+		switch {
+		case numPending >= maxPendingRequests:
 			// sleep for a bit.
 			time.Sleep(requestIntervalMS * time.Millisecond)
 			// check for timed out peers
 			pool.removeTimedoutPeers()
-		} else if lenRequesters >= maxTotalRequesters {
+		case lenRequesters >= maxTotalRequesters:
 			// sleep for a bit.
 			time.Sleep(requestIntervalMS * time.Millisecond)
 			// check for timed out peers
 			pool.removeTimedoutPeers()
-		} else {
+		default:
 			// request for more blocks.
 			pool.makeNextRequester()
 		}
@@ -184,6 +186,7 @@ func (pool *BlockPool) IsCaughtUp() bool {
 	return isCaughtUp
 }
 
+// PeekTwoBlocks returns blocks at pool.height and pool.height+1.
 // We need to see the second block's Commit to validate the first block.
 // So we peek two blocks at a time.
 // The caller will verify the commit.
@@ -200,7 +203,7 @@ func (pool *BlockPool) PeekTwoBlocks() (first *types.Block, second *types.Block)
 	return
 }
 
-// Pop the first block at pool.height
+// PopRequest pops the first block at pool.height.
 // It must have been validated by 'second'.Commit from PeekTwoBlocks().
 func (pool *BlockPool) PopRequest() {
 	pool.mtx.Lock()
@@ -220,7 +223,7 @@ func (pool *BlockPool) PopRequest() {
 	}
 }
 
-// Invalidates the block at pool.height,
+// RedoRequest invalidates the block at pool.height,
 // Remove the peer and redo request from others.
 // Returns the ID of the removed peer.
 func (pool *BlockPool) RedoRequest(height int64) p2p.ID {
@@ -236,6 +239,7 @@ func (pool *BlockPool) RedoRequest(height int64) p2p.ID {
 	return peerID
 }
 
+// AddBlock validates that the block comes from the peer it was expected from and calls the requester to store it.
 // TODO: ensure that blocks come in order for each peer.
 func (pool *BlockPool) AddBlock(peerID p2p.ID, block *types.Block, blockSize int) {
 	pool.mtx.Lock()
@@ -565,9 +569,9 @@ func (bpr *bpRequester) reset() {
 // Tells bpRequester to pick another peer and try again.
 // NOTE: Nonblocking, and does nothing if another redo
 // was already requested.
-func (bpr *bpRequester) redo(peerId p2p.ID) {
+func (bpr *bpRequester) redo(peerID p2p.ID) {
 	select {
-	case bpr.redoCh <- peerId:
+	case bpr.redoCh <- peerID:
 	default:
 	}
 }
@@ -622,8 +626,8 @@ OUTER_LOOP:
 	}
 }
 
 //-------------------------------------
 
+// BlockRequest stores a block request identified by the block Height and the PeerID responsible for
+// delivering the block
 type BlockRequest struct {
 	Height int64
 	PeerID p2p.ID
```
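The `PeekTwoBlocks`/`PopRequest` comments above describe the consumption contract: the block at `pool.height` is only safe to pop after its commit, carried in the next block, has been verified. A schematic consumer under that contract; `drainVerified` and the `verify` callback are hypothetical helpers (the real loop is the v0 reactor's `poolRoutine`):

```go
package v0

import "github.com/tendermint/tendermint/types"

// drainVerified sketches the peek-verify-pop contract. Verification itself is
// delegated to the caller; this is illustrative, not repo code.
func drainVerified(pool *BlockPool, verify func(first, second *types.Block) bool) {
	for {
		first, second := pool.PeekTwoBlocks()
		if first == nil || second == nil {
			return // need both blocks before the first can be validated
		}
		if !verify(first, second) {
			pool.RedoRequest(first.Height) // invalidate and refetch from another peer
			continue
		}
		pool.PopRequest() // advances pool.height past the verified block
	}
}
```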
Two further files in the package receive the same rename (file names not preserved by the mirror):

```diff
@@ -1,4 +1,4 @@
-package blockchain
+package v0
 
 import (
 	"fmt"
@@ -1,4 +1,4 @@
-package blockchain
+package v0
 
 import (
 	"errors"
```
Blockchain v0 reactor (BlockStore moves to the new `store` package; empty if-bodies that only existed to hold comments are flattened):

```diff
@@ -11,6 +11,7 @@ import (
 	"github.com/tendermint/tendermint/libs/log"
 	"github.com/tendermint/tendermint/p2p"
 	sm "github.com/tendermint/tendermint/state"
+	"github.com/tendermint/tendermint/store"
 	"github.com/tendermint/tendermint/types"
 )
 
@@ -60,7 +61,7 @@ type BlockchainReactor struct {
 	initialState sm.State
 
 	blockExec *sm.BlockExecutor
-	store     *BlockStore
+	store     *store.BlockStore
 	pool      *BlockPool
 	fastSync  bool
 
@@ -69,7 +70,7 @@ type BlockchainReactor struct {
 }
 
 // NewBlockchainReactor returns new reactor instance.
-func NewBlockchainReactor(state sm.State, blockExec *sm.BlockExecutor, store *BlockStore,
+func NewBlockchainReactor(state sm.State, blockExec *sm.BlockExecutor, store *store.BlockStore,
 	fastSync bool) *BlockchainReactor {
 
 	if state.LastBlockHeight != store.Height() {
@@ -140,9 +141,9 @@ func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor {
 // AddPeer implements Reactor by sending our state to peer.
 func (bcR *BlockchainReactor) AddPeer(peer p2p.Peer) {
 	msgBytes := cdc.MustMarshalBinaryBare(&bcStatusResponseMessage{bcR.store.Height()})
-	if !peer.Send(BlockchainChannel, msgBytes) {
-		// doing nothing, will try later in `poolRoutine`
-	}
+	peer.Send(BlockchainChannel, msgBytes)
+	// it's OK if send fails. will try later in poolRoutine
+
 	// peer is added to the pool once we receive the first
 	// bcStatusResponseMessage from the peer and call pool.SetPeerHeight
 }
@@ -190,18 +191,13 @@ func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
 
 	switch msg := msg.(type) {
 	case *bcBlockRequestMessage:
-		if queued := bcR.respondToPeer(msg, src); !queued {
-			// Unfortunately not queued since the queue is full.
-		}
+		bcR.respondToPeer(msg, src)
 	case *bcBlockResponseMessage:
 		bcR.pool.AddBlock(src.ID(), msg.Block, len(msgBytes))
 	case *bcStatusRequestMessage:
 		// Send peer our state.
 		msgBytes := cdc.MustMarshalBinaryBare(&bcStatusResponseMessage{bcR.store.Height()})
-		queued := src.TrySend(BlockchainChannel, msgBytes)
-		if !queued {
-			// sorry
-		}
+		src.TrySend(BlockchainChannel, msgBytes)
 	case *bcStatusResponseMessage:
 		// Got a peer status. Unverified.
 		bcR.pool.SetPeerHeight(src.ID(), msg.Height)
@@ -273,9 +269,10 @@ FOR_LOOP:
 				conR, ok := bcR.Switch.Reactor("CONSENSUS").(consensusReactor)
 				if ok {
 					conR.SwitchToConsensus(state, blocksSynced)
-				} else {
-					// should only happen during testing
 				}
+				// else {
+				// should only happen during testing
+				// }
 
 				break FOR_LOOP
 			}
@@ -378,6 +375,7 @@ type BlockchainMessage interface {
 	ValidateBasic() error
 }
 
+// RegisterBlockchainMessages registers the fast sync messages for amino encoding.
 func RegisterBlockchainMessages(cdc *amino.Codec) {
 	cdc.RegisterInterface((*BlockchainMessage)(nil), nil)
 	cdc.RegisterConcrete(&bcBlockRequestMessage{}, "tendermint/blockchain/BlockRequest", nil)
@@ -425,8 +423,8 @@ func (m *bcNoBlockResponseMessage) ValidateBasic() error {
 	return nil
 }
 
-func (brm *bcNoBlockResponseMessage) String() string {
-	return fmt.Sprintf("[bcNoBlockResponseMessage %d]", brm.Height)
+func (m *bcNoBlockResponseMessage) String() string {
+	return fmt.Sprintf("[bcNoBlockResponseMessage %d]", m.Height)
 }
 
 //-------------------------------------
```
Blockchain v0 reactor test (local `makeVote` helper replaced by `types.MakeVote`; table-driven `ValidateBasic` tests added for the four message types):

```diff
@@ -1,4 +1,4 @@
-package blockchain
+package v0
 
 import (
 	"os"
@@ -6,12 +6,13 @@ import (
 	"testing"
 	"time"
 
+	"github.com/pkg/errors"
+	"github.com/tendermint/tendermint/store"
+
 	"github.com/stretchr/testify/assert"
 
 	abci "github.com/tendermint/tendermint/abci/types"
 	cfg "github.com/tendermint/tendermint/config"
 	cmn "github.com/tendermint/tendermint/libs/common"
-	dbm "github.com/tendermint/tendermint/libs/db"
 	"github.com/tendermint/tendermint/libs/log"
 	"github.com/tendermint/tendermint/mock"
 	"github.com/tendermint/tendermint/p2p"
@@ -19,6 +20,7 @@ import (
 	sm "github.com/tendermint/tendermint/state"
 	"github.com/tendermint/tendermint/types"
 	tmtime "github.com/tendermint/tendermint/types/time"
+	dbm "github.com/tendermint/tm-db"
 )
 
 var config *cfg.Config
@@ -43,24 +45,6 @@ func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.G
 	}, privValidators
 }
 
-func makeVote(header *types.Header, blockID types.BlockID, valset *types.ValidatorSet, privVal types.PrivValidator) *types.Vote {
-	addr := privVal.GetPubKey().Address()
-	idx, _ := valset.GetByAddress(addr)
-	vote := &types.Vote{
-		ValidatorAddress: addr,
-		ValidatorIndex:   idx,
-		Height:           header.Height,
-		Round:            1,
-		Timestamp:        tmtime.Now(),
-		Type:             types.PrecommitType,
-		BlockID:          blockID,
-	}
-
-	privVal.SignVote(header.ChainID, vote)
-
-	return vote
-}
-
 type BlockchainReactorPair struct {
 	reactor *BlockchainReactor
 	app     proxy.AppConns
@@ -76,16 +60,16 @@ func newBlockchainReactor(logger log.Logger, genDoc *types.GenesisDoc, privVals
 	proxyApp := proxy.NewAppConns(cc)
 	err := proxyApp.Start()
 	if err != nil {
-		panic(cmn.ErrorWrap(err, "error start app"))
+		panic(errors.Wrap(err, "error start app"))
 	}
 
 	blockDB := dbm.NewMemDB()
 	stateDB := dbm.NewMemDB()
-	blockStore := NewBlockStore(blockDB)
+	blockStore := store.NewBlockStore(blockDB)
 
 	state, err := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc)
 	if err != nil {
-		panic(cmn.ErrorWrap(err, "error constructing state from genesis file"))
+		panic(errors.Wrap(err, "error constructing state from genesis file"))
 	}
 
 	// Make the BlockchainReactor itself.
@@ -104,8 +88,12 @@ func newBlockchainReactor(logger log.Logger, genDoc *types.GenesisDoc, privVals
 		lastBlockMeta := blockStore.LoadBlockMeta(blockHeight - 1)
 		lastBlock := blockStore.LoadBlock(blockHeight - 1)
 
-		vote := makeVote(&lastBlock.Header, lastBlockMeta.BlockID, state.Validators, privVals[0]).CommitSig()
-		lastCommit = types.NewCommit(lastBlockMeta.BlockID, []*types.CommitSig{vote})
+		vote, err := types.MakeVote(lastBlock.Header.Height, lastBlockMeta.BlockID, state.Validators, privVals[0], lastBlock.Header.ChainID)
+		if err != nil {
+			panic(err)
+		}
+		voteCommitSig := vote.CommitSig()
+		lastCommit = types.NewCommit(lastBlockMeta.BlockID, []*types.CommitSig{voteCommitSig})
 	}
 
 	thisBlock := makeBlock(blockHeight, state, lastCommit)
@@ -115,7 +103,7 @@ func newBlockchainReactor(logger log.Logger, genDoc *types.GenesisDoc, privVals
 
 	state, err = blockExec.ApplyBlock(state, blockID, thisBlock)
 	if err != nil {
-		panic(cmn.ErrorWrap(err, "error apply block"))
+		panic(errors.Wrap(err, "error apply block"))
 	}
 
 	blockStore.SaveBlock(thisBlock, thisParts, lastCommit)
@@ -258,6 +246,82 @@ func TestBadBlockStopsPeer(t *testing.T) {
 	assert.True(t, lastReactorPair.reactor.Switch.Peers().Size() < len(reactorPairs)-1)
 }
 
+func TestBcBlockRequestMessageValidateBasic(t *testing.T) {
+	testCases := []struct {
+		testName      string
+		requestHeight int64
+		expectErr     bool
+	}{
+		{"Valid Request Message", 0, false},
+		{"Valid Request Message", 1, false},
+		{"Invalid Request Message", -1, true},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.testName, func(t *testing.T) {
+			request := bcBlockRequestMessage{Height: tc.requestHeight}
+			assert.Equal(t, tc.expectErr, request.ValidateBasic() != nil, "Validate Basic had an unexpected result")
+		})
+	}
+}
+
+func TestBcNoBlockResponseMessageValidateBasic(t *testing.T) {
+	testCases := []struct {
+		testName          string
+		nonResponseHeight int64
+		expectErr         bool
+	}{
+		{"Valid Non-Response Message", 0, false},
+		{"Valid Non-Response Message", 1, false},
+		{"Invalid Non-Response Message", -1, true},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.testName, func(t *testing.T) {
+			nonResponse := bcNoBlockResponseMessage{Height: tc.nonResponseHeight}
+			assert.Equal(t, tc.expectErr, nonResponse.ValidateBasic() != nil, "Validate Basic had an unexpected result")
+		})
+	}
+}
+
+func TestBcStatusRequestMessageValidateBasic(t *testing.T) {
+	testCases := []struct {
+		testName      string
+		requestHeight int64
+		expectErr     bool
+	}{
+		{"Valid Request Message", 0, false},
+		{"Valid Request Message", 1, false},
+		{"Invalid Request Message", -1, true},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.testName, func(t *testing.T) {
+			request := bcStatusRequestMessage{Height: tc.requestHeight}
+			assert.Equal(t, tc.expectErr, request.ValidateBasic() != nil, "Validate Basic had an unexpected result")
+		})
+	}
+}
+
+func TestBcStatusResponseMessageValidateBasic(t *testing.T) {
+	testCases := []struct {
+		testName       string
+		responseHeight int64
+		expectErr      bool
+	}{
+		{"Valid Response Message", 0, false},
+		{"Valid Response Message", 1, false},
+		{"Invalid Response Message", -1, true},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.testName, func(t *testing.T) {
+			response := bcStatusResponseMessage{Height: tc.responseHeight}
+			assert.Equal(t, tc.expectErr, response.ValidateBasic() != nil, "Validate Basic had an unexpected result")
+		})
+	}
+}
+
 //----------------------------------------------
 // utility funcs
```
blockchain/v1/codec.go (new file, 13 lines)

```go
package v1

import (
	amino "github.com/tendermint/go-amino"
	"github.com/tendermint/tendermint/types"
)

var cdc = amino.NewCodec()

func init() {
	RegisterBlockchainMessages(cdc)
	types.RegisterBlockAmino(cdc)
}
```
blockchain/v1/peer.go (new file, 209 lines)

```go
package v1

import (
	"fmt"
	"math"
	"time"

	flow "github.com/tendermint/tendermint/libs/flowrate"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/p2p"
	"github.com/tendermint/tendermint/types"
)

//--------
// Peer

// BpPeerParams stores the peer parameters that are used when creating a peer.
type BpPeerParams struct {
	timeout     time.Duration
	minRecvRate int64
	sampleRate  time.Duration
	windowSize  time.Duration
}

// BpPeer is the datastructure associated with a fast sync peer.
type BpPeer struct {
	logger log.Logger
	ID     p2p.ID

	Height                  int64                  // the peer reported height
	NumPendingBlockRequests int                    // number of requests still waiting for block responses
	blocks                  map[int64]*types.Block // blocks received or expected to be received from this peer
	blockResponseTimer      *time.Timer
	recvMonitor             *flow.Monitor
	params                  *BpPeerParams // parameters for timer and monitor

	onErr func(err error, peerID p2p.ID) // function to call on error
}

// NewBpPeer creates a new peer.
func NewBpPeer(
	peerID p2p.ID, height int64, onErr func(err error, peerID p2p.ID), params *BpPeerParams) *BpPeer {

	if params == nil {
		params = BpPeerDefaultParams()
	}
	return &BpPeer{
		ID:     peerID,
		Height: height,
		blocks: make(map[int64]*types.Block, maxRequestsPerPeer),
		logger: log.NewNopLogger(),
		onErr:  onErr,
		params: params,
	}
}

// String returns a string representation of a peer.
func (peer *BpPeer) String() string {
	return fmt.Sprintf("peer: %v height: %v pending: %v", peer.ID, peer.Height, peer.NumPendingBlockRequests)
}

// SetLogger sets the logger of the peer.
func (peer *BpPeer) SetLogger(l log.Logger) {
	peer.logger = l
}

// Cleanup performs cleanup of the peer, removes blocks, requests, stops timer and monitor.
func (peer *BpPeer) Cleanup() {
	if peer.blockResponseTimer != nil {
		peer.blockResponseTimer.Stop()
	}
	if peer.NumPendingBlockRequests != 0 {
		peer.logger.Info("peer with pending requests is being cleaned", "peer", peer.ID)
	}
	if len(peer.blocks)-peer.NumPendingBlockRequests != 0 {
		peer.logger.Info("peer with pending blocks is being cleaned", "peer", peer.ID)
	}
	for h := range peer.blocks {
		delete(peer.blocks, h)
	}
	peer.NumPendingBlockRequests = 0
	peer.recvMonitor = nil
}

// BlockAtHeight returns the block at a given height if available and errMissingBlock otherwise.
func (peer *BpPeer) BlockAtHeight(height int64) (*types.Block, error) {
	block, ok := peer.blocks[height]
	if !ok {
		return nil, errMissingBlock
	}
	if block == nil {
		return nil, errMissingBlock
	}
	return peer.blocks[height], nil
}

// AddBlock adds a block at peer level. Block must be non-nil and recvSize a positive integer.
// The peer must have a pending request for this block.
func (peer *BpPeer) AddBlock(block *types.Block, recvSize int) error {
	if block == nil || recvSize < 0 {
		panic("bad parameters")
	}
	existingBlock, ok := peer.blocks[block.Height]
	if !ok {
		peer.logger.Error("unsolicited block", "blockHeight", block.Height, "peer", peer.ID)
		return errMissingBlock
	}
	if existingBlock != nil {
		peer.logger.Error("already have a block for height", "height", block.Height)
		return errDuplicateBlock
	}
	if peer.NumPendingBlockRequests == 0 {
		panic("peer does not have pending requests")
	}
	peer.blocks[block.Height] = block
	peer.NumPendingBlockRequests--
	if peer.NumPendingBlockRequests == 0 {
		peer.stopMonitor()
		peer.stopBlockResponseTimer()
	} else {
		peer.recvMonitor.Update(recvSize)
		peer.resetBlockResponseTimer()
	}
	return nil
}

// RemoveBlock removes the block of given height
func (peer *BpPeer) RemoveBlock(height int64) {
	delete(peer.blocks, height)
}

// RequestSent records that a request was sent, and starts the peer timer and monitor if needed.
func (peer *BpPeer) RequestSent(height int64) {
	peer.blocks[height] = nil

	if peer.NumPendingBlockRequests == 0 {
		peer.startMonitor()
		peer.resetBlockResponseTimer()
	}
	peer.NumPendingBlockRequests++
}

// CheckRate verifies that the response rate of the peer is acceptable (higher than the minimum allowed).
func (peer *BpPeer) CheckRate() error {
	if peer.NumPendingBlockRequests == 0 {
		return nil
	}
	curRate := peer.recvMonitor.Status().CurRate
	// curRate can be 0 on start
	if curRate != 0 && curRate < peer.params.minRecvRate {
		err := errSlowPeer
		peer.logger.Error("SendTimeout", "peer", peer,
			"reason", err,
			"curRate", fmt.Sprintf("%d KB/s", curRate/1024),
			"minRate", fmt.Sprintf("%d KB/s", peer.params.minRecvRate/1024))
		return err
	}
	return nil
}

func (peer *BpPeer) onTimeout() {
	peer.onErr(errNoPeerResponse, peer.ID)
}

func (peer *BpPeer) stopMonitor() {
	peer.recvMonitor.Done()
	peer.recvMonitor = nil
}

func (peer *BpPeer) startMonitor() {
	peer.recvMonitor = flow.New(peer.params.sampleRate, peer.params.windowSize)
	initialValue := float64(peer.params.minRecvRate) * math.E
	peer.recvMonitor.SetREMA(initialValue)
}

func (peer *BpPeer) resetBlockResponseTimer() {
	if peer.blockResponseTimer == nil {
		peer.blockResponseTimer = time.AfterFunc(peer.params.timeout, peer.onTimeout)
	} else {
		peer.blockResponseTimer.Reset(peer.params.timeout)
	}
}

func (peer *BpPeer) stopBlockResponseTimer() bool {
	if peer.blockResponseTimer == nil {
		return false
	}
	return peer.blockResponseTimer.Stop()
}

// BpPeerDefaultParams returns the default peer parameters.
func BpPeerDefaultParams() *BpPeerParams {
	return &BpPeerParams{
		// Timeout for a peer to respond to a block request.
		timeout: 15 * time.Second,

		// Minimum recv rate to ensure we're receiving blocks from a peer fast
		// enough. If a peer is not sending data at at least that rate, we
		// consider them to have timedout and we disconnect.
		//
		// Assuming a DSL connection (not a good choice) 128 Kbps (upload) ~ 15 KB/s,
		// sending data across atlantic ~ 7.5 KB/s.
		minRecvRate: int64(7680),

		// Monitor parameters
		sampleRate: time.Second,
		windowSize: 40 * time.Second,
	}
}
```
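The intended call sequence for `BpPeer` is exercised at length by the tests that follow; as a quick orientation, a hypothetical lifecycle helper in the same package (illustrative, not part of the diff):

```go
package v1

import (
	"github.com/tendermint/tendermint/p2p"
	"github.com/tendermint/tendermint/types"
)

// exampleLifecycle sketches the expected order of operations: record a
// request (starting the response timer and rate monitor), deliver the block
// for that height, then clean up.
func exampleLifecycle(block *types.Block, recvSize int) {
	onErr := func(err error, id p2p.ID) { /* e.g. drop the peer, redo requests */ }
	peer := NewBpPeer(p2p.ID("peer1"), 10, onErr, nil) // nil params => defaults

	peer.RequestSent(block.Height)  // AddBlock requires a pending request
	_ = peer.AddBlock(block, recvSize)
	peer.Cleanup()                  // stops timer/monitor, drops stored blocks
}
```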
278
blockchain/v1/peer_test.go
Normal file
278
blockchain/v1/peer_test.go
Normal file
@@ -0,0 +1,278 @@
|
||||
package v1
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
func TestPeerMonitor(t *testing.T) {
|
||||
peer := NewBpPeer(
|
||||
p2p.ID(cmn.RandStr(12)), 10,
|
||||
func(err error, _ p2p.ID) {},
|
||||
nil)
|
||||
peer.SetLogger(log.TestingLogger())
|
||||
peer.startMonitor()
|
||||
assert.NotNil(t, peer.recvMonitor)
|
||||
peer.stopMonitor()
|
||||
assert.Nil(t, peer.recvMonitor)
|
||||
}
|
||||
|
||||
func TestPeerResetBlockResponseTimer(t *testing.T) {
|
||||
var (
|
||||
numErrFuncCalls int // number of calls to the errFunc
|
||||
lastErr error // last generated error
|
||||
peerTestMtx sync.Mutex // modifications of ^^ variables are also done from timer handler goroutine
|
||||
)
|
||||
params := &BpPeerParams{timeout: 2 * time.Millisecond}
|
||||
|
||||
peer := NewBpPeer(
|
||||
p2p.ID(cmn.RandStr(12)), 10,
|
||||
func(err error, _ p2p.ID) {
|
||||
peerTestMtx.Lock()
|
||||
defer peerTestMtx.Unlock()
|
||||
lastErr = err
|
||||
numErrFuncCalls++
|
||||
},
|
||||
params)
|
||||
|
||||
peer.SetLogger(log.TestingLogger())
|
||||
checkByStoppingPeerTimer(t, peer, false)
|
||||
|
||||
// initial reset call with peer having a nil timer
|
||||
peer.resetBlockResponseTimer()
|
||||
assert.NotNil(t, peer.blockResponseTimer)
|
||||
// make sure timer is running and stop it
|
||||
checkByStoppingPeerTimer(t, peer, true)
|
||||
|
||||
// reset with running timer
|
||||
peer.resetBlockResponseTimer()
|
||||
time.Sleep(time.Millisecond)
|
||||
peer.resetBlockResponseTimer()
|
||||
assert.NotNil(t, peer.blockResponseTimer)
|
||||
|
||||
// let the timer expire and ...
|
||||
time.Sleep(3 * time.Millisecond)
|
||||
// ... check timer is not running
|
||||
checkByStoppingPeerTimer(t, peer, false)
|
||||
|
||||
peerTestMtx.Lock()
|
||||
// ... check errNoPeerResponse has been sent
|
||||
assert.Equal(t, 1, numErrFuncCalls)
|
||||
assert.Equal(t, lastErr, errNoPeerResponse)
|
||||
peerTestMtx.Unlock()
|
||||
}
|
||||
|
||||
func TestPeerRequestSent(t *testing.T) {
|
||||
params := &BpPeerParams{timeout: 2 * time.Millisecond}
|
||||
|
||||
peer := NewBpPeer(
|
||||
p2p.ID(cmn.RandStr(12)), 10,
|
||||
func(err error, _ p2p.ID) {},
|
||||
params)
|
||||
|
||||
peer.SetLogger(log.TestingLogger())
|
||||
|
||||
peer.RequestSent(1)
|
||||
assert.NotNil(t, peer.recvMonitor)
|
||||
assert.NotNil(t, peer.blockResponseTimer)
|
||||
assert.Equal(t, 1, peer.NumPendingBlockRequests)
|
||||
|
||||
peer.RequestSent(1)
|
||||
assert.NotNil(t, peer.recvMonitor)
|
||||
assert.NotNil(t, peer.blockResponseTimer)
|
||||
assert.Equal(t, 2, peer.NumPendingBlockRequests)
|
||||
}
|
||||
|
||||
func TestPeerGetAndRemoveBlock(t *testing.T) {
|
||||
peer := NewBpPeer(
|
||||
p2p.ID(cmn.RandStr(12)), 100,
|
||||
func(err error, _ p2p.ID) {},
|
||||
nil)
|
||||
|
||||
// Change peer height
|
||||
peer.Height = int64(10)
|
||||
assert.Equal(t, int64(10), peer.Height)
|
||||
|
||||
// request some blocks and receive few of them
|
||||
for i := 1; i <= 10; i++ {
		peer.RequestSent(int64(i))
		if i > 5 {
			// only receive blocks 1..5
			continue
		}
		_ = peer.AddBlock(makeSmallBlock(i), 10)
	}

	tests := []struct {
		name         string
		height       int64
		wantErr      error
		blockPresent bool
	}{
		{"no request", 100, errMissingBlock, false},
		{"no block", 6, errMissingBlock, false},
		{"block 1 present", 1, nil, true},
		{"block max present", 5, nil, true},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// try to get the block
			b, err := peer.BlockAtHeight(tt.height)
			assert.Equal(t, tt.wantErr, err)
			assert.Equal(t, tt.blockPresent, b != nil)

			// remove the block
			peer.RemoveBlock(tt.height)
			_, err = peer.BlockAtHeight(tt.height)
			assert.Equal(t, errMissingBlock, err)
		})
	}
}

func TestPeerAddBlock(t *testing.T) {
	peer := NewBpPeer(
		p2p.ID(cmn.RandStr(12)), 100,
		func(err error, _ p2p.ID) {},
		nil)

	// request some blocks, receive one
	for i := 1; i <= 10; i++ {
		peer.RequestSent(int64(i))
		if i == 5 {
			// receive block 5
			_ = peer.AddBlock(makeSmallBlock(i), 10)
		}
	}

	tests := []struct {
		name         string
		height       int64
		wantErr      error
		blockPresent bool
	}{
		{"no request", 50, errMissingBlock, false},
		{"duplicate block", 5, errDuplicateBlock, true},
		{"block 1 successfully received", 1, nil, true},
		{"block max successfully received", 10, nil, true},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// try to get the block
			err := peer.AddBlock(makeSmallBlock(int(tt.height)), 10)
			assert.Equal(t, tt.wantErr, err)
			_, err = peer.BlockAtHeight(tt.height)
			assert.Equal(t, tt.blockPresent, err == nil)
		})
	}
}

func TestPeerOnErrFuncCalledDueToExpiration(t *testing.T) {

	params := &BpPeerParams{timeout: 2 * time.Millisecond}
	var (
		numErrFuncCalls int        // number of calls to the onErr function
		lastErr         error      // last generated error
		peerTestMtx     sync.Mutex // modifications of ^^ variables are also done from timer handler goroutine
	)

	peer := NewBpPeer(
		p2p.ID(cmn.RandStr(12)), 10,
		func(err error, _ p2p.ID) {
			peerTestMtx.Lock()
			defer peerTestMtx.Unlock()
			lastErr = err
			numErrFuncCalls++
		},
		params)

	peer.SetLogger(log.TestingLogger())

	peer.RequestSent(1)
	time.Sleep(4 * time.Millisecond)
	// timer should have expired by now, check that the on error function was called
	peerTestMtx.Lock()
	assert.Equal(t, 1, numErrFuncCalls)
	assert.Equal(t, errNoPeerResponse, lastErr)
	peerTestMtx.Unlock()
}

func TestPeerCheckRate(t *testing.T) {
	params := &BpPeerParams{
		timeout:     time.Second,
		minRecvRate: int64(100), // 100 bytes/sec exponential moving average
	}
	peer := NewBpPeer(
		p2p.ID(cmn.RandStr(12)), 10,
		func(err error, _ p2p.ID) {},
		params)
	peer.SetLogger(log.TestingLogger())

	require.Nil(t, peer.CheckRate())

	for i := 0; i < 40; i++ {
		peer.RequestSent(int64(i))
	}

	// monitor starts with a higher rEMA (~ 2*minRecvRate), wait for it to go down
	time.Sleep(900 * time.Millisecond)

	// normal peer - send a bit more than 100 bytes/sec, > 10 bytes/100msec, check peer is not considered slow
	for i := 0; i < 10; i++ {
		_ = peer.AddBlock(makeSmallBlock(i), 11)
		time.Sleep(100 * time.Millisecond)
		require.Nil(t, peer.CheckRate())
	}

	// slow peer - send a bit less than 10 bytes/100msec
	for i := 10; i < 20; i++ {
		_ = peer.AddBlock(makeSmallBlock(i), 9)
		time.Sleep(100 * time.Millisecond)
	}
	// check peer is considered slow
	assert.Equal(t, errSlowPeer, peer.CheckRate())
}

func TestPeerCleanup(t *testing.T) {
	params := &BpPeerParams{timeout: 2 * time.Millisecond}

	peer := NewBpPeer(
		p2p.ID(cmn.RandStr(12)), 10,
		func(err error, _ p2p.ID) {},
		params)
	peer.SetLogger(log.TestingLogger())

	assert.Nil(t, peer.blockResponseTimer)
	peer.RequestSent(1)
	assert.NotNil(t, peer.blockResponseTimer)

	peer.Cleanup()
	checkByStoppingPeerTimer(t, peer, false)
}

// Check if peer timer is running or not (a running timer can be successfully stopped).
// Note: stops the timer.
func checkByStoppingPeerTimer(t *testing.T, peer *BpPeer, running bool) {
	assert.NotPanics(t, func() {
		stopped := peer.stopBlockResponseTimer()
		if running {
			assert.True(t, stopped)
		} else {
			assert.False(t, stopped)
		}
	})
}

func makeSmallBlock(height int) *types.Block {
	return types.MakeBlock(int64(height), []types.Tx{types.Tx("foo")}, nil, nil)
}
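The tests above exercise the BpPeer lifecycle end to end. A minimal sketch of that flow, assuming only the v1 package API shown in this diff (NewBpPeer, RequestSent, AddBlock, BlockAtHeight, Cleanup); the peer ID and heights are illustrative:

	// editor's sketch, not part of the change set
	peer := NewBpPeer(
		p2p.ID("peer1"), 100, // ID and advertised height
		func(err error, id p2p.ID) {
			// called from the timer goroutine, e.g. with errNoPeerResponse
		},
		nil) // nil params, as in the tests above
	peer.SetLogger(log.TestingLogger())

	peer.RequestSent(10)                      // starts recvMonitor and the response timer
	_ = peer.AddBlock(makeSmallBlock(10), 10) // records the received block
	if b, err := peer.BlockAtHeight(10); err == nil && b != nil {
		// block 10 stays cached until RemoveBlock(10) or Cleanup()
	}
	peer.Cleanup() // stops the response timer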
blockchain/v1/pool.go (369 lines, Normal file)
@@ -0,0 +1,369 @@
package v1

import (
	"sort"

	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/p2p"
	"github.com/tendermint/tendermint/types"
)

// BlockPool keeps track of the fast sync peers, block requests and block responses.
type BlockPool struct {
	logger log.Logger
	// Set of peers that have sent status responses, with height bigger than pool.Height
	peers map[p2p.ID]*BpPeer
	// Set of block heights and the corresponding peers from where a block response is expected or has been received.
	blocks map[int64]p2p.ID

	plannedRequests   map[int64]struct{} // list of blocks to be assigned peers for blockRequest
	nextRequestHeight int64              // next height to be added to plannedRequests

	Height        int64 // height of next block to execute
	MaxPeerHeight int64 // maximum height of all peers
	toBcR         bcReactor
}

// NewBlockPool creates a new BlockPool.
func NewBlockPool(height int64, toBcR bcReactor) *BlockPool {
	return &BlockPool{
		Height:            height,
		MaxPeerHeight:     0,
		peers:             make(map[p2p.ID]*BpPeer),
		blocks:            make(map[int64]p2p.ID),
		plannedRequests:   make(map[int64]struct{}),
		nextRequestHeight: height,
		toBcR:             toBcR,
	}
}

// SetLogger sets the logger of the pool.
func (pool *BlockPool) SetLogger(l log.Logger) {
	pool.logger = l
}

// ReachedMaxHeight checks if the pool has reached the maximum peer height.
func (pool *BlockPool) ReachedMaxHeight() bool {
	return pool.Height >= pool.MaxPeerHeight
}

func (pool *BlockPool) rescheduleRequest(peerID p2p.ID, height int64) {
	pool.logger.Info("reschedule requests made to peer for height ", "peerID", peerID, "height", height)
	pool.plannedRequests[height] = struct{}{}
	delete(pool.blocks, height)
	pool.peers[peerID].RemoveBlock(height)
}

// Updates the pool's max height. If no peers are left MaxPeerHeight is set to 0.
func (pool *BlockPool) updateMaxPeerHeight() {
	var newMax int64
	for _, peer := range pool.peers {
		peerHeight := peer.Height
		if peerHeight > newMax {
			newMax = peerHeight
		}
	}
	pool.MaxPeerHeight = newMax
}

// UpdatePeer adds a new peer or updates the height of an existing peer.
// A peer whose height is below the pool's height is not added.
func (pool *BlockPool) UpdatePeer(peerID p2p.ID, height int64) error {

	peer := pool.peers[peerID]

	if peer == nil {
		if height < pool.Height {
			pool.logger.Info("Peer height too small",
				"peer", peerID, "height", height, "fsm_height", pool.Height)
			return errPeerTooShort
		}
		// Add new peer.
		peer = NewBpPeer(peerID, height, pool.toBcR.sendPeerError, nil)
		peer.SetLogger(pool.logger.With("peer", peerID))
		pool.peers[peerID] = peer
		pool.logger.Info("added peer", "peerID", peerID, "height", height, "num_peers", len(pool.peers))
	} else {
		// Check if peer is lowering its height. This is not allowed.
		if height < peer.Height {
			pool.RemovePeer(peerID, errPeerLowersItsHeight)
			return errPeerLowersItsHeight
		}
		// Update existing peer.
		peer.Height = height
	}

	// Update the pool's MaxPeerHeight if needed.
	pool.updateMaxPeerHeight()

	return nil
}

// Cleans and deletes the peer. Recomputes the max peer height.
func (pool *BlockPool) deletePeer(peer *BpPeer) {
	if peer == nil {
		return
	}
	peer.Cleanup()
	delete(pool.peers, peer.ID)

	if peer.Height == pool.MaxPeerHeight {
		pool.updateMaxPeerHeight()
	}
}

// RemovePeer removes the blocks and requests from the peer, reschedules them and deletes the peer.
func (pool *BlockPool) RemovePeer(peerID p2p.ID, err error) {
	peer := pool.peers[peerID]
	if peer == nil {
		return
	}
	pool.logger.Info("removing peer", "peerID", peerID, "error", err)

	// Reschedule the block requests made to the peer, or received and not processed yet.
	// Note that some of the requests may be removed further down.
	for h := range pool.peers[peerID].blocks {
		pool.rescheduleRequest(peerID, h)
	}

	oldMaxPeerHeight := pool.MaxPeerHeight
	// Delete the peer. This operation may result in the pool's MaxPeerHeight being lowered.
	pool.deletePeer(peer)

	// Check if the pool's MaxPeerHeight has been lowered.
	// This may happen if the tallest peer has been removed.
	if oldMaxPeerHeight > pool.MaxPeerHeight {
		// Remove any planned requests for heights over the new MaxPeerHeight.
		for h := range pool.plannedRequests {
			if h > pool.MaxPeerHeight {
				delete(pool.plannedRequests, h)
			}
		}
		// Adjust the nextRequestHeight to the new max plus one.
		if pool.nextRequestHeight > pool.MaxPeerHeight {
			pool.nextRequestHeight = pool.MaxPeerHeight + 1
		}
	}
}

func (pool *BlockPool) removeShortPeers() {
	for _, peer := range pool.peers {
		if peer.Height < pool.Height {
			pool.RemovePeer(peer.ID, nil)
		}
	}
}

func (pool *BlockPool) removeBadPeers() {
	pool.removeShortPeers()
	for _, peer := range pool.peers {
		if err := peer.CheckRate(); err != nil {
			pool.RemovePeer(peer.ID, err)
			pool.toBcR.sendPeerError(err, peer.ID)
		}
	}
}

// MakeNextRequests creates more requests if the block pool is running low.
func (pool *BlockPool) MakeNextRequests(maxNumRequests int) {
	heights := pool.makeRequestBatch(maxNumRequests)
	if len(heights) != 0 {
		pool.logger.Info("makeNextRequests will make following requests",
			"number", len(heights), "heights", heights)
	}

	for _, height := range heights {
		h := int64(height)
		if !pool.sendRequest(h) {
			// If a good peer was not found for sending the request at height h then return,
			// as it shouldn't be possible to find a peer for h+1.
			return
		}
		delete(pool.plannedRequests, h)
	}
}

// Makes a batch of requests sorted by height such that the block pool has up to maxNumRequests entries.
func (pool *BlockPool) makeRequestBatch(maxNumRequests int) []int {
	pool.removeBadPeers()
	// At this point pool.requests may include heights for requests to be redone due to removal of peers:
	// - peers timed out or were removed by switch
	// - FSM timed out on waiting to advance the block execution due to missing blocks at h or h+1
	// Determine the number of requests needed by subtracting the number of requests already made from the maximum
	// allowed
	numNeeded := int(maxNumRequests) - len(pool.blocks)
	for len(pool.plannedRequests) < numNeeded {
		if pool.nextRequestHeight > pool.MaxPeerHeight {
			break
		}
		pool.plannedRequests[pool.nextRequestHeight] = struct{}{}
		pool.nextRequestHeight++
	}

	heights := make([]int, 0, len(pool.plannedRequests))
	for k := range pool.plannedRequests {
		heights = append(heights, int(k))
	}
	sort.Ints(heights)
	return heights
}
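
// Worked example (editor's illustration, not part of the original change):
// with maxNumRequests = 64 and 60 requests already tracked in pool.blocks,
// numNeeded = 64 - 60 = 4, so at most four new heights are added to
// plannedRequests (stopping early at MaxPeerHeight) before the batch is
// sorted and returned.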

func (pool *BlockPool) sendRequest(height int64) bool {
	for _, peer := range pool.peers {
		if peer.NumPendingBlockRequests >= maxRequestsPerPeer {
			continue
		}
		if peer.Height < height {
			continue
		}

		err := pool.toBcR.sendBlockRequest(peer.ID, height)
		if err == errNilPeerForBlockRequest {
			// Switch does not have this peer, remove it and continue to look for another peer.
			pool.logger.Error("switch does not have peer..removing peer selected for height", "peer",
				peer.ID, "height", height)
			pool.RemovePeer(peer.ID, err)
			continue
		}

		if err == errSendQueueFull {
			pool.logger.Error("peer queue is full", "peer", peer.ID, "height", height)
			continue
		}

		pool.logger.Info("assigned request to peer", "peer", peer.ID, "height", height)

		pool.blocks[height] = peer.ID
		peer.RequestSent(height)

		return true
	}
	pool.logger.Error("could not find peer to send request for block at height", "height", height)
	return false
}
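
// Example of the selection above (editor's illustration): with peers
// P1 (NumPendingBlockRequests == maxRequestsPerPeer) and P2 (Height 5),
// sendRequest(10) skips P1 as saturated and P2 as too short, logs the
// "could not find peer" error and returns false.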

// AddBlock validates that the block comes from the peer it was expected from and stores it in the 'blocks' map.
func (pool *BlockPool) AddBlock(peerID p2p.ID, block *types.Block, blockSize int) error {
	peer, ok := pool.peers[peerID]
	if !ok {
		pool.logger.Error("block from unknown peer", "height", block.Height, "peer", peerID)
		return errBadDataFromPeer
	}
	if wantPeerID, ok := pool.blocks[block.Height]; ok && wantPeerID != peerID {
		pool.logger.Error("block received from wrong peer", "height", block.Height,
			"peer", peerID, "expected_peer", wantPeerID)
		return errBadDataFromPeer
	}

	return peer.AddBlock(block, blockSize)
}

// BlockData stores the peer responsible to deliver a block and the actual block if delivered.
type BlockData struct {
	block *types.Block
	peer  *BpPeer
}

// BlockAndPeerAtHeight retrieves the block and delivery peer at specified height.
// Returns errMissingBlock if a block was not found
func (pool *BlockPool) BlockAndPeerAtHeight(height int64) (bData *BlockData, err error) {
	peerID := pool.blocks[height]
	peer := pool.peers[peerID]
	if peer == nil {
		return nil, errMissingBlock
	}

	block, err := peer.BlockAtHeight(height)
	if err != nil {
		return nil, err
	}

	return &BlockData{peer: peer, block: block}, nil
}

// FirstTwoBlocksAndPeers returns the blocks and the delivery peers at pool's height H and H+1.
func (pool *BlockPool) FirstTwoBlocksAndPeers() (first, second *BlockData, err error) {
	first, err = pool.BlockAndPeerAtHeight(pool.Height)
	second, err2 := pool.BlockAndPeerAtHeight(pool.Height + 1)
	if err == nil {
		err = err2
	}
	return
}

// InvalidateFirstTwoBlocks removes the peers that sent us the first two blocks, blocks are removed by RemovePeer().
func (pool *BlockPool) InvalidateFirstTwoBlocks(err error) {
	first, err1 := pool.BlockAndPeerAtHeight(pool.Height)
	second, err2 := pool.BlockAndPeerAtHeight(pool.Height + 1)

	if err1 == nil {
		pool.RemovePeer(first.peer.ID, err)
	}
	if err2 == nil {
		pool.RemovePeer(second.peer.ID, err)
	}
}

// ProcessedCurrentHeightBlock performs cleanup after a block is processed. It removes block at pool height and
// the peers that are now short.
func (pool *BlockPool) ProcessedCurrentHeightBlock() {
	peerID, peerOk := pool.blocks[pool.Height]
	if peerOk {
		pool.peers[peerID].RemoveBlock(pool.Height)
	}
	delete(pool.blocks, pool.Height)
	pool.logger.Debug("removed block at height", "height", pool.Height)
	pool.Height++
	pool.removeShortPeers()
}

// RemovePeerAtCurrentHeights checks if a block at pool's height H exists and if not, it removes the
// delivery peer and returns. If a block at height H exists then the check and peer removal is done for H+1.
// This function is called when the FSM is not able to make progress for some time.
// This happens if either the block H or H+1 have not been delivered.
func (pool *BlockPool) RemovePeerAtCurrentHeights(err error) {
	peerID := pool.blocks[pool.Height]
	peer, ok := pool.peers[peerID]
	if ok {
		if _, err := peer.BlockAtHeight(pool.Height); err != nil {
			pool.logger.Info("remove peer that hasn't sent block at pool.Height",
				"peer", peerID, "height", pool.Height)
			pool.RemovePeer(peerID, err)
			return
		}
	}
	peerID = pool.blocks[pool.Height+1]
	peer, ok = pool.peers[peerID]
	if ok {
		if _, err := peer.BlockAtHeight(pool.Height + 1); err != nil {
			pool.logger.Info("remove peer that hasn't sent block at pool.Height+1",
				"peer", peerID, "height", pool.Height+1)
			pool.RemovePeer(peerID, err)
			return
		}
	}
}

// Cleanup performs pool and peer cleanup
func (pool *BlockPool) Cleanup() {
	for id, peer := range pool.peers {
		peer.Cleanup()
		delete(pool.peers, id)
	}
	pool.plannedRequests = make(map[int64]struct{})
	pool.blocks = make(map[int64]p2p.ID)
	pool.nextRequestHeight = 0
	pool.Height = 0
	pool.MaxPeerHeight = 0
}

// NumPeers returns the number of peers in the pool
func (pool *BlockPool) NumPeers() int {
	return len(pool.peers)
}

// NeedsBlocks returns true if more blocks are required.
func (pool *BlockPool) NeedsBlocks() bool {
	return len(pool.blocks) < maxNumRequests
}
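Taken together, the pool above is driven as a simple request/response loop. A condensed sketch of that loop (editor's illustration; the real driver is the FSM in this package), assuming a bcReactor implementation such as the testBcR defined in the tests below:

	// editor's sketch, not part of the change set
	pool := NewBlockPool(1, bcr) // bcr implements bcReactor
	pool.SetLogger(log.TestingLogger())

	_ = pool.UpdatePeer(p2p.ID("P1"), 100) // peer advertises height 100
	pool.MakeNextRequests(maxNumRequests)  // plans heights, calls bcr.sendBlockRequest

	// on each decoded block response:
	// _ = pool.AddBlock(p2p.ID("P1"), block, len(msgBytes))

	// once blocks at Height and Height+1 verify:
	// first, second, err := pool.FirstTwoBlocksAndPeers()
	pool.ProcessedCurrentHeightBlock() // advances pool.Height, prunes short peers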
blockchain/v1/pool_test.go (650 lines, Normal file)
@@ -0,0 +1,650 @@
package v1

import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"

	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/p2p"
	"github.com/tendermint/tendermint/types"
)

type testPeer struct {
	id     p2p.ID
	height int64
}

type testBcR struct {
	logger log.Logger
}

type testValues struct {
	numRequestsSent int
}

var testResults testValues

func resetPoolTestResults() {
	testResults.numRequestsSent = 0
}

func (testR *testBcR) sendPeerError(err error, peerID p2p.ID) {
}

func (testR *testBcR) sendStatusRequest() {
}

func (testR *testBcR) sendBlockRequest(peerID p2p.ID, height int64) error {
	testResults.numRequestsSent++
	return nil
}

func (testR *testBcR) resetStateTimer(name string, timer **time.Timer, timeout time.Duration) {
}

func (testR *testBcR) switchToConsensus() {
}

func newTestBcR() *testBcR {
	testBcR := &testBcR{logger: log.TestingLogger()}
	return testBcR
}

type tPBlocks struct {
	id     p2p.ID
	create bool
}

// Makes a block pool with specified current height, list of peers, block requests and block responses
func makeBlockPool(bcr *testBcR, height int64, peers []BpPeer, blocks map[int64]tPBlocks) *BlockPool {
	bPool := NewBlockPool(height, bcr)
	bPool.SetLogger(bcr.logger)

	txs := []types.Tx{types.Tx("foo"), types.Tx("bar")}

	var maxH int64
	for _, p := range peers {
		if p.Height > maxH {
			maxH = p.Height
		}
		bPool.peers[p.ID] = NewBpPeer(p.ID, p.Height, bcr.sendPeerError, nil)
		bPool.peers[p.ID].SetLogger(bcr.logger)
	}
	bPool.MaxPeerHeight = maxH
	for h, p := range blocks {
		bPool.blocks[h] = p.id
		bPool.peers[p.id].RequestSent(int64(h))
		if p.create {
			// simulate that a block at height h has been received
			_ = bPool.peers[p.id].AddBlock(types.MakeBlock(int64(h), txs, nil, nil), 100)
		}
	}
	return bPool
}

func assertPeerSetsEquivalent(t *testing.T, set1 map[p2p.ID]*BpPeer, set2 map[p2p.ID]*BpPeer) {
	assert.Equal(t, len(set1), len(set2))
	for peerID, peer1 := range set1 {
		peer2 := set2[peerID]
		assert.NotNil(t, peer2)
		assert.Equal(t, peer1.NumPendingBlockRequests, peer2.NumPendingBlockRequests)
		assert.Equal(t, peer1.Height, peer2.Height)
		assert.Equal(t, len(peer1.blocks), len(peer2.blocks))
		for h, block1 := range peer1.blocks {
			block2 := peer2.blocks[h]
			// block1 and block2 could be nil if a request was made but no block was received
			assert.Equal(t, block1, block2)
		}
	}
}

func assertBlockPoolEquivalent(t *testing.T, poolWanted, pool *BlockPool) {
	assert.Equal(t, poolWanted.blocks, pool.blocks)
	assertPeerSetsEquivalent(t, poolWanted.peers, pool.peers)
	assert.Equal(t, poolWanted.MaxPeerHeight, pool.MaxPeerHeight)
	assert.Equal(t, poolWanted.Height, pool.Height)
}

func TestBlockPoolUpdatePeer(t *testing.T) {
	testBcR := newTestBcR()

	tests := []struct {
		name       string
		pool       *BlockPool
		args       testPeer
		poolWanted *BlockPool
		errWanted  error
	}{
		{
			name:       "add a first short peer",
			pool:       makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}),
			args:       testPeer{"P1", 50},
			errWanted:  errPeerTooShort,
			poolWanted: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}),
		},
		{
			name:       "add a first good peer",
			pool:       makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}),
			args:       testPeer{"P1", 101},
			poolWanted: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 101}}, map[int64]tPBlocks{}),
		},
		{
			name:       "increase the height of P1 from 120 to 123",
			pool:       makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}}, map[int64]tPBlocks{}),
			args:       testPeer{"P1", 123},
			poolWanted: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 123}}, map[int64]tPBlocks{}),
		},
		{
			name:       "decrease the height of P1 from 120 to 110",
			pool:       makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}}, map[int64]tPBlocks{}),
			args:       testPeer{"P1", 110},
			errWanted:  errPeerLowersItsHeight,
			poolWanted: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}),
		},
		{
			name: "decrease the height of P1 from 105 to 102 with blocks",
			pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 105}},
				map[int64]tPBlocks{
					100: {"P1", true}, 101: {"P1", true}, 102: {"P1", true}}),
			args:      testPeer{"P1", 102},
			errWanted: errPeerLowersItsHeight,
			poolWanted: makeBlockPool(testBcR, 100, []BpPeer{},
				map[int64]tPBlocks{}),
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			pool := tt.pool
			err := pool.UpdatePeer(tt.args.id, tt.args.height)
			assert.Equal(t, tt.errWanted, err)
			assert.Equal(t, tt.poolWanted.blocks, tt.pool.blocks)
			assertPeerSetsEquivalent(t, tt.poolWanted.peers, tt.pool.peers)
			assert.Equal(t, tt.poolWanted.MaxPeerHeight, tt.pool.MaxPeerHeight)
		})
	}
}

func TestBlockPoolRemovePeer(t *testing.T) {
	testBcR := newTestBcR()

	type args struct {
		peerID p2p.ID
		err    error
	}

	tests := []struct {
		name       string
		pool       *BlockPool
		args       args
		poolWanted *BlockPool
	}{
		{
			name:       "attempt to delete non-existing peer",
			pool:       makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}}, map[int64]tPBlocks{}),
			args:       args{"P99", nil},
			poolWanted: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}}, map[int64]tPBlocks{}),
		},
		{
			name:       "delete the only peer without blocks",
			pool:       makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}}, map[int64]tPBlocks{}),
			args:       args{"P1", nil},
			poolWanted: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}),
		},
		{
			name:       "delete the shortest of two peers without blocks",
			pool:       makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 120}}, map[int64]tPBlocks{}),
			args:       args{"P1", nil},
			poolWanted: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P2", Height: 120}}, map[int64]tPBlocks{}),
		},
		{
			name:       "delete the tallest of two peers without blocks",
			pool:       makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 120}}, map[int64]tPBlocks{}),
			args:       args{"P2", nil},
			poolWanted: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 100}}, map[int64]tPBlocks{}),
		},
		{
			name: "delete the only peer with block requests sent and blocks received",
			pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}},
				map[int64]tPBlocks{100: {"P1", true}, 101: {"P1", false}}),
			args:       args{"P1", nil},
			poolWanted: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}),
		},
		{
			name: "delete the shortest of two peers with block requests sent and blocks received",
			pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}, {ID: "P2", Height: 200}},
				map[int64]tPBlocks{100: {"P1", true}, 101: {"P1", false}}),
			args:       args{"P1", nil},
			poolWanted: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P2", Height: 200}}, map[int64]tPBlocks{}),
		},
		{
			name: "delete the tallest of two peers with block requests sent and blocks received",
			pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}, {ID: "P2", Height: 110}},
				map[int64]tPBlocks{100: {"P1", true}, 101: {"P1", false}}),
			args:       args{"P1", nil},
			poolWanted: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P2", Height: 110}}, map[int64]tPBlocks{}),
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			tt.pool.RemovePeer(tt.args.peerID, tt.args.err)
			assertBlockPoolEquivalent(t, tt.poolWanted, tt.pool)
		})
	}
}

func TestBlockPoolRemoveShortPeers(t *testing.T) {
	testBcR := newTestBcR()

	tests := []struct {
		name       string
		pool       *BlockPool
		poolWanted *BlockPool
	}{
		{
			name: "no short peers",
			pool: makeBlockPool(testBcR, 100,
				[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 110}, {ID: "P3", Height: 120}}, map[int64]tPBlocks{}),
			poolWanted: makeBlockPool(testBcR, 100,
				[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 110}, {ID: "P3", Height: 120}}, map[int64]tPBlocks{}),
		},

		{
			name: "one short peer",
			pool: makeBlockPool(testBcR, 100,
				[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 90}, {ID: "P3", Height: 120}}, map[int64]tPBlocks{}),
			poolWanted: makeBlockPool(testBcR, 100,
				[]BpPeer{{ID: "P1", Height: 100}, {ID: "P3", Height: 120}}, map[int64]tPBlocks{}),
		},

		{
			name: "all short peers",
			pool: makeBlockPool(testBcR, 100,
				[]BpPeer{{ID: "P1", Height: 90}, {ID: "P2", Height: 91}, {ID: "P3", Height: 92}}, map[int64]tPBlocks{}),
			poolWanted: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}),
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			pool := tt.pool
			pool.removeShortPeers()
			assertBlockPoolEquivalent(t, tt.poolWanted, tt.pool)
		})
	}
}

func TestBlockPoolSendRequestBatch(t *testing.T) {
	type testPeerResult struct {
		id                      p2p.ID
		numPendingBlockRequests int
	}

	testBcR := newTestBcR()

	tests := []struct {
		name                       string
		pool                       *BlockPool
		maxRequestsPerPeer         int
		expRequests                map[int64]bool
		expPeerResults             []testPeerResult
		expnumPendingBlockRequests int
	}{
		{
			name:                       "one peer - send up to maxRequestsPerPeer block requests",
			pool:                       makeBlockPool(testBcR, 10, []BpPeer{{ID: "P1", Height: 100}}, map[int64]tPBlocks{}),
			maxRequestsPerPeer:         2,
			expRequests:                map[int64]bool{10: true, 11: true},
			expPeerResults:             []testPeerResult{{id: "P1", numPendingBlockRequests: 2}},
			expnumPendingBlockRequests: 2,
		},
		{
			name:               "n peers - send n*maxRequestsPerPeer block requests",
			pool:               makeBlockPool(testBcR, 10, []BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}}, map[int64]tPBlocks{}),
			maxRequestsPerPeer: 2,
			expRequests:        map[int64]bool{10: true, 11: true},
			expPeerResults: []testPeerResult{
				{id: "P1", numPendingBlockRequests: 2},
				{id: "P2", numPendingBlockRequests: 2}},
			expnumPendingBlockRequests: 4,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			resetPoolTestResults()

			var pool = tt.pool
			maxRequestsPerPeer = tt.maxRequestsPerPeer
			pool.MakeNextRequests(10)
			assert.Equal(t, testResults.numRequestsSent, maxRequestsPerPeer*len(pool.peers))

			for _, tPeer := range tt.expPeerResults {
				var peer = pool.peers[tPeer.id]
				assert.NotNil(t, peer)
				assert.Equal(t, tPeer.numPendingBlockRequests, peer.NumPendingBlockRequests)
			}
			assert.Equal(t, testResults.numRequestsSent, maxRequestsPerPeer*len(pool.peers))
		})
	}
}

func TestBlockPoolAddBlock(t *testing.T) {
	testBcR := newTestBcR()
	txs := []types.Tx{types.Tx("foo"), types.Tx("bar")}

	type args struct {
		peerID    p2p.ID
		block     *types.Block
		blockSize int
	}
	tests := []struct {
		name       string
		pool       *BlockPool
		args       args
		poolWanted *BlockPool
		errWanted  error
	}{
		{name: "block from unknown peer",
			pool: makeBlockPool(testBcR, 10, []BpPeer{{ID: "P1", Height: 100}}, map[int64]tPBlocks{}),
			args: args{
				peerID:    "P2",
				block:     types.MakeBlock(int64(10), txs, nil, nil),
				blockSize: 100,
			},
			poolWanted: makeBlockPool(testBcR, 10, []BpPeer{{ID: "P1", Height: 100}}, map[int64]tPBlocks{}),
			errWanted:  errBadDataFromPeer,
		},
		{name: "unexpected block 11 from known peer - waiting for 10",
			pool: makeBlockPool(testBcR, 10,
				[]BpPeer{{ID: "P1", Height: 100}},
				map[int64]tPBlocks{10: {"P1", false}}),
			args: args{
				peerID:    "P1",
				block:     types.MakeBlock(int64(11), txs, nil, nil),
				blockSize: 100,
			},
			poolWanted: makeBlockPool(testBcR, 10,
				[]BpPeer{{ID: "P1", Height: 100}},
				map[int64]tPBlocks{10: {"P1", false}}),
			errWanted: errMissingBlock,
		},
		{name: "unexpected block 10 from known peer - already have 10",
			pool: makeBlockPool(testBcR, 10,
				[]BpPeer{{ID: "P1", Height: 100}},
				map[int64]tPBlocks{10: {"P1", true}, 11: {"P1", false}}),
			args: args{
				peerID:    "P1",
				block:     types.MakeBlock(int64(10), txs, nil, nil),
				blockSize: 100,
			},
			poolWanted: makeBlockPool(testBcR, 10,
				[]BpPeer{{ID: "P1", Height: 100}},
				map[int64]tPBlocks{10: {"P1", true}, 11: {"P1", false}}),
			errWanted: errDuplicateBlock,
		},
		{name: "unexpected block 10 from known peer P2 - expected 10 to come from P1",
			pool: makeBlockPool(testBcR, 10,
				[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
				map[int64]tPBlocks{10: {"P1", false}}),
			args: args{
				peerID:    "P2",
				block:     types.MakeBlock(int64(10), txs, nil, nil),
				blockSize: 100,
			},
			poolWanted: makeBlockPool(testBcR, 10,
				[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
				map[int64]tPBlocks{10: {"P1", false}}),
			errWanted: errBadDataFromPeer,
		},
		{name: "expected block from known peer",
			pool: makeBlockPool(testBcR, 10,
				[]BpPeer{{ID: "P1", Height: 100}},
				map[int64]tPBlocks{10: {"P1", false}}),
			args: args{
				peerID:    "P1",
				block:     types.MakeBlock(int64(10), txs, nil, nil),
				blockSize: 100,
			},
			poolWanted: makeBlockPool(testBcR, 10,
				[]BpPeer{{ID: "P1", Height: 100}},
				map[int64]tPBlocks{10: {"P1", true}}),
			errWanted: nil,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := tt.pool.AddBlock(tt.args.peerID, tt.args.block, tt.args.blockSize)
			assert.Equal(t, tt.errWanted, err)
			assertBlockPoolEquivalent(t, tt.poolWanted, tt.pool)
		})
	}
}

func TestBlockPoolFirstTwoBlocksAndPeers(t *testing.T) {
	testBcR := newTestBcR()

	tests := []struct {
		name         string
		pool         *BlockPool
		firstWanted  int64
		secondWanted int64
		errWanted    error
	}{
		{
			name: "both blocks missing",
			pool: makeBlockPool(testBcR, 10,
				[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
				map[int64]tPBlocks{15: {"P1", true}, 16: {"P2", true}}),
			errWanted: errMissingBlock,
		},
		{
			name: "second block missing",
			pool: makeBlockPool(testBcR, 15,
				[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
				map[int64]tPBlocks{15: {"P1", true}, 18: {"P2", true}}),
			firstWanted: 15,
			errWanted:   errMissingBlock,
		},
		{
			name: "first block missing",
			pool: makeBlockPool(testBcR, 15,
				[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
				map[int64]tPBlocks{16: {"P2", true}, 18: {"P2", true}}),
			secondWanted: 16,
			errWanted:    errMissingBlock,
		},
		{
			name: "both blocks present",
			pool: makeBlockPool(testBcR, 10,
				[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
				map[int64]tPBlocks{10: {"P1", true}, 11: {"P2", true}}),
			firstWanted:  10,
			secondWanted: 11,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			pool := tt.pool
			gotFirst, gotSecond, err := pool.FirstTwoBlocksAndPeers()
			assert.Equal(t, tt.errWanted, err)

			if tt.firstWanted != 0 {
				peer := pool.blocks[tt.firstWanted]
				block := pool.peers[peer].blocks[tt.firstWanted]
				assert.Equal(t, block, gotFirst.block,
					"BlockPool.FirstTwoBlocksAndPeers() gotFirst = %v, want %v",
					tt.firstWanted, gotFirst.block.Height)
			}

			if tt.secondWanted != 0 {
				peer := pool.blocks[tt.secondWanted]
				block := pool.peers[peer].blocks[tt.secondWanted]
				assert.Equal(t, block, gotSecond.block,
					"BlockPool.FirstTwoBlocksAndPeers() gotSecond = %v, want %v",
					tt.secondWanted, gotSecond.block.Height)
			}
		})
	}
}

func TestBlockPoolInvalidateFirstTwoBlocks(t *testing.T) {
	testBcR := newTestBcR()

	tests := []struct {
		name       string
		pool       *BlockPool
		poolWanted *BlockPool
	}{
		{
			name: "both blocks missing",
			pool: makeBlockPool(testBcR, 10,
				[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
				map[int64]tPBlocks{15: {"P1", true}, 16: {"P2", true}}),
			poolWanted: makeBlockPool(testBcR, 10,
				[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
				map[int64]tPBlocks{15: {"P1", true}, 16: {"P2", true}}),
		},
		{
			name: "second block missing",
			pool: makeBlockPool(testBcR, 15,
				[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
				map[int64]tPBlocks{15: {"P1", true}, 18: {"P2", true}}),
			poolWanted: makeBlockPool(testBcR, 15,
				[]BpPeer{{ID: "P2", Height: 100}},
				map[int64]tPBlocks{18: {"P2", true}}),
		},
		{
			name: "first block missing",
			pool: makeBlockPool(testBcR, 15,
				[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
				map[int64]tPBlocks{18: {"P1", true}, 16: {"P2", true}}),
			poolWanted: makeBlockPool(testBcR, 15,
				[]BpPeer{{ID: "P1", Height: 100}},
				map[int64]tPBlocks{18: {"P1", true}}),
		},
		{
			name: "both blocks present",
			pool: makeBlockPool(testBcR, 10,
				[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
				map[int64]tPBlocks{10: {"P1", true}, 11: {"P2", true}}),
			poolWanted: makeBlockPool(testBcR, 10,
				[]BpPeer{},
				map[int64]tPBlocks{}),
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			tt.pool.InvalidateFirstTwoBlocks(errNoPeerResponse)
			assertBlockPoolEquivalent(t, tt.poolWanted, tt.pool)
		})
	}
}

func TestProcessedCurrentHeightBlock(t *testing.T) {
	testBcR := newTestBcR()

	tests := []struct {
		name       string
		pool       *BlockPool
		poolWanted *BlockPool
	}{
		{
			name: "one peer",
			pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}},
				map[int64]tPBlocks{100: {"P1", true}, 101: {"P1", true}}),
			poolWanted: makeBlockPool(testBcR, 101, []BpPeer{{ID: "P1", Height: 120}},
				map[int64]tPBlocks{101: {"P1", true}}),
		},
		{
			name: "multiple peers",
			pool: makeBlockPool(testBcR, 100,
				[]BpPeer{{ID: "P1", Height: 120}, {ID: "P2", Height: 120}, {ID: "P3", Height: 130}},
				map[int64]tPBlocks{
					100: {"P1", true}, 104: {"P1", true}, 105: {"P1", false},
					101: {"P2", true}, 103: {"P2", false},
					102: {"P3", true}, 106: {"P3", true}}),
			poolWanted: makeBlockPool(testBcR, 101,
				[]BpPeer{{ID: "P1", Height: 120}, {ID: "P2", Height: 120}, {ID: "P3", Height: 130}},
				map[int64]tPBlocks{
					104: {"P1", true}, 105: {"P1", false},
					101: {"P2", true}, 103: {"P2", false},
					102: {"P3", true}, 106: {"P3", true}}),
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			tt.pool.ProcessedCurrentHeightBlock()
			assertBlockPoolEquivalent(t, tt.poolWanted, tt.pool)
		})
	}
}

func TestRemovePeerAtCurrentHeight(t *testing.T) {
	testBcR := newTestBcR()

	tests := []struct {
		name       string
		pool       *BlockPool
		poolWanted *BlockPool
	}{
		{
			name: "one peer, remove peer for block at H",
			pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}},
				map[int64]tPBlocks{100: {"P1", false}, 101: {"P1", true}}),
			poolWanted: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}),
		},
		{
			name: "one peer, remove peer for block at H+1",
			pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}},
				map[int64]tPBlocks{100: {"P1", true}, 101: {"P1", false}}),
			poolWanted: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}),
		},
		{
			name: "multiple peers, remove peer for block at H",
			pool: makeBlockPool(testBcR, 100,
				[]BpPeer{{ID: "P1", Height: 120}, {ID: "P2", Height: 120}, {ID: "P3", Height: 130}},
				map[int64]tPBlocks{
					100: {"P1", false}, 104: {"P1", true}, 105: {"P1", false},
					101: {"P2", true}, 103: {"P2", false},
					102: {"P3", true}, 106: {"P3", true}}),
			poolWanted: makeBlockPool(testBcR, 100,
				[]BpPeer{{ID: "P2", Height: 120}, {ID: "P3", Height: 130}},
				map[int64]tPBlocks{
					101: {"P2", true}, 103: {"P2", false},
					102: {"P3", true}, 106: {"P3", true}}),
		},
		{
			name: "multiple peers, remove peer for block at H+1",
			pool: makeBlockPool(testBcR, 100,
				[]BpPeer{{ID: "P1", Height: 120}, {ID: "P2", Height: 120}, {ID: "P3", Height: 130}},
				map[int64]tPBlocks{
					100: {"P1", true}, 104: {"P1", true}, 105: {"P1", false},
					101: {"P2", false}, 103: {"P2", false},
					102: {"P3", true}, 106: {"P3", true}}),
			poolWanted: makeBlockPool(testBcR, 100,
				[]BpPeer{{ID: "P1", Height: 120}, {ID: "P3", Height: 130}},
				map[int64]tPBlocks{
					100: {"P1", true}, 104: {"P1", true}, 105: {"P1", false},
					102: {"P3", true}, 106: {"P3", true}}),
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			tt.pool.RemovePeerAtCurrentHeights(errNoPeerResponse)
			assertBlockPoolEquivalent(t, tt.poolWanted, tt.pool)
		})
	}
}
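The reactor that drives this pool follows in reactor.go. For orientation, a hedged sketch of how such a reactor is typically wired into a node; NewBlockchainReactor matches the constructor below, while sw, logger and the "BLOCKCHAIN" name are assumptions based on the usual Tendermint switch setup rather than part of this change:

	// editor's sketch, not part of the change set
	bcReactor := NewBlockchainReactor(state, blockExec, blockStore, true /* fastSync */)
	bcReactor.SetLogger(logger.With("module", "blockchain"))
	sw.AddReactor("BLOCKCHAIN", bcReactor) // OnStart later spawns poolRoutine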
blockchain/v1/reactor.go (622 lines, Normal file)
@@ -0,0 +1,622 @@
package v1

import (
	"errors"
	"fmt"
	"reflect"
	"time"

	amino "github.com/tendermint/go-amino"
	"github.com/tendermint/tendermint/behaviour"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/p2p"
	sm "github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/store"
	"github.com/tendermint/tendermint/types"
)

const (
	// BlockchainChannel is a channel for blocks and status updates (`BlockStore` height)
	BlockchainChannel = byte(0x40)
	trySyncIntervalMS = 10
	trySendIntervalMS = 10

	// ask for best height every 10s
	statusUpdateIntervalSeconds = 10

	// NOTE: keep up to date with bcBlockResponseMessage
	bcBlockResponseMessagePrefixSize   = 4
	bcBlockResponseMessageFieldKeySize = 1
	maxMsgSize                         = types.MaxBlockSizeBytes +
		bcBlockResponseMessagePrefixSize +
		bcBlockResponseMessageFieldKeySize
)
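
// Illustrative arithmetic (editor's note, assuming the default
// types.MaxBlockSizeBytes of 104857600, i.e. 100 MB): maxMsgSize works out to
// 104857600 + 4 + 1 = 104857605 bytes.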

var (
	// Maximum number of requests that can be pending per peer, i.e. for which requests have been sent but blocks
	// have not been received.
	maxRequestsPerPeer = 20
	// Maximum number of block requests for the reactor, pending or for which blocks have been received.
	maxNumRequests = 64
)

type consensusReactor interface {
	// for when we switch from blockchain reactor and fast sync to
	// the consensus machine
	SwitchToConsensus(sm.State, int)
}

// BlockchainReactor handles long-term catchup syncing.
type BlockchainReactor struct {
	p2p.BaseReactor

	initialState sm.State // immutable
	state        sm.State

	blockExec *sm.BlockExecutor
	store     *store.BlockStore

	fastSync bool

	fsm          *BcReactorFSM
	blocksSynced int

	// Receive goroutine forwards messages to this channel to be processed in the context of the poolRoutine.
	messagesForFSMCh chan bcReactorMessage

	// Switch goroutine may send RemovePeer to the blockchain reactor. This is an error message that is relayed
	// to this channel to be processed in the context of the poolRoutine.
	errorsForFSMCh chan bcReactorMessage

	// This channel is used by the FSM and indirectly the block pool to report errors to the blockchain reactor and
	// the switch.
	eventsFromFSMCh chan bcFsmMessage

	swReporter *behaviour.SwitchReporter
}

// NewBlockchainReactor returns new reactor instance.
func NewBlockchainReactor(state sm.State, blockExec *sm.BlockExecutor, store *store.BlockStore,
	fastSync bool) *BlockchainReactor {

	if state.LastBlockHeight != store.Height() {
		panic(fmt.Sprintf("state (%v) and store (%v) height mismatch", state.LastBlockHeight,
			store.Height()))
	}

	const capacity = 1000
	eventsFromFSMCh := make(chan bcFsmMessage, capacity)
	messagesForFSMCh := make(chan bcReactorMessage, capacity)
	errorsForFSMCh := make(chan bcReactorMessage, capacity)

	startHeight := store.Height() + 1
	bcR := &BlockchainReactor{
		initialState:     state,
		state:            state,
		blockExec:        blockExec,
		fastSync:         fastSync,
		store:            store,
		messagesForFSMCh: messagesForFSMCh,
		eventsFromFSMCh:  eventsFromFSMCh,
		errorsForFSMCh:   errorsForFSMCh,
	}
	fsm := NewFSM(startHeight, bcR)
	bcR.fsm = fsm
	bcR.BaseReactor = *p2p.NewBaseReactor("BlockchainReactor", bcR)
	//bcR.swReporter = behaviour.NewSwitcReporter(bcR.BaseReactor.Switch)

	return bcR
}

// bcReactorMessage is used by the reactor to send messages to the FSM.
type bcReactorMessage struct {
	event bReactorEvent
	data  bReactorEventData
}

type bFsmEvent uint

const (
	// message type events
	peerErrorEv = iota + 1
	syncFinishedEv
)

type bFsmEventData struct {
	peerID p2p.ID
	err    error
}

// bcFsmMessage is used by the FSM to send messages to the reactor
type bcFsmMessage struct {
	event bFsmEvent
	data  bFsmEventData
}

// SetLogger implements cmn.Service by setting the logger on reactor and pool.
func (bcR *BlockchainReactor) SetLogger(l log.Logger) {
	bcR.BaseService.Logger = l
	bcR.fsm.SetLogger(l)
}

// OnStart implements cmn.Service.
func (bcR *BlockchainReactor) OnStart() error {
	bcR.swReporter = behaviour.NewSwitcReporter(bcR.BaseReactor.Switch)
	if bcR.fastSync {
		go bcR.poolRoutine()
	}
	return nil
}

// OnStop implements cmn.Service.
func (bcR *BlockchainReactor) OnStop() {
	_ = bcR.Stop()
}

// GetChannels implements Reactor
func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor {
	return []*p2p.ChannelDescriptor{
		{
			ID:                  BlockchainChannel,
			Priority:            10,
			SendQueueCapacity:   2000,
			RecvBufferCapacity:  50 * 4096,
			RecvMessageCapacity: maxMsgSize,
		},
	}
}

// AddPeer implements Reactor by sending our state to peer.
func (bcR *BlockchainReactor) AddPeer(peer p2p.Peer) {
	msgBytes := cdc.MustMarshalBinaryBare(&bcStatusResponseMessage{bcR.store.Height()})
	peer.Send(BlockchainChannel, msgBytes)
	// it's OK if send fails. will try later in poolRoutine

	// peer is added to the pool once we receive the first
	// bcStatusResponseMessage from the peer and call pool.updatePeer()
}

// sendBlockToPeer loads a block and sends it to the requesting peer.
// If the block doesn't exist a bcNoBlockResponseMessage is sent.
// If all nodes are honest, no node should request a block that doesn't exist.
func (bcR *BlockchainReactor) sendBlockToPeer(msg *bcBlockRequestMessage,
	src p2p.Peer) (queued bool) {

	block := bcR.store.LoadBlock(msg.Height)
	if block != nil {
		msgBytes := cdc.MustMarshalBinaryBare(&bcBlockResponseMessage{Block: block})
		return src.TrySend(BlockchainChannel, msgBytes)
	}

	bcR.Logger.Info("peer asking for a block we don't have", "src", src, "height", msg.Height)

	msgBytes := cdc.MustMarshalBinaryBare(&bcNoBlockResponseMessage{Height: msg.Height})
	return src.TrySend(BlockchainChannel, msgBytes)
}

func (bcR *BlockchainReactor) sendStatusResponseToPeer(msg *bcStatusRequestMessage, src p2p.Peer) (queued bool) {
	msgBytes := cdc.MustMarshalBinaryBare(&bcStatusResponseMessage{bcR.store.Height()})
	return src.TrySend(BlockchainChannel, msgBytes)
}

// RemovePeer implements Reactor by removing peer from the pool.
func (bcR *BlockchainReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
	msgData := bcReactorMessage{
		event: peerRemoveEv,
		data: bReactorEventData{
			peerID: peer.ID(),
			err:    errSwitchRemovesPeer,
		},
	}
	bcR.errorsForFSMCh <- msgData
}

// Receive implements Reactor by handling 4 types of messages (look below).
func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
	msg, err := decodeMsg(msgBytes)
	if err != nil {
		bcR.Logger.Error("error decoding message",
			"src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes)
		_ = bcR.swReporter.Report(behaviour.BadMessage(src.ID(), err.Error()))
		return
	}

	if err = msg.ValidateBasic(); err != nil {
		bcR.Logger.Error("peer sent us invalid msg", "peer", src, "msg", msg, "err", err)
		_ = bcR.swReporter.Report(behaviour.BadMessage(src.ID(), err.Error()))
		return
	}

	bcR.Logger.Debug("Receive", "src", src, "chID", chID, "msg", msg)

	switch msg := msg.(type) {
	case *bcBlockRequestMessage:
		if queued := bcR.sendBlockToPeer(msg, src); !queued {
			// Unfortunately not queued since the queue is full.
			bcR.Logger.Error("Could not send block message to peer", "src", src, "height", msg.Height)
		}

	case *bcStatusRequestMessage:
		// Send peer our state.
		if queued := bcR.sendStatusResponseToPeer(msg, src); !queued {
			// Unfortunately not queued since the queue is full.
			bcR.Logger.Error("Could not send status message to peer", "src", src)
		}

	case *bcBlockResponseMessage:
		msgForFSM := bcReactorMessage{
			event: blockResponseEv,
			data: bReactorEventData{
				peerID: src.ID(),
				height: msg.Block.Height,
				block:  msg.Block,
				length: len(msgBytes),
			},
		}
		bcR.Logger.Info("Received", "src", src, "height", msg.Block.Height)
		bcR.messagesForFSMCh <- msgForFSM

	case *bcStatusResponseMessage:
		// Got a peer status. Unverified.
		msgForFSM := bcReactorMessage{
			event: statusResponseEv,
			data: bReactorEventData{
				peerID: src.ID(),
				height: msg.Height,
				length: len(msgBytes),
			},
		}
		bcR.messagesForFSMCh <- msgForFSM

	default:
		bcR.Logger.Error(fmt.Sprintf("unknown message type %v", reflect.TypeOf(msg)))
	}
}

// processBlocksRoutine processes blocks until signaled to stop over the stopProcessing channel.
func (bcR *BlockchainReactor) processBlocksRoutine(stopProcessing chan struct{}) {
|
||||
|
||||
processReceivedBlockTicker := time.NewTicker(trySyncIntervalMS * time.Millisecond)
|
||||
doProcessBlockCh := make(chan struct{}, 1)
|
||||
|
||||
lastHundred := time.Now()
|
||||
lastRate := 0.0
|
||||
|
||||
ForLoop:
|
||||
for {
|
||||
select {
|
||||
case <-stopProcessing:
|
||||
bcR.Logger.Info("finishing block execution")
|
||||
break ForLoop
|
||||
case <-processReceivedBlockTicker.C: // try to execute blocks
|
||||
select {
|
||||
case doProcessBlockCh <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
case <-doProcessBlockCh:
|
||||
for {
|
||||
err := bcR.processBlock()
|
||||
if err == errMissingBlock {
|
||||
break
|
||||
}
|
||||
// Notify FSM of block processing result.
|
||||
msgForFSM := bcReactorMessage{
|
||||
event: processedBlockEv,
|
||||
data: bReactorEventData{
|
||||
err: err,
|
||||
},
|
||||
}
|
||||
_ = bcR.fsm.Handle(&msgForFSM)
|
||||
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
|
||||
bcR.blocksSynced++
|
||||
if bcR.blocksSynced%100 == 0 {
|
||||
lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds())
|
||||
height, maxPeerHeight := bcR.fsm.Status()
|
||||
bcR.Logger.Info("Fast Sync Rate", "height", height,
|
||||
"max_peer_height", maxPeerHeight, "blocks/s", lastRate)
|
||||
lastHundred = time.Now()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// poolRoutine receives and handles messages from the Receive() routine and from the FSM.
|
||||
func (bcR *BlockchainReactor) poolRoutine() {
|
||||
|
||||
bcR.fsm.Start()
|
||||
|
||||
sendBlockRequestTicker := time.NewTicker(trySendIntervalMS * time.Millisecond)
|
||||
statusUpdateTicker := time.NewTicker(statusUpdateIntervalSeconds * time.Second)
|
||||
|
||||
stopProcessing := make(chan struct{}, 1)
|
||||
go bcR.processBlocksRoutine(stopProcessing)
|
||||
|
||||
ForLoop:
|
||||
for {
|
||||
select {
|
||||
|
||||
case <-sendBlockRequestTicker.C:
|
||||
if !bcR.fsm.NeedsBlocks() {
|
||||
continue
|
||||
}
|
||||
_ = bcR.fsm.Handle(&bcReactorMessage{
|
||||
event: makeRequestsEv,
|
||||
data: bReactorEventData{
|
||||
maxNumRequests: maxNumRequests}})
|
||||
|
||||
case <-statusUpdateTicker.C:
|
||||
// Ask for status updates.
|
||||
go bcR.sendStatusRequest()
|
||||
|
||||
case msg := <-bcR.messagesForFSMCh:
|
||||
// Sent from the Receive() routine when status (statusResponseEv) and
|
||||
// block (blockResponseEv) response events are received
|
||||
_ = bcR.fsm.Handle(&msg)
|
||||
|
||||
case msg := <-bcR.errorsForFSMCh:
|
||||
// Sent from the switch.RemovePeer() routine (RemovePeerEv) and
|
||||
// FSM state timer expiry routine (stateTimeoutEv).
|
||||
_ = bcR.fsm.Handle(&msg)
|
||||
|
||||
case msg := <-bcR.eventsFromFSMCh:
|
||||
			switch msg.event {
			case syncFinishedEv:
				stopProcessing <- struct{}{}
				// Sent from the FSM when it enters the finished state.
				break ForLoop
			case peerErrorEv:
				// Sent from the FSM when it detects a peer error.
				bcR.reportPeerErrorToSwitch(msg.data.err, msg.data.peerID)
				if msg.data.err == errNoPeerResponse {
					// Sent from the peer timeout handler routine.
					_ = bcR.fsm.Handle(&bcReactorMessage{
						event: peerRemoveEv,
						data: bReactorEventData{
							peerID: msg.data.peerID,
							err:    msg.data.err,
						},
					})
				}
				// else: for slow peers, or for errors due to blocks received from
				// the wrong peer, the FSM has already removed the peers.
			default:
				bcR.Logger.Error("Event from FSM not supported", "type", msg.event)
			}

		case <-bcR.Quit():
			break ForLoop
		}
	}
}

func (bcR *BlockchainReactor) reportPeerErrorToSwitch(err error, peerID p2p.ID) {
	peer := bcR.Switch.Peers().Get(peerID)
	if peer != nil {
		_ = bcR.swReporter.Report(behaviour.BadMessage(peerID, err.Error()))
	}
}

func (bcR *BlockchainReactor) processBlock() error {

	first, second, err := bcR.fsm.FirstTwoBlocks()
	if err != nil {
		// We need both blocks to sync the first one.
		return err
	}

	chainID := bcR.initialState.ChainID

	firstParts := first.MakePartSet(types.BlockPartSizeBytes)
	firstPartsHeader := firstParts.Header()
	firstID := types.BlockID{Hash: first.Hash(), PartsHeader: firstPartsHeader}
	// Finally, verify the first block using the second's commit.
	// NOTE: we can probably make this more efficient, but note that calling
	// first.Hash() doesn't verify the tx contents, so MakePartSet() is
	// currently necessary.
	err = bcR.state.Validators.VerifyCommit(chainID, firstID, first.Height, second.LastCommit)
	if err != nil {
		bcR.Logger.Error("error during commit verification", "err", err,
			"first", first.Height, "second", second.Height)
		return errBlockVerificationFailure
	}

	bcR.store.SaveBlock(first, firstParts, second.LastCommit)

	bcR.state, err = bcR.blockExec.ApplyBlock(bcR.state, firstID, first)
	if err != nil {
		panic(fmt.Sprintf("failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
	}

	return nil
}

// Implements bcRNotifier
// sendStatusRequest broadcasts the `BlockStore` height.
func (bcR *BlockchainReactor) sendStatusRequest() {
	msgBytes := cdc.MustMarshalBinaryBare(&bcStatusRequestMessage{bcR.store.Height()})
	bcR.Switch.Broadcast(BlockchainChannel, msgBytes)
}

// Implements bcRNotifier
// sendBlockRequest sends a `BlockRequest` for the given height to the specified peer.
func (bcR *BlockchainReactor) sendBlockRequest(peerID p2p.ID, height int64) error {
	peer := bcR.Switch.Peers().Get(peerID)
	if peer == nil {
		return errNilPeerForBlockRequest
	}

	msgBytes := cdc.MustMarshalBinaryBare(&bcBlockRequestMessage{height})
	queued := peer.TrySend(BlockchainChannel, msgBytes)
	if !queued {
		return errSendQueueFull
	}
	return nil
}

// Implements bcRNotifier
func (bcR *BlockchainReactor) switchToConsensus() {
	conR, ok := bcR.Switch.Reactor("CONSENSUS").(consensusReactor)
	if ok {
		conR.SwitchToConsensus(bcR.state, bcR.blocksSynced)
		bcR.eventsFromFSMCh <- bcFsmMessage{event: syncFinishedEv}
	}
	// else: should only happen during testing.
}

// Implements bcRNotifier
// sendPeerError is called by the FSM and the pool:
//   - the pool calls it when it detects a slow peer or when a peer times out
//   - the FSM calls it when:
//     - adding a block (addBlock) fails
//     - reactor processing of a block reports a failure and the FSM sends back
//       the peers of the first and second blocks
func (bcR *BlockchainReactor) sendPeerError(err error, peerID p2p.ID) {
	bcR.Logger.Info("sendPeerError:", "peer", peerID, "error", err)
	msgData := bcFsmMessage{
		event: peerErrorEv,
		data: bFsmEventData{
			peerID: peerID,
			err:    err,
		},
	}
	bcR.eventsFromFSMCh <- msgData
}

// Implements bcRNotifier
func (bcR *BlockchainReactor) resetStateTimer(name string, timer **time.Timer, timeout time.Duration) {
	if timer == nil {
		panic("nil timer pointer parameter")
	}
	if *timer == nil {
		*timer = time.AfterFunc(timeout, func() {
			msg := bcReactorMessage{
				event: stateTimeoutEv,
				data: bReactorEventData{
					stateName: name,
				},
			}
			bcR.errorsForFSMCh <- msg
		})
	} else {
		(*timer).Reset(timeout)
	}
}
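The `**time.Timer` parameter lets the FSM hand the reactor a timer slot that starts out nil: the first call creates the timer with time.AfterFunc, later calls only push the deadline out with Reset. A minimal standalone sketch of the same pattern, with illustrative names not taken from this codebase:

package main

import (
	"fmt"
	"time"
)

// resetTimer lazily creates the timer on first use; later calls only push
// the deadline out. Reset keeps the callback installed by AfterFunc.
func resetTimer(timer **time.Timer, timeout time.Duration, fire func()) {
	if *timer == nil {
		*timer = time.AfterFunc(timeout, fire)
		return
	}
	(*timer).Reset(timeout)
}

func main() {
	var stateTimer *time.Timer // nil until the first reset, as in the reactor
	done := make(chan struct{})

	resetTimer(&stateTimer, 50*time.Millisecond, func() { close(done) })
	resetTimer(&stateTimer, 80*time.Millisecond, nil) // only moves the deadline

	<-done
	fmt.Println("state timeout fired")
}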
//-----------------------------------------------------------------------------
// Messages

// BlockchainMessage is a generic message for this reactor.
type BlockchainMessage interface {
	ValidateBasic() error
}

// RegisterBlockchainMessages registers the fast sync messages for amino encoding.
func RegisterBlockchainMessages(cdc *amino.Codec) {
	cdc.RegisterInterface((*BlockchainMessage)(nil), nil)
	cdc.RegisterConcrete(&bcBlockRequestMessage{}, "tendermint/blockchain/BlockRequest", nil)
	cdc.RegisterConcrete(&bcBlockResponseMessage{}, "tendermint/blockchain/BlockResponse", nil)
	cdc.RegisterConcrete(&bcNoBlockResponseMessage{}, "tendermint/blockchain/NoBlockResponse", nil)
	cdc.RegisterConcrete(&bcStatusResponseMessage{}, "tendermint/blockchain/StatusResponse", nil)
	cdc.RegisterConcrete(&bcStatusRequestMessage{}, "tendermint/blockchain/StatusRequest", nil)
}

func decodeMsg(bz []byte) (msg BlockchainMessage, err error) {
	if len(bz) > maxMsgSize {
		return msg, fmt.Errorf("msg exceeds max size (%d > %d)", len(bz), maxMsgSize)
	}
	err = cdc.UnmarshalBinaryBare(bz, &msg)
	return
}
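For reference, a hedged round-trip sketch of decodeMsg written as an in-package test. It assumes the package-level cdc has the blockchain messages registered (as the reactor's own cdc calls imply); the test name is illustrative, not part of the diff:

func TestDecodeMsgRoundTrip(t *testing.T) {
	// Encode a concrete message, then decode it back through the
	// BlockchainMessage interface; amino restores the concrete type.
	bz := cdc.MustMarshalBinaryBare(&bcStatusRequestMessage{Height: 10})
	msg, err := decodeMsg(bz)
	if err != nil {
		t.Fatal(err)
	}
	req, ok := msg.(*bcStatusRequestMessage)
	if !ok {
		t.Fatalf("unexpected message type %T", msg)
	}
	if req.Height != 10 {
		t.Fatalf("expected height 10, got %d", req.Height)
	}
}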
//-------------------------------------

type bcBlockRequestMessage struct {
	Height int64
}

// ValidateBasic performs basic validation.
func (m *bcBlockRequestMessage) ValidateBasic() error {
	if m.Height < 0 {
		return errors.New("negative Height")
	}
	return nil
}

func (m *bcBlockRequestMessage) String() string {
	return fmt.Sprintf("[bcBlockRequestMessage %v]", m.Height)
}

type bcNoBlockResponseMessage struct {
	Height int64
}

// ValidateBasic performs basic validation.
func (m *bcNoBlockResponseMessage) ValidateBasic() error {
	if m.Height < 0 {
		return errors.New("negative Height")
	}
	return nil
}

func (m *bcNoBlockResponseMessage) String() string {
	return fmt.Sprintf("[bcNoBlockResponseMessage %d]", m.Height)
}

//-------------------------------------

type bcBlockResponseMessage struct {
	Block *types.Block
}

// ValidateBasic performs basic validation.
func (m *bcBlockResponseMessage) ValidateBasic() error {
	return m.Block.ValidateBasic()
}

func (m *bcBlockResponseMessage) String() string {
	return fmt.Sprintf("[bcBlockResponseMessage %v]", m.Block.Height)
}

//-------------------------------------

type bcStatusRequestMessage struct {
	Height int64
}

// ValidateBasic performs basic validation.
func (m *bcStatusRequestMessage) ValidateBasic() error {
	if m.Height < 0 {
		return errors.New("negative Height")
	}
	return nil
}

func (m *bcStatusRequestMessage) String() string {
	return fmt.Sprintf("[bcStatusRequestMessage %v]", m.Height)
}

//-------------------------------------

type bcStatusResponseMessage struct {
	Height int64
}

// ValidateBasic performs basic validation.
func (m *bcStatusResponseMessage) ValidateBasic() error {
	if m.Height < 0 {
		return errors.New("negative Height")
	}
	return nil
}

func (m *bcStatusResponseMessage) String() string {
	return fmt.Sprintf("[bcStatusResponseMessage %v]", m.Height)
}
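Every message implements ValidateBasic, which the receive path is meant to call right after decodeMsg and before acting on the message. A hedged sketch of that receive-side pattern; the handleRaw helper below is illustrative and not part of the diff:

// Illustrative receive-side handling; not part of the diff.
func handleRaw(bz []byte) (BlockchainMessage, error) {
	msg, err := decodeMsg(bz)
	if err != nil {
		return nil, err
	}
	if err := msg.ValidateBasic(); err != nil {
		return nil, err // e.g. "negative Height"
	}
	return msg, nil
}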
450
blockchain/v1/reactor_fsm.go
Normal file
@@ -0,0 +1,450 @@
package v1

import (
	"errors"
	"fmt"
	"sync"
	"time"

	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/p2p"
	"github.com/tendermint/tendermint/types"
)

// Blockchain Reactor State
type bcReactorFSMState struct {
	name string

	// called when transitioning out of the current state
	handle func(*BcReactorFSM, bReactorEvent, bReactorEventData) (next *bcReactorFSMState, err error)
	// called when entering the state
	enter func(fsm *BcReactorFSM)

	// timeout to ensure the FSM is not stuck in a state forever;
	// the timer is owned and run by the FSM instance
	timeout time.Duration
}

func (s *bcReactorFSMState) String() string {
	return s.name
}

// BcReactorFSM is the data structure for the Blockchain Reactor State Machine.
type BcReactorFSM struct {
	logger log.Logger
	mtx    sync.Mutex

	startTime time.Time

	state      *bcReactorFSMState
	stateTimer *time.Timer
	pool       *BlockPool

	// interface used to call the Blockchain reactor to send StatusRequest, BlockRequest, report errors, etc.
	toBcR bcReactor
}

// NewFSM creates a new reactor FSM.
func NewFSM(height int64, toBcR bcReactor) *BcReactorFSM {
	return &BcReactorFSM{
		state:     unknown,
		startTime: time.Now(),
		pool:      NewBlockPool(height, toBcR),
		toBcR:     toBcR,
	}
}

// bReactorEventData is part of the message sent by the reactor to the FSM and used by the state handlers.
type bReactorEventData struct {
	peerID         p2p.ID
	err            error        // for peer error: timeout, slow; for processed block event if an error occurred
	height         int64        // for status response; for processed block event
	block          *types.Block // for block response
	stateName      string       // for state timeout events
	length         int          // for block response event, length of the received block, used to detect slow peers
	maxNumRequests int          // for request needed event, maximum number of pending requests
}

// Blockchain Reactor Events (the input to the state machine)
type bReactorEvent uint

const (
	// message type events
	startFSMEv = iota + 1
	statusResponseEv
	blockResponseEv
	processedBlockEv
	makeRequestsEv
	stopFSMEv

	// other events
	peerRemoveEv = iota + 256
	stateTimeoutEv
)

func (msg *bcReactorMessage) String() string {
	var dataStr string

	switch msg.event {
	case startFSMEv:
		dataStr = ""
	case statusResponseEv:
		dataStr = fmt.Sprintf("peer=%v height=%v", msg.data.peerID, msg.data.height)
	case blockResponseEv:
		dataStr = fmt.Sprintf("peer=%v block.height=%v length=%v",
			msg.data.peerID, msg.data.block.Height, msg.data.length)
	case processedBlockEv:
		dataStr = fmt.Sprintf("error=%v", msg.data.err)
	case makeRequestsEv:
		dataStr = ""
	case stopFSMEv:
		dataStr = ""
	case peerRemoveEv:
		dataStr = fmt.Sprintf("peer: %v is being removed by the switch", msg.data.peerID)
	case stateTimeoutEv:
		dataStr = fmt.Sprintf("state=%v", msg.data.stateName)
	default:
		dataStr = "cannot interpret message data"
	}

	return fmt.Sprintf("%v: %v", msg.event, dataStr)
}

func (ev bReactorEvent) String() string {
	switch ev {
	case startFSMEv:
		return "startFSMEv"
	case statusResponseEv:
		return "statusResponseEv"
	case blockResponseEv:
		return "blockResponseEv"
	case processedBlockEv:
		return "processedBlockEv"
	case makeRequestsEv:
		return "makeRequestsEv"
	case stopFSMEv:
		return "stopFSMEv"
	case peerRemoveEv:
		return "peerRemoveEv"
	case stateTimeoutEv:
		return "stateTimeoutEv"
	default:
		return "event unknown"
	}
}

// states
var (
	unknown      *bcReactorFSMState
	waitForPeer  *bcReactorFSMState
	waitForBlock *bcReactorFSMState
	finished     *bcReactorFSMState
)

// timeouts for state timers
const (
	waitForPeerTimeout                 = 3 * time.Second
	waitForBlockAtCurrentHeightTimeout = 10 * time.Second
)

// errors
var (
	// internal to the package
	errNoErrorFinished        = errors.New("fast sync is finished")
	errInvalidEvent           = errors.New("invalid event in current state")
	errMissingBlock           = errors.New("missing blocks")
	errNilPeerForBlockRequest = errors.New("peer for block request does not exist in the switch")
	errSendQueueFull          = errors.New("block request not made, send-queue is full")
	errPeerTooShort           = errors.New("peer height too low, old peer removed/new peer not added")
	errSwitchRemovesPeer      = errors.New("switch is removing peer")
	errTimeoutEventWrongState = errors.New("timeout event for a state different than the current one")
	errNoTallerPeer           = errors.New("fast sync timed out on waiting for a peer taller than this node")

	// reported eventually to the switch
	errPeerLowersItsHeight             = errors.New("fast sync peer reports a height lower than previous")
	errNoPeerResponseForCurrentHeights = errors.New("fast sync timed out on peer block response for current heights")
	errNoPeerResponse                  = errors.New("fast sync timed out on peer block response")
	errBadDataFromPeer                 = errors.New("fast sync received block from wrong peer or block is bad")
	errDuplicateBlock                  = errors.New("fast sync received duplicate block from peer")
	errBlockVerificationFailure        = errors.New("fast sync block verification failure")
	errSlowPeer                        = errors.New("fast sync peer is not sending us data fast enough")
)

func init() {
	unknown = &bcReactorFSMState{
		name: "unknown",
		handle: func(fsm *BcReactorFSM, ev bReactorEvent, data bReactorEventData) (*bcReactorFSMState, error) {
			switch ev {
			case startFSMEv:
				// Broadcast Status message. Currently doesn't return a non-nil error.
				fsm.toBcR.sendStatusRequest()
				return waitForPeer, nil

			case stopFSMEv:
				return finished, errNoErrorFinished

			default:
				return unknown, errInvalidEvent
			}
		},
	}

	waitForPeer = &bcReactorFSMState{
		name:    "waitForPeer",
		timeout: waitForPeerTimeout,
		enter: func(fsm *BcReactorFSM) {
			// Start the state timer; it is stopped when leaving the state.
			fsm.resetStateTimer()
		},
		handle: func(fsm *BcReactorFSM, ev bReactorEvent, data bReactorEventData) (*bcReactorFSMState, error) {
			switch ev {
			case stateTimeoutEv:
				if data.stateName != "waitForPeer" {
					fsm.logger.Error("received a state timeout event for different state",
						"state", data.stateName)
					return waitForPeer, errTimeoutEventWrongState
				}
				// There was no statusResponse received from any peer.
				// Should we send the status request again?
				return finished, errNoTallerPeer

			case statusResponseEv:
				if err := fsm.pool.UpdatePeer(data.peerID, data.height); err != nil {
					if fsm.pool.NumPeers() == 0 {
						return waitForPeer, err
					}
				}
				if fsm.stateTimer != nil {
					fsm.stateTimer.Stop()
				}
				return waitForBlock, nil

			case stopFSMEv:
				if fsm.stateTimer != nil {
					fsm.stateTimer.Stop()
				}
				return finished, errNoErrorFinished

			default:
				return waitForPeer, errInvalidEvent
			}
		},
	}

	waitForBlock = &bcReactorFSMState{
		name:    "waitForBlock",
		timeout: waitForBlockAtCurrentHeightTimeout,
		enter: func(fsm *BcReactorFSM) {
			// Start the state timer; it is stopped when leaving the state.
			fsm.resetStateTimer()
		},
		handle: func(fsm *BcReactorFSM, ev bReactorEvent, data bReactorEventData) (*bcReactorFSMState, error) {
			switch ev {

			case statusResponseEv:
				err := fsm.pool.UpdatePeer(data.peerID, data.height)
				if fsm.pool.NumPeers() == 0 {
					return waitForPeer, err
				}
				if fsm.pool.ReachedMaxHeight() {
					return finished, err
				}
				return waitForBlock, err

			case blockResponseEv:
				fsm.logger.Debug("blockResponseEv", "H", data.block.Height)
				err := fsm.pool.AddBlock(data.peerID, data.block, data.length)
				if err != nil {
					// A block was received that was unsolicited, from an unexpected peer, or that we already have.
					// Ignore the block, remove the peer and send an error to the switch.
					fsm.pool.RemovePeer(data.peerID, err)
					fsm.toBcR.sendPeerError(err, data.peerID)
				}
				if fsm.pool.NumPeers() == 0 {
					return waitForPeer, err
				}
				return waitForBlock, err

			case processedBlockEv:
				if data.err != nil {
					first, second, _ := fsm.pool.FirstTwoBlocksAndPeers()
					fsm.logger.Error("error processing block", "err", data.err,
						"first", first.block.Height, "second", second.block.Height)
					fsm.logger.Error("send peer error for", "peer", first.peer.ID)
					fsm.toBcR.sendPeerError(data.err, first.peer.ID)
					fsm.logger.Error("send peer error for", "peer", second.peer.ID)
					fsm.toBcR.sendPeerError(data.err, second.peer.ID)
					// Remove the first two blocks. This will also remove the peers.
					fsm.pool.InvalidateFirstTwoBlocks(data.err)
				} else {
					fsm.pool.ProcessedCurrentHeightBlock()
					// Since we advanced one block, reset the state timer.
					fsm.resetStateTimer()
				}

				// Both cases above may result in achieving maximum height.
				if fsm.pool.ReachedMaxHeight() {
					return finished, nil
				}

				return waitForBlock, data.err

			case peerRemoveEv:
				// This event is sent by the switch to remove disconnected and errored peers.
				fsm.pool.RemovePeer(data.peerID, data.err)
				if fsm.pool.NumPeers() == 0 {
					return waitForPeer, nil
				}
				if fsm.pool.ReachedMaxHeight() {
					return finished, nil
				}
				return waitForBlock, nil

			case makeRequestsEv:
				fsm.makeNextRequests(data.maxNumRequests)
				return waitForBlock, nil

			case stateTimeoutEv:
				if data.stateName != "waitForBlock" {
					fsm.logger.Error("received a state timeout event for different state",
						"state", data.stateName)
					return waitForBlock, errTimeoutEventWrongState
				}
				// We haven't received the block at the current height or height+1. Remove the peer.
				fsm.pool.RemovePeerAtCurrentHeights(errNoPeerResponseForCurrentHeights)
				fsm.resetStateTimer()
				if fsm.pool.NumPeers() == 0 {
					return waitForPeer, errNoPeerResponseForCurrentHeights
				}
				if fsm.pool.ReachedMaxHeight() {
					return finished, nil
				}
				return waitForBlock, errNoPeerResponseForCurrentHeights

			case stopFSMEv:
				if fsm.stateTimer != nil {
					fsm.stateTimer.Stop()
				}
				return finished, errNoErrorFinished

			default:
				return waitForBlock, errInvalidEvent
			}
		},
	}

	finished = &bcReactorFSMState{
		name: "finished",
		enter: func(fsm *BcReactorFSM) {
			fsm.logger.Info("Time to switch to consensus reactor!", "height", fsm.pool.Height)
			fsm.toBcR.switchToConsensus()
			fsm.cleanup()
		},
		handle: func(fsm *BcReactorFSM, ev bReactorEvent, data bReactorEventData) (*bcReactorFSMState, error) {
			return finished, nil
		},
	}
}

// Interface used by the FSM for sending Block and Status requests and for
// reporting peer errors and state timeouts.
// Implemented by the BlockchainReactor and by the tests.
type bcReactor interface {
	sendStatusRequest()
	sendBlockRequest(peerID p2p.ID, height int64) error
	sendPeerError(err error, peerID p2p.ID)
	resetStateTimer(name string, timer **time.Timer, timeout time.Duration)
	switchToConsensus()
}

// SetLogger sets the FSM logger.
func (fsm *BcReactorFSM) SetLogger(l log.Logger) {
	fsm.logger = l
	fsm.pool.SetLogger(l)
}

// Start starts the FSM.
func (fsm *BcReactorFSM) Start() {
	_ = fsm.Handle(&bcReactorMessage{event: startFSMEv})
}

// Handle processes messages and events sent to the FSM.
func (fsm *BcReactorFSM) Handle(msg *bcReactorMessage) error {
	fsm.mtx.Lock()
	defer fsm.mtx.Unlock()
	fsm.logger.Debug("FSM received", "event", msg, "state", fsm.state)

	if fsm.state == nil {
		fsm.state = unknown
	}
	next, err := fsm.state.handle(fsm, msg.event, msg.data)
	if err != nil {
		fsm.logger.Error("FSM event handler returned", "err", err,
			"state", fsm.state, "event", msg.event)
	}

	oldState := fsm.state.name
	fsm.transition(next)
	if oldState != fsm.state.name {
		fsm.logger.Info("FSM changed state", "new_state", fsm.state)
	}
	return err
}
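Handle is the FSM's single synchronized entry point: the current state's handle function picks the next state, transition runs that state's enter hook, and the error is propagated back to the caller. A hedged sketch of driving it by hand, leaning on the newTestReactor helper from reactor_fsm_test.go further down; the test name is illustrative, not part of the diff:

func TestHandleSketch(t *testing.T) {
	testBcR := newTestReactor(1) // helper from reactor_fsm_test.go below
	fsm := testBcR.fsm

	_ = fsm.Handle(&bcReactorMessage{event: startFSMEv}) // unknown -> waitForPeer
	_ = fsm.Handle(&bcReactorMessage{
		event: statusResponseEv,
		data:  bReactorEventData{peerID: "P1", height: 100},
	}) // waitForPeer -> waitForBlock

	if fsm.state.name != "waitForBlock" {
		t.Fatalf("unexpected state %s", fsm.state.name)
	}
}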
func (fsm *BcReactorFSM) transition(next *bcReactorFSMState) {
	if next == nil {
		return
	}
	if fsm.state != next {
		fsm.state = next
		if next.enter != nil {
			next.enter(fsm)
		}
	}
}

// Called when entering an FSM state in order to detect lack of progress in the state machine.
// Note the use of the 'bcR' interface to facilitate testing without the timer expiring.
func (fsm *BcReactorFSM) resetStateTimer() {
	fsm.toBcR.resetStateTimer(fsm.state.name, &fsm.stateTimer, fsm.state.timeout)
}

func (fsm *BcReactorFSM) isCaughtUp() bool {
	return fsm.state == finished
}

func (fsm *BcReactorFSM) makeNextRequests(maxNumRequests int) {
	fsm.pool.MakeNextRequests(maxNumRequests)
}

func (fsm *BcReactorFSM) cleanup() {
	fsm.pool.Cleanup()
}

// NeedsBlocks checks if more block requests are required.
func (fsm *BcReactorFSM) NeedsBlocks() bool {
	fsm.mtx.Lock()
	defer fsm.mtx.Unlock()
	return fsm.state.name == "waitForBlock" && fsm.pool.NeedsBlocks()
}

// FirstTwoBlocks returns the two blocks at pool height and height+1.
func (fsm *BcReactorFSM) FirstTwoBlocks() (first, second *types.Block, err error) {
	fsm.mtx.Lock()
	defer fsm.mtx.Unlock()
	firstBP, secondBP, err := fsm.pool.FirstTwoBlocksAndPeers()
	if err == nil {
		first = firstBP.block
		second = secondBP.block
	}
	return
}

// Status returns the pool's height and the maximum peer height.
func (fsm *BcReactorFSM) Status() (height, maxPeerHeight int64) {
	fsm.mtx.Lock()
	defer fsm.mtx.Unlock()
	return fsm.pool.Height, fsm.pool.MaxPeerHeight
}
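NeedsBlocks, FirstTwoBlocks and Status all take the FSM mutex, so the reactor can poll them from its own goroutines while events flow through Handle. A rough sketch of such a polling loop; illustrative only, since the real reactor drives this through its own channels, and it assumes the package's maxNumRequests constant used by the tests below:

// Sketch of a reactor-side poll loop; not part of the diff.
func pollFSM(fsm *BcReactorFSM) {
	ticker := time.NewTicker(10 * time.Millisecond)
	defer ticker.Stop()
	for range ticker.C {
		height, maxPeerHeight := fsm.Status()
		if maxPeerHeight > 0 && height >= maxPeerHeight {
			return // caught up
		}
		if fsm.NeedsBlocks() {
			// Ask the FSM to fill the request pipeline.
			_ = fsm.Handle(&bcReactorMessage{
				event: makeRequestsEv,
				data:  bReactorEventData{maxNumRequests: maxNumRequests},
			})
		}
	}
}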
938
blockchain/v1/reactor_fsm_test.go
Normal file
@@ -0,0 +1,938 @@
package v1

import (
	"fmt"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	cmn "github.com/tendermint/tendermint/libs/common"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/p2p"
	"github.com/tendermint/tendermint/types"
)

type lastBlockRequestT struct {
	peerID p2p.ID
	height int64
}

type lastPeerErrorT struct {
	peerID p2p.ID
	err    error
}

// reactor for FSM testing
type testReactor struct {
	logger            log.Logger
	fsm               *BcReactorFSM
	numStatusRequests int
	numBlockRequests  int
	lastBlockRequest  lastBlockRequestT
	lastPeerError     lastPeerErrorT
	stateTimerStarts  map[string]int
}

func sendEventToFSM(fsm *BcReactorFSM, ev bReactorEvent, data bReactorEventData) error {
	return fsm.Handle(&bcReactorMessage{event: ev, data: data})
}

type fsmStepTestValues struct {
	currentState string
	event        bReactorEvent
	data         bReactorEventData

	wantErr           error
	wantState         string
	wantStatusReqSent bool
	wantReqIncreased  bool
	wantNewBlocks     []int64
	wantRemovedPeers  []p2p.ID
}

// ---------------------------------------------------------------------------
// helper test functions for different FSM events, states and expected behavior
func sStopFSMEv(current, expected string) fsmStepTestValues {
	return fsmStepTestValues{
		currentState: current,
		event:        stopFSMEv,
		wantState:    expected,
		wantErr:      errNoErrorFinished}
}

func sUnknownFSMEv(current string) fsmStepTestValues {
	return fsmStepTestValues{
		currentState: current,
		event:        1234,
		wantState:    current,
		wantErr:      errInvalidEvent}
}

func sStartFSMEv() fsmStepTestValues {
	return fsmStepTestValues{
		currentState:      "unknown",
		event:             startFSMEv,
		wantState:         "waitForPeer",
		wantStatusReqSent: true}
}

func sStateTimeoutEv(current, expected string, timedoutState string, wantErr error) fsmStepTestValues {
	return fsmStepTestValues{
		currentState: current,
		event:        stateTimeoutEv,
		data: bReactorEventData{
			stateName: timedoutState,
		},
		wantState: expected,
		wantErr:   wantErr,
	}
}

func sProcessedBlockEv(current, expected string, reactorError error) fsmStepTestValues {
	return fsmStepTestValues{
		currentState: current,
		event:        processedBlockEv,
		data: bReactorEventData{
			err: reactorError,
		},
		wantState: expected,
		wantErr:   reactorError,
	}
}

func sStatusEv(current, expected string, peerID p2p.ID, height int64, err error) fsmStepTestValues {
	return fsmStepTestValues{
		currentState: current,
		event:        statusResponseEv,
		data:         bReactorEventData{peerID: peerID, height: height},
		wantState:    expected,
		wantErr:      err}
}

func sMakeRequestsEv(current, expected string, maxPendingRequests int) fsmStepTestValues {
	return fsmStepTestValues{
		currentState:     current,
		event:            makeRequestsEv,
		data:             bReactorEventData{maxNumRequests: maxPendingRequests},
		wantState:        expected,
		wantReqIncreased: true,
	}
}

func sMakeRequestsEvErrored(current, expected string,
	maxPendingRequests int, err error, peersRemoved []p2p.ID) fsmStepTestValues {
	return fsmStepTestValues{
		currentState:     current,
		event:            makeRequestsEv,
		data:             bReactorEventData{maxNumRequests: maxPendingRequests},
		wantState:        expected,
		wantErr:          err,
		wantRemovedPeers: peersRemoved,
		wantReqIncreased: true,
	}
}

func sBlockRespEv(current, expected string, peerID p2p.ID, height int64, prevBlocks []int64) fsmStepTestValues {
	txs := []types.Tx{types.Tx("foo"), types.Tx("bar")}
	return fsmStepTestValues{
		currentState: current,
		event:        blockResponseEv,
		data: bReactorEventData{
			peerID: peerID,
			height: height,
			block:  types.MakeBlock(int64(height), txs, nil, nil),
			length: 100},
		wantState:     expected,
		wantNewBlocks: append(prevBlocks, height),
	}
}

func sBlockRespEvErrored(current, expected string,
	peerID p2p.ID, height int64, prevBlocks []int64, wantErr error, peersRemoved []p2p.ID) fsmStepTestValues {
	txs := []types.Tx{types.Tx("foo"), types.Tx("bar")}

	return fsmStepTestValues{
		currentState: current,
		event:        blockResponseEv,
		data: bReactorEventData{
			peerID: peerID,
			height: height,
			block:  types.MakeBlock(int64(height), txs, nil, nil),
			length: 100},
		wantState:        expected,
		wantErr:          wantErr,
		wantRemovedPeers: peersRemoved,
		wantNewBlocks:    prevBlocks,
	}
}

func sPeerRemoveEv(current, expected string, peerID p2p.ID, err error, peersRemoved []p2p.ID) fsmStepTestValues {
	return fsmStepTestValues{
		currentState: current,
		event:        peerRemoveEv,
		data: bReactorEventData{
			peerID: peerID,
			err:    err,
		},
		wantState:        expected,
		wantRemovedPeers: peersRemoved,
	}
}

// --------------------------------------------

func newTestReactor(height int64) *testReactor {
	testBcR := &testReactor{logger: log.TestingLogger(), stateTimerStarts: make(map[string]int)}
	testBcR.fsm = NewFSM(height, testBcR)
	testBcR.fsm.SetLogger(testBcR.logger)
	return testBcR
}

func fixBlockResponseEvStep(step *fsmStepTestValues, testBcR *testReactor) {
	// There is currently no good way to know to which peer a block request was sent.
	// So in some cases where it does not matter, before we simulate a block response
	// we cheat and look up where it is expected from.
	if step.event == blockResponseEv {
		height := step.data.height
		peerID, ok := testBcR.fsm.pool.blocks[height]
		if ok {
			step.data.peerID = peerID
		}
	}
}

type testFields struct {
	name               string
	startingHeight     int64
	maxRequestsPerPeer int
	maxPendingRequests int
	steps              []fsmStepTestValues
}

func executeFSMTests(t *testing.T, tests []testFields, matchRespToReq bool) {
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Create test reactor
			testBcR := newTestReactor(tt.startingHeight)

			if tt.maxRequestsPerPeer != 0 {
				maxRequestsPerPeer = tt.maxRequestsPerPeer
			}

			for _, step := range tt.steps {
				assert.Equal(t, step.currentState, testBcR.fsm.state.name)

				var heightBefore int64
				if step.event == processedBlockEv && step.data.err == errBlockVerificationFailure {
					heightBefore = testBcR.fsm.pool.Height
				}
				oldNumStatusRequests := testBcR.numStatusRequests
				oldNumBlockRequests := testBcR.numBlockRequests
				if matchRespToReq {
					fixBlockResponseEvStep(&step, testBcR)
				}

				fsmErr := sendEventToFSM(testBcR.fsm, step.event, step.data)
				assert.Equal(t, step.wantErr, fsmErr)

				if step.wantStatusReqSent {
					assert.Equal(t, oldNumStatusRequests+1, testBcR.numStatusRequests)
				} else {
					assert.Equal(t, oldNumStatusRequests, testBcR.numStatusRequests)
				}

				if step.wantReqIncreased {
					assert.True(t, oldNumBlockRequests < testBcR.numBlockRequests)
				} else {
					assert.Equal(t, oldNumBlockRequests, testBcR.numBlockRequests)
				}

				for _, height := range step.wantNewBlocks {
					_, err := testBcR.fsm.pool.BlockAndPeerAtHeight(height)
					assert.Nil(t, err)
				}
				if step.event == processedBlockEv && step.data.err == errBlockVerificationFailure {
					heightAfter := testBcR.fsm.pool.Height
					assert.Equal(t, heightBefore, heightAfter)
					firstAfter, err1 := testBcR.fsm.pool.BlockAndPeerAtHeight(testBcR.fsm.pool.Height)
					secondAfter, err2 := testBcR.fsm.pool.BlockAndPeerAtHeight(testBcR.fsm.pool.Height + 1)
					assert.NotNil(t, err1)
					assert.NotNil(t, err2)
					assert.Nil(t, firstAfter)
					assert.Nil(t, secondAfter)
				}

				assert.Equal(t, step.wantState, testBcR.fsm.state.name)

				if step.wantState == "finished" {
					assert.True(t, testBcR.fsm.isCaughtUp())
				}
			}
		})
	}
}

func TestFSMBasic(t *testing.T) {
	tests := []testFields{
		{
			name:               "one block, one peer - TS2",
			startingHeight:     1,
			maxRequestsPerPeer: 2,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				sStatusEv("waitForPeer", "waitForBlock", "P1", 2, nil),
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 1, []int64{}),
				sBlockRespEv("waitForBlock", "waitForBlock", "P2", 2, []int64{1}),
				sProcessedBlockEv("waitForBlock", "finished", nil),
			},
		},
		{
			name:               "multi block, multi peer - TS2",
			startingHeight:     1,
			maxRequestsPerPeer: 2,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				sStatusEv("waitForPeer", "waitForBlock", "P1", 4, nil),
				sStatusEv("waitForBlock", "waitForBlock", "P2", 4, nil),
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),

				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 1, []int64{}),
				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 2, []int64{1}),
				sBlockRespEv("waitForBlock", "waitForBlock", "P2", 3, []int64{1, 2}),
				sBlockRespEv("waitForBlock", "waitForBlock", "P2", 4, []int64{1, 2, 3}),

				sProcessedBlockEv("waitForBlock", "waitForBlock", nil),
				sProcessedBlockEv("waitForBlock", "waitForBlock", nil),
				sProcessedBlockEv("waitForBlock", "finished", nil),
			},
		},
	}

	executeFSMTests(t, tests, true)
}

func TestFSMBlockVerificationFailure(t *testing.T) {
	tests := []testFields{
		{
			name:               "block verification failure - TS2 variant",
			startingHeight:     1,
			maxRequestsPerPeer: 3,
			steps: []fsmStepTestValues{
				sStartFSMEv(),

				// add P1 and get blocks 1-3 from it
				sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 1, []int64{}),
				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 2, []int64{1}),
				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 3, []int64{1, 2}),

				// add P2
				sStatusEv("waitForBlock", "waitForBlock", "P2", 3, nil),

				// process block failure, should remove P1 and all blocks
				sProcessedBlockEv("waitForBlock", "waitForBlock", errBlockVerificationFailure),

				// get blocks 1-3 from P2
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
				sBlockRespEv("waitForBlock", "waitForBlock", "P2", 1, []int64{}),
				sBlockRespEv("waitForBlock", "waitForBlock", "P2", 2, []int64{1}),
				sBlockRespEv("waitForBlock", "waitForBlock", "P2", 3, []int64{1, 2}),

				// finish after processing blocks 1 and 2
				sProcessedBlockEv("waitForBlock", "waitForBlock", nil),
				sProcessedBlockEv("waitForBlock", "finished", nil),
			},
		},
	}

	executeFSMTests(t, tests, false)
}

func TestFSMBadBlockFromPeer(t *testing.T) {
	tests := []testFields{
		{
			name:               "block we haven't asked for",
			startingHeight:     1,
			maxRequestsPerPeer: 3,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				// add P1 and ask for blocks 1-3
				sStatusEv("waitForPeer", "waitForBlock", "P1", 300, nil),
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),

				// blockResponseEv for height 100 should cause an error
				sBlockRespEvErrored("waitForBlock", "waitForPeer",
					"P1", 100, []int64{}, errMissingBlock, []p2p.ID{}),
			},
		},
		{
			name:               "block we already have",
			startingHeight:     1,
			maxRequestsPerPeer: 3,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				// add P1 and get block 1
				sStatusEv("waitForPeer", "waitForBlock", "P1", 100, nil),
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
				sBlockRespEv("waitForBlock", "waitForBlock",
					"P1", 1, []int64{}),

				// Get block 1 again. Since the peer is removed together with block 1,
				// the blocks present in the pool should be {}
				sBlockRespEvErrored("waitForBlock", "waitForPeer",
					"P1", 1, []int64{}, errDuplicateBlock, []p2p.ID{"P1"}),
			},
		},
		{
			name:               "block from unknown peer",
			startingHeight:     1,
			maxRequestsPerPeer: 3,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				// add P1 and get block 1
				sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),

				// get block 1 from unknown peer P2
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
				sBlockRespEvErrored("waitForBlock", "waitForBlock",
					"P2", 1, []int64{}, errBadDataFromPeer, []p2p.ID{"P2"}),
			},
		},
		{
			name:               "block from wrong peer",
			startingHeight:     1,
			maxRequestsPerPeer: 3,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				// add P1, make requests for blocks 1-3 to P1
				sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),

				// add P2
				sStatusEv("waitForBlock", "waitForBlock", "P2", 3, nil),

				// receive block 1 from P2
				sBlockRespEvErrored("waitForBlock", "waitForBlock",
					"P2", 1, []int64{}, errBadDataFromPeer, []p2p.ID{"P2"}),
			},
		},
	}

	executeFSMTests(t, tests, false)
}

func TestFSMBlockAtCurrentHeightDoesNotArriveInTime(t *testing.T) {
	tests := []testFields{
		{
			name:               "block at current height undelivered - TS5",
			startingHeight:     1,
			maxRequestsPerPeer: 3,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				// add P1, get blocks 1 and 2, process block 1
				sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
				sBlockRespEv("waitForBlock", "waitForBlock",
					"P1", 1, []int64{}),
				sBlockRespEv("waitForBlock", "waitForBlock",
					"P1", 2, []int64{1}),
				sProcessedBlockEv("waitForBlock", "waitForBlock", nil),

				// add P2
				sStatusEv("waitForBlock", "waitForBlock", "P2", 3, nil),

				// timeout on block 3, P1 should be removed
				sStateTimeoutEv("waitForBlock", "waitForBlock", "waitForBlock", errNoPeerResponseForCurrentHeights),

				// make requests and finish by receiving blocks 2 and 3 from P2
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
				sBlockRespEv("waitForBlock", "waitForBlock", "P2", 2, []int64{}),
				sBlockRespEv("waitForBlock", "waitForBlock", "P2", 3, []int64{2}),
				sProcessedBlockEv("waitForBlock", "finished", nil),
			},
		},
		{
			name:               "block at current height undelivered, at maxPeerHeight after peer removal - TS3",
			startingHeight:     1,
			maxRequestsPerPeer: 3,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				// add P1, request blocks 1-3 from P1
				sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),

				// add P2 (tallest)
				sStatusEv("waitForBlock", "waitForBlock", "P2", 30, nil),
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),

				// receive blocks 1-3 from P1
				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 1, []int64{}),
				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 2, []int64{1}),
				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 3, []int64{1, 2}),

				// process blocks at heights 1 and 2
				sProcessedBlockEv("waitForBlock", "waitForBlock", nil),
				sProcessedBlockEv("waitForBlock", "waitForBlock", nil),

				// timeout on block at height 4
				sStateTimeoutEv("waitForBlock", "finished", "waitForBlock", nil),
			},
		},
	}

	executeFSMTests(t, tests, true)
}

func TestFSMPeerRelatedEvents(t *testing.T) {
	tests := []testFields{
		{
			name:           "peer remove event with no blocks",
			startingHeight: 1,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				// add P1, P2, P3
				sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
				sStatusEv("waitForBlock", "waitForBlock", "P2", 3, nil),
				sStatusEv("waitForBlock", "waitForBlock", "P3", 3, nil),

				// switch removes P2
				sPeerRemoveEv("waitForBlock", "waitForBlock", "P2", errSwitchRemovesPeer, []p2p.ID{"P2"}),
			},
		},
		{
			name:           "only peer removed while in waitForBlock state",
			startingHeight: 100,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				// add P1
				sStatusEv("waitForPeer", "waitForBlock", "P1", 200, nil),

				// switch removes P1
				sPeerRemoveEv("waitForBlock", "waitForPeer", "P1", errSwitchRemovesPeer, []p2p.ID{"P1"}),
			},
		},
		{
			name:               "highest peer removed while in waitForBlock state, node reaches maxPeerHeight - TS4",
			startingHeight:     100,
			maxRequestsPerPeer: 3,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				// add P1 and make requests
				sStatusEv("waitForPeer", "waitForBlock", "P1", 101, nil),
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
				// add P2
				sStatusEv("waitForBlock", "waitForBlock", "P2", 200, nil),

				// get blocks 100 and 101 from P1 and process block at height 100
				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 100, []int64{}),
				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 101, []int64{100}),
				sProcessedBlockEv("waitForBlock", "waitForBlock", nil),

				// switch removes peer P2; node is at max peer height, should be finished
				sPeerRemoveEv("waitForBlock", "finished", "P2", errSwitchRemovesPeer, []p2p.ID{"P2"}),
			},
		},
		{
			name:               "highest peer lowers its height in waitForBlock state, node reaches maxPeerHeight - TS4",
			startingHeight:     100,
			maxRequestsPerPeer: 3,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				// add P1 and make requests
				sStatusEv("waitForPeer", "waitForBlock", "P1", 101, nil),
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),

				// add P2
				sStatusEv("waitForBlock", "waitForBlock", "P2", 200, nil),

				// get blocks 100 and 101 from P1
				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 100, []int64{}),
				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 101, []int64{100}),

				// process block at height 100
				sProcessedBlockEv("waitForBlock", "waitForBlock", nil),

				// P2 becomes short
				sStatusEv("waitForBlock", "finished", "P2", 100, errPeerLowersItsHeight),
			},
		},
		{
			name:           "new short peer while in waitForPeer state",
			startingHeight: 100,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				sStatusEv("waitForPeer", "waitForPeer", "P1", 3, errPeerTooShort),
			},
		},
		{
			name:           "new short peer while in waitForBlock state",
			startingHeight: 100,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				sStatusEv("waitForPeer", "waitForBlock", "P1", 200, nil),
				sStatusEv("waitForBlock", "waitForBlock", "P2", 3, errPeerTooShort),
			},
		},
		{
			name:           "only peer updated with low height while in waitForBlock state",
			startingHeight: 100,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				sStatusEv("waitForPeer", "waitForBlock", "P1", 200, nil),
				sStatusEv("waitForBlock", "waitForPeer", "P1", 3, errPeerLowersItsHeight),
			},
		},
		{
			name:               "peer does not exist in the switch",
			startingHeight:     9999999,
			maxRequestsPerPeer: 3,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				// add P1
				sStatusEv("waitForPeer", "waitForBlock", "P1", 20000000, nil),
				// send request for block 9999999
				// Note: For this block request the "switch missing the peer" error is simulated,
				// see the implementation of the bcReactor interface, sendBlockRequest(), in this file.
				sMakeRequestsEvErrored("waitForBlock", "waitForBlock",
					maxNumRequests, nil, []p2p.ID{"P1"}),
			},
		},
	}

	executeFSMTests(t, tests, true)
}

func TestFSMStopFSM(t *testing.T) {
	tests := []testFields{
		{
			name: "stopFSMEv in unknown",
			steps: []fsmStepTestValues{
				sStopFSMEv("unknown", "finished"),
			},
		},
		{
			name:           "stopFSMEv in waitForPeer",
			startingHeight: 1,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				sStopFSMEv("waitForPeer", "finished"),
			},
		},
		{
			name:           "stopFSMEv in waitForBlock",
			startingHeight: 1,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
				sStopFSMEv("waitForBlock", "finished"),
			},
		},
	}

	executeFSMTests(t, tests, false)
}

func TestFSMUnknownElements(t *testing.T) {
	tests := []testFields{
		{
			name: "unknown event for state unknown",
			steps: []fsmStepTestValues{
				sUnknownFSMEv("unknown"),
			},
		},
		{
			name: "unknown event for state waitForPeer",
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				sUnknownFSMEv("waitForPeer"),
			},
		},
		{
			name:           "unknown event for state waitForBlock",
			startingHeight: 1,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
				sUnknownFSMEv("waitForBlock"),
			},
		},
	}

	executeFSMTests(t, tests, false)
}

func TestFSMPeerStateTimeoutEvent(t *testing.T) {
	tests := []testFields{
		{
			name:               "timeout event for state waitForPeer while in state waitForPeer - TS1",
			startingHeight:     1,
			maxRequestsPerPeer: 3,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				sStateTimeoutEv("waitForPeer", "finished", "waitForPeer", errNoTallerPeer),
			},
		},
		{
			name:               "timeout event for state waitForPeer while in a state != waitForPeer",
			startingHeight:     1,
			maxRequestsPerPeer: 3,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				sStateTimeoutEv("waitForPeer", "waitForPeer", "waitForBlock", errTimeoutEventWrongState),
			},
		},
		{
			name:               "timeout event for state waitForBlock while in state waitForBlock",
			startingHeight:     1,
			maxRequestsPerPeer: 3,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
				sStateTimeoutEv("waitForBlock", "waitForPeer", "waitForBlock", errNoPeerResponseForCurrentHeights),
			},
		},
		{
			name:               "timeout event for state waitForBlock while in a state != waitForBlock",
			startingHeight:     1,
			maxRequestsPerPeer: 3,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
				sStateTimeoutEv("waitForBlock", "waitForBlock", "waitForPeer", errTimeoutEventWrongState),
			},
		},
		{
			name:               "timeout event for state waitForBlock with multiple peers",
			startingHeight:     1,
			maxRequestsPerPeer: 3,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
				sStatusEv("waitForBlock", "waitForBlock", "P2", 3, nil),
				sStateTimeoutEv("waitForBlock", "waitForBlock", "waitForBlock", errNoPeerResponseForCurrentHeights),
			},
		},
	}

	executeFSMTests(t, tests, false)
}

func makeCorrectTransitionSequence(startingHeight int64, numBlocks int64, numPeers int, randomPeerHeights bool,
	maxRequestsPerPeer int, maxPendingRequests int) testFields {

	// Generate numPeers peers with random or numBlocks heights according to the randomPeerHeights flag.
	peerHeights := make([]int64, numPeers)
	for i := 0; i < numPeers; i++ {
		if i == 0 {
			peerHeights[0] = numBlocks
			continue
		}
		if randomPeerHeights {
			peerHeights[i] = int64(cmn.MaxInt(cmn.RandIntn(int(numBlocks)), int(startingHeight)+1))
		} else {
			peerHeights[i] = numBlocks
		}
	}

	// Approximate the slice capacity to save time for appends.
	testSteps := make([]fsmStepTestValues, 0, 3*numBlocks+int64(numPeers))

	testName := fmt.Sprintf("%v-blocks %v-startingHeight %v-peers %v-maxRequestsPerPeer %v-maxNumRequests",
		numBlocks, startingHeight, numPeers, maxRequestsPerPeer, maxPendingRequests)

	// Add the startFSMEv step.
	testSteps = append(testSteps, sStartFSMEv())

	// For each peer, add a statusResponseEv step.
	for i := 0; i < numPeers; i++ {
		peerName := fmt.Sprintf("P%d", i)
		if i == 0 {
			testSteps = append(
				testSteps,
				sStatusEv("waitForPeer", "waitForBlock", p2p.ID(peerName), peerHeights[i], nil))
		} else {
			testSteps = append(testSteps,
				sStatusEv("waitForBlock", "waitForBlock", p2p.ID(peerName), peerHeights[i], nil))
		}
	}

	height := startingHeight
	numBlocksReceived := 0
	prevBlocks := make([]int64, 0, maxPendingRequests)

forLoop:
	for i := 0; i < int(numBlocks); i++ {

		// Add the makeRequestEv step periodically.
		if i%int(maxRequestsPerPeer) == 0 {
			testSteps = append(
				testSteps,
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
			)
		}

		// Add the blockRespEv step.
		testSteps = append(
			testSteps,
			sBlockRespEv("waitForBlock", "waitForBlock",
				"P0", height, prevBlocks))
		prevBlocks = append(prevBlocks, height)
		height++
		numBlocksReceived++

		// Add the processedBlockEv step periodically.
		if numBlocksReceived >= int(maxRequestsPerPeer) || height >= numBlocks {
			for j := int(height) - numBlocksReceived; j < int(height); j++ {
				if j >= int(numBlocks) {
					// This is the last block that is processed; we should be in the "finished" state.
					testSteps = append(
						testSteps,
						sProcessedBlockEv("waitForBlock", "finished", nil))
					break forLoop
				}
				testSteps = append(
					testSteps,
					sProcessedBlockEv("waitForBlock", "waitForBlock", nil))
			}
			numBlocksReceived = 0
			prevBlocks = make([]int64, 0, maxPendingRequests)
		}
	}

	return testFields{
		name:               testName,
		startingHeight:     startingHeight,
		maxRequestsPerPeer: maxRequestsPerPeer,
		maxPendingRequests: maxPendingRequests,
		steps:              testSteps,
	}
}

const (
	maxStartingHeightTest       = 100
	maxRequestsPerPeerTest      = 20
	maxTotalPendingRequestsTest = 600
	maxNumPeersTest             = 1000
	maxNumBlocksInChainTest     = 10000 // should be smaller than 9999999
)

func makeCorrectTransitionSequenceWithRandomParameters() testFields {
	// Generate a starting height for fast sync.
	startingHeight := int64(cmn.RandIntn(maxStartingHeightTest) + 1)

	// Generate the number of requests per peer.
	maxRequestsPerPeer := cmn.RandIntn(maxRequestsPerPeerTest) + 1

	// Generate the maximum number of total pending requests, >= maxRequestsPerPeer.
	maxPendingRequests := cmn.RandIntn(maxTotalPendingRequestsTest-int(maxRequestsPerPeer)) + maxRequestsPerPeer

	// Generate the number of blocks to be synced.
	numBlocks := int64(cmn.RandIntn(maxNumBlocksInChainTest)) + startingHeight

	// Generate a number of peers.
	numPeers := cmn.RandIntn(maxNumPeersTest) + 1

	return makeCorrectTransitionSequence(startingHeight, numBlocks, numPeers, true, maxRequestsPerPeer, maxPendingRequests)
}

func shouldApplyProcessedBlockEvStep(step *fsmStepTestValues, testBcR *testReactor) bool {
	if step.event == processedBlockEv {
		_, err := testBcR.fsm.pool.BlockAndPeerAtHeight(testBcR.fsm.pool.Height)
		if err == errMissingBlock {
			return false
		}
		_, err = testBcR.fsm.pool.BlockAndPeerAtHeight(testBcR.fsm.pool.Height + 1)
		if err == errMissingBlock {
			return false
		}
	}
	return true
}

func TestFSMCorrectTransitionSequences(t *testing.T) {

	tests := []testFields{
		makeCorrectTransitionSequence(1, 100, 10, true, 10, 40),
		makeCorrectTransitionSequenceWithRandomParameters(),
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Create test reactor
			testBcR := newTestReactor(tt.startingHeight)

			if tt.maxRequestsPerPeer != 0 {
				maxRequestsPerPeer = tt.maxRequestsPerPeer
			}

			for _, step := range tt.steps {
				assert.Equal(t, step.currentState, testBcR.fsm.state.name)

				oldNumStatusRequests := testBcR.numStatusRequests
				fixBlockResponseEvStep(&step, testBcR)
				if !shouldApplyProcessedBlockEvStep(&step, testBcR) {
					continue
				}

				fsmErr := sendEventToFSM(testBcR.fsm, step.event, step.data)
				assert.Equal(t, step.wantErr, fsmErr)

				if step.wantStatusReqSent {
					assert.Equal(t, oldNumStatusRequests+1, testBcR.numStatusRequests)
				} else {
					assert.Equal(t, oldNumStatusRequests, testBcR.numStatusRequests)
				}

				assert.Equal(t, step.wantState, testBcR.fsm.state.name)
				if step.wantState == "finished" {
					assert.True(t, testBcR.fsm.isCaughtUp())
				}
			}
		})
	}
}

// ----------------------------------------
// implements the bcRNotifier
func (testR *testReactor) sendPeerError(err error, peerID p2p.ID) {
	testR.logger.Info("Reactor received sendPeerError call from FSM", "peer", peerID, "err", err)
	testR.lastPeerError.peerID = peerID
	testR.lastPeerError.err = err
}

func (testR *testReactor) sendStatusRequest() {
	testR.logger.Info("Reactor received sendStatusRequest call from FSM")
	testR.numStatusRequests++
}

func (testR *testReactor) sendBlockRequest(peerID p2p.ID, height int64) error {
	testR.logger.Info("Reactor received sendBlockRequest call from FSM", "peer", peerID, "height", height)
	testR.numBlockRequests++
	testR.lastBlockRequest.peerID = peerID
	testR.lastBlockRequest.height = height
	if height == 9999999 {
		// simulate that the switch does not have the peer
		return errNilPeerForBlockRequest
	}
	return nil
}

func (testR *testReactor) resetStateTimer(name string, timer **time.Timer, timeout time.Duration) {
	testR.logger.Info("Reactor received resetStateTimer call from FSM", "state", name, "timeout", timeout)
	if _, ok := testR.stateTimerStarts[name]; !ok {
		testR.stateTimerStarts[name] = 1
	} else {
		testR.stateTimerStarts[name]++
	}
}

func (testR *testReactor) switchToConsensus() {
}

// ----------------------------------------
413
blockchain/v1/reactor_test.go
Normal file
@@ -0,0 +1,413 @@
package v1

import (
	"fmt"
	"os"
	"sort"
	"sync"
	"testing"
	"time"

	"github.com/pkg/errors"
	"github.com/stretchr/testify/assert"

	abci "github.com/tendermint/tendermint/abci/types"
	cfg "github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/mock"
	"github.com/tendermint/tendermint/p2p"
	"github.com/tendermint/tendermint/proxy"
	sm "github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/store"
	"github.com/tendermint/tendermint/types"
	tmtime "github.com/tendermint/tendermint/types/time"
	dbm "github.com/tendermint/tm-db"
)

var config *cfg.Config

func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.GenesisDoc, []types.PrivValidator) {
	validators := make([]types.GenesisValidator, numValidators)
	privValidators := make([]types.PrivValidator, numValidators)
	for i := 0; i < numValidators; i++ {
		val, privVal := types.RandValidator(randPower, minPower)
		validators[i] = types.GenesisValidator{
			PubKey: val.PubKey,
			Power:  val.VotingPower,
		}
		privValidators[i] = privVal
	}
	sort.Sort(types.PrivValidatorsByAddress(privValidators))

	return &types.GenesisDoc{
		GenesisTime: tmtime.Now(),
		ChainID:     config.ChainID(),
		Validators:  validators,
	}, privValidators
}

func makeVote(header *types.Header, blockID types.BlockID, valset *types.ValidatorSet, privVal types.PrivValidator) *types.Vote {
	addr := privVal.GetPubKey().Address()
	idx, _ := valset.GetByAddress(addr)
	vote := &types.Vote{
		ValidatorAddress: addr,
		ValidatorIndex:   idx,
		Height:           header.Height,
		Round:            1,
		Timestamp:        tmtime.Now(),
		Type:             types.PrecommitType,
		BlockID:          blockID,
	}

	_ = privVal.SignVote(header.ChainID, vote)

	return vote
}

type BlockchainReactorPair struct {
	bcR  *BlockchainReactor
	conR *consensusReactorTest
}

func newBlockchainReactor(logger log.Logger, genDoc *types.GenesisDoc, privVals []types.PrivValidator, maxBlockHeight int64) *BlockchainReactor {
	if len(privVals) != 1 {
		panic("only support one validator")
	}

	app := &testApp{}
	cc := proxy.NewLocalClientCreator(app)
	proxyApp := proxy.NewAppConns(cc)
	err := proxyApp.Start()
	if err != nil {
		panic(errors.Wrap(err, "error start app"))
	}

	blockDB := dbm.NewMemDB()
	stateDB := dbm.NewMemDB()
	blockStore := store.NewBlockStore(blockDB)

	state, err := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc)
	if err != nil {
		panic(errors.Wrap(err, "error constructing state from genesis file"))
	}

	// Make the BlockchainReactor itself.
	// NOTE we have to create and commit the blocks first because
	// pool.height is determined from the store.
	fastSync := true
	db := dbm.NewMemDB()
	blockExec := sm.NewBlockExecutor(db, log.TestingLogger(), proxyApp.Consensus(),
		mock.Mempool{}, sm.MockEvidencePool{})
	sm.SaveState(db, state)

	// let's add some blocks in
	for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ {
		lastCommit := types.NewCommit(types.BlockID{}, nil)
		if blockHeight > 1 {
			lastBlockMeta := blockStore.LoadBlockMeta(blockHeight - 1)
			lastBlock := blockStore.LoadBlock(blockHeight - 1)

			vote := makeVote(&lastBlock.Header, lastBlockMeta.BlockID, state.Validators, privVals[0]).CommitSig()
			lastCommit = types.NewCommit(lastBlockMeta.BlockID, []*types.CommitSig{vote})
		}

		thisBlock := makeBlock(blockHeight, state, lastCommit)

		thisParts := thisBlock.MakePartSet(types.BlockPartSizeBytes)
		blockID := types.BlockID{Hash: thisBlock.Hash(), PartsHeader: thisParts.Header()}

		state, err = blockExec.ApplyBlock(state, blockID, thisBlock)
		if err != nil {
			panic(errors.Wrap(err, "error apply block"))
		}

		blockStore.SaveBlock(thisBlock, thisParts, lastCommit)
	}

	bcReactor := NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync)
	bcReactor.SetLogger(logger.With("module", "blockchain"))

	return bcReactor
}

func newBlockchainReactorPair(logger log.Logger, genDoc *types.GenesisDoc, privVals []types.PrivValidator, maxBlockHeight int64) BlockchainReactorPair {

	consensusReactor := &consensusReactorTest{}
	consensusReactor.BaseReactor = *p2p.NewBaseReactor("Consensus reactor", consensusReactor)

	return BlockchainReactorPair{
		newBlockchainReactor(logger, genDoc, privVals, maxBlockHeight),
		consensusReactor}
}

type consensusReactorTest struct {
	p2p.BaseReactor     // BaseService + p2p.Switch
	switchedToConsensus bool
	mtx                 sync.Mutex
}

func (conR *consensusReactorTest) SwitchToConsensus(state sm.State, blocksSynced int) {
	conR.mtx.Lock()
	defer conR.mtx.Unlock()
	conR.switchedToConsensus = true
}

func TestFastSyncNoBlockResponse(t *testing.T) {

	config = cfg.ResetTestRoot("blockchain_new_reactor_test")
	defer os.RemoveAll(config.RootDir)
	genDoc, privVals := randGenesisDoc(1, false, 30)

	maxBlockHeight := int64(65)

	reactorPairs := make([]BlockchainReactorPair, 2)

	logger := log.TestingLogger()
	reactorPairs[0] = newBlockchainReactorPair(logger, genDoc, privVals, maxBlockHeight)
	reactorPairs[1] = newBlockchainReactorPair(logger, genDoc, privVals, 0)

	p2p.MakeConnectedSwitches(config.P2P, 2, func(i int, s *p2p.Switch) *p2p.Switch {
		s.AddReactor("BLOCKCHAIN", reactorPairs[i].bcR)
		s.AddReactor("CONSENSUS", reactorPairs[i].conR)
		moduleName := fmt.Sprintf("blockchain-%v", i)
		reactorPairs[i].bcR.SetLogger(logger.With("module", moduleName))

		return s

	}, p2p.Connect2Switches)

	defer func() {
		for _, r := range reactorPairs {
			_ = r.bcR.Stop()
			_ = r.conR.Stop()
		}
	}()

	tests := []struct {
		height   int64
		existent bool
	}{
		{maxBlockHeight + 2, false},
		{10, true},
		{1, true},
		{maxBlockHeight + 100, false},
	}

	for {
		time.Sleep(10 * time.Millisecond)
		reactorPairs[1].conR.mtx.Lock()
		if reactorPairs[1].conR.switchedToConsensus {
			reactorPairs[1].conR.mtx.Unlock()
			break
		}
		reactorPairs[1].conR.mtx.Unlock()
	}

	assert.Equal(t, maxBlockHeight, reactorPairs[0].bcR.store.Height())

	for _, tt := range tests {
		block := reactorPairs[1].bcR.store.LoadBlock(tt.height)
		if tt.existent {
			assert.True(t, block != nil)
		} else {
			assert.True(t, block == nil)
		}
	}
}

// NOTE: This is too hard to test without
// an easy way to add test peer to switch
// or without significant refactoring of the module.
// Alternatively we could actually dial a TCP conn but
// that seems extreme.
func TestFastSyncBadBlockStopsPeer(t *testing.T) {
	numNodes := 4
	maxBlockHeight := int64(148)

	config = cfg.ResetTestRoot("blockchain_reactor_test")
	defer os.RemoveAll(config.RootDir)
	genDoc, privVals := randGenesisDoc(1, false, 30)

	otherChain := newBlockchainReactorPair(log.TestingLogger(), genDoc, privVals, maxBlockHeight)
	defer func() {
		_ = otherChain.bcR.Stop()
		_ = otherChain.conR.Stop()
	}()

	reactorPairs := make([]BlockchainReactorPair, numNodes)
	logger := make([]log.Logger, numNodes)

	for i := 0; i < numNodes; i++ {
		logger[i] = log.TestingLogger()
		height := int64(0)
		if i == 0 {
			height = maxBlockHeight
		}
		reactorPairs[i] = newBlockchainReactorPair(logger[i], genDoc, privVals, height)
	}

	switches := p2p.MakeConnectedSwitches(config.P2P, numNodes, func(i int, s *p2p.Switch) *p2p.Switch {
		reactorPairs[i].conR.mtx.Lock()
		s.AddReactor("BLOCKCHAIN", reactorPairs[i].bcR)
		s.AddReactor("CONSENSUS", reactorPairs[i].conR)
		moduleName := fmt.Sprintf("blockchain-%v", i)
		reactorPairs[i].bcR.SetLogger(logger[i].With("module", moduleName))
		reactorPairs[i].conR.mtx.Unlock()
		return s

	}, p2p.Connect2Switches)

	defer func() {
		for _, r := range reactorPairs {
			_ = r.bcR.Stop()
			_ = r.conR.Stop()
		}
	}()

outerFor:
	for {
		time.Sleep(10 * time.Millisecond)
		for i := 0; i < numNodes; i++ {
			reactorPairs[i].conR.mtx.Lock()
			if !reactorPairs[i].conR.switchedToConsensus {
				reactorPairs[i].conR.mtx.Unlock()
				continue outerFor
			}
			reactorPairs[i].conR.mtx.Unlock()
		}
		break
	}

	// at this time, reactors[0-3] is the newest
	assert.Equal(t, numNodes-1, reactorPairs[1].bcR.Switch.Peers().Size())

	// mark last reactorPair as an invalid peer
	reactorPairs[numNodes-1].bcR.store = otherChain.bcR.store

	lastLogger := log.TestingLogger()
	lastReactorPair := newBlockchainReactorPair(lastLogger, genDoc, privVals, 0)
	reactorPairs = append(reactorPairs, lastReactorPair)

	switches = append(switches, p2p.MakeConnectedSwitches(config.P2P, 1, func(i int, s *p2p.Switch) *p2p.Switch {
		s.AddReactor("BLOCKCHAIN", reactorPairs[len(reactorPairs)-1].bcR)
		s.AddReactor("CONSENSUS", reactorPairs[len(reactorPairs)-1].conR)
		moduleName := fmt.Sprintf("blockchain-%v", len(reactorPairs)-1)
		reactorPairs[len(reactorPairs)-1].bcR.SetLogger(lastLogger.With("module", moduleName))
		return s

	}, p2p.Connect2Switches)...)

	for i := 0; i < len(reactorPairs)-1; i++ {
		p2p.Connect2Switches(switches, i, len(reactorPairs)-1)
	}

	for {
		time.Sleep(1 * time.Second)
		lastReactorPair.conR.mtx.Lock()
		if lastReactorPair.conR.switchedToConsensus {
			lastReactorPair.conR.mtx.Unlock()
			break
		}
		lastReactorPair.conR.mtx.Unlock()

		if lastReactorPair.bcR.Switch.Peers().Size() == 0 {
			break
		}
	}

	assert.True(t, lastReactorPair.bcR.Switch.Peers().Size() < len(reactorPairs)-1)
}

func TestBcBlockRequestMessageValidateBasic(t *testing.T) {
	testCases := []struct {
		testName      string
		requestHeight int64
		expectErr     bool
	}{
		{"Valid Request Message", 0, false},
		{"Valid Request Message", 1, false},
		{"Invalid Request Message", -1, true},
	}

	for _, tc := range testCases {
		t.Run(tc.testName, func(t *testing.T) {
			request := bcBlockRequestMessage{Height: tc.requestHeight}
			assert.Equal(t, tc.expectErr, request.ValidateBasic() != nil, "Validate Basic had an unexpected result")
		})
	}
}

func TestBcNoBlockResponseMessageValidateBasic(t *testing.T) {
	testCases := []struct {
		testName          string
		nonResponseHeight int64
		expectErr         bool
	}{
		{"Valid Non-Response Message", 0, false},
		{"Valid Non-Response Message", 1, false},
		{"Invalid Non-Response Message", -1, true},
	}

	for _, tc := range testCases {
		t.Run(tc.testName, func(t *testing.T) {
			nonResponse := bcNoBlockResponseMessage{Height: tc.nonResponseHeight}
			assert.Equal(t, tc.expectErr, nonResponse.ValidateBasic() != nil, "Validate Basic had an unexpected result")
		})
	}
}

func TestBcStatusRequestMessageValidateBasic(t *testing.T) {
	testCases := []struct {
		testName      string
		requestHeight int64
		expectErr     bool
	}{
		{"Valid Request Message", 0, false},
		{"Valid Request Message", 1, false},
		{"Invalid Request Message", -1, true},
	}

	for _, tc := range testCases {
		t.Run(tc.testName, func(t *testing.T) {
			request := bcStatusRequestMessage{Height: tc.requestHeight}
			assert.Equal(t, tc.expectErr, request.ValidateBasic() != nil, "Validate Basic had an unexpected result")
		})
	}
}

func TestBcStatusResponseMessageValidateBasic(t *testing.T) {
	testCases := []struct {
		testName       string
		responseHeight int64
		expectErr      bool
	}{
		{"Valid Response Message", 0, false},
		{"Valid Response Message", 1, false},
		{"Invalid Response Message", -1, true},
	}

	for _, tc := range testCases {
		t.Run(tc.testName, func(t *testing.T) {
			response := bcStatusResponseMessage{Height: tc.responseHeight}
			assert.Equal(t, tc.expectErr, response.ValidateBasic() != nil, "Validate Basic had an unexpected result")
		})
	}
}

//----------------------------------------------
// utility funcs

func makeTxs(height int64) (txs []types.Tx) {
	for i := 0; i < 10; i++ {
		txs = append(txs, types.Tx([]byte{byte(height), byte(i)}))
	}
	return txs
}

func makeBlock(height int64, state sm.State, lastCommit *types.Commit) *types.Block {
	block, _ := state.MakeBlock(height, makeTxs(height), lastCommit, nil, state.Validators.GetProposer().Address)
	return block
}

type testApp struct {
	abci.BaseApplication
}
387  blockchain/v2/schedule.go  (Normal file)
@@ -0,0 +1,387 @@
// nolint:unused
package v2

import (
	"fmt"
	"math"
	"math/rand"
	"time"

	"github.com/tendermint/tendermint/p2p"
)

type Event interface{}

type blockState int

const (
	blockStateUnknown blockState = iota
	blockStateNew
	blockStatePending
	blockStateReceived
	blockStateProcessed
)

func (e blockState) String() string {
	switch e {
	case blockStateUnknown:
		return "Unknown"
	case blockStateNew:
		return "New"
	case blockStatePending:
		return "Pending"
	case blockStateReceived:
		return "Received"
	case blockStateProcessed:
		return "Processed"
	default:
		return fmt.Sprintf("unknown blockState: %d", e)
	}
}

type peerState int

const (
	peerStateNew = iota
	peerStateReady
	peerStateRemoved
)

func (e peerState) String() string {
	switch e {
	case peerStateNew:
		return "New"
	case peerStateReady:
		return "Ready"
	case peerStateRemoved:
		return "Removed"
	default:
		return fmt.Sprintf("unknown peerState: %d", e)
	}
}

type scPeer struct {
	peerID      p2p.ID
	state       peerState
	height      int64
	lastTouched time.Time
	lastRate    int64
}

func newScPeer(peerID p2p.ID) *scPeer {
	return &scPeer{
		peerID:      peerID,
		state:       peerStateNew,
		height:      -1,
		lastTouched: time.Time{},
	}
}

// The schedule is a composite data structure which allows a scheduler to keep
// track of which blocks have been scheduled into which state.
type schedule struct {
	initHeight int64

	// a map of heights to their current blockState
	blockStates map[int64]blockState

	// a map of peerID to schedule specific peer struct `scPeer` used to keep
	// track of peer specific state
	peers map[p2p.ID]*scPeer

	// a map of heights to the peer we are waiting for a response from
	pendingBlocks map[int64]p2p.ID

	// the time at which a block was put in blockStatePending
	pendingTime map[int64]time.Time

	// the peerID of the peer which put the block in blockStateReceived
	receivedBlocks map[int64]p2p.ID
}

func newSchedule(initHeight int64) *schedule {
	sc := schedule{
		initHeight:     initHeight,
		blockStates:    make(map[int64]blockState),
		peers:          make(map[p2p.ID]*scPeer),
		pendingBlocks:  make(map[int64]p2p.ID),
		pendingTime:    make(map[int64]time.Time),
		receivedBlocks: make(map[int64]p2p.ID),
	}

	sc.setStateAtHeight(initHeight, blockStateNew)

	return &sc
}
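The schedule is easiest to read as a small state machine: a height moves New -> Pending -> Received -> Processed via the mark* methods defined below, while a peer moves New -> Ready -> Removed. A minimal happy-path sketch of that lifecycle, using only the unexported API from this file (the peer ID, heights, block size, and timings are illustrative):

func exampleScheduleLifecycle() error {
	sc := newSchedule(1)                      // height 1 starts out in blockStateNew
	if err := sc.addPeer("p1"); err != nil {  // peer starts in peerStateNew
		return err
	}
	// Reporting a height readies the peer and opens heights up to 10 as New.
	if err := sc.setPeerHeight("p1", 10); err != nil {
		return err
	}
	requested := time.Now()
	if err := sc.markPending("p1", 1, requested); err != nil { // New -> Pending
		return err
	}
	// Two seconds later the block arrives; this also records p1's lastRate.
	if err := sc.markReceived("p1", 1, 1024, requested.Add(2*time.Second)); err != nil {
		return err
	}
	return sc.markProcessed(1) // Received -> Processed
}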
func (sc *schedule) addPeer(peerID p2p.ID) error {
	if _, ok := sc.peers[peerID]; ok {
		return fmt.Errorf("Cannot add duplicate peer %s", peerID)
	}
	sc.peers[peerID] = newScPeer(peerID)
	return nil
}

func (sc *schedule) touchPeer(peerID p2p.ID, time time.Time) error {
	peer, ok := sc.peers[peerID]
	if !ok {
		return fmt.Errorf("Couldn't find peer %s", peerID)
	}

	if peer.state == peerStateRemoved {
		return fmt.Errorf("Tried to touch peer in peerStateRemoved")
	}

	peer.lastTouched = time

	return nil
}

func (sc *schedule) removePeer(peerID p2p.ID) error {
	peer, ok := sc.peers[peerID]
	if !ok {
		return fmt.Errorf("Couldn't find peer %s", peerID)
	}

	if peer.state == peerStateRemoved {
		return fmt.Errorf("Tried to remove peer %s in peerStateRemoved", peerID)
	}

	for height, pendingPeerID := range sc.pendingBlocks {
		if pendingPeerID == peerID {
			sc.setStateAtHeight(height, blockStateNew)
			delete(sc.pendingTime, height)
			delete(sc.pendingBlocks, height)
		}
	}

	for height, rcvPeerID := range sc.receivedBlocks {
		if rcvPeerID == peerID {
			sc.setStateAtHeight(height, blockStateNew)
			delete(sc.receivedBlocks, height)
		}
	}

	peer.state = peerStateRemoved

	return nil
}

func (sc *schedule) setPeerHeight(peerID p2p.ID, height int64) error {
	peer, ok := sc.peers[peerID]
	if !ok {
		return fmt.Errorf("Can't find peer %s", peerID)
	}

	if peer.state == peerStateRemoved {
		return fmt.Errorf("Cannot set peer height for a peer in peerStateRemoved")
	}

	if height < peer.height {
		return fmt.Errorf("Cannot move peer height lower. from %d to %d", peer.height, height)
	}

	peer.height = height
	peer.state = peerStateReady
	for i := sc.minHeight(); i <= height; i++ {
		if sc.getStateAtHeight(i) == blockStateUnknown {
			sc.setStateAtHeight(i, blockStateNew)
		}
	}

	return nil
}

func (sc *schedule) getStateAtHeight(height int64) blockState {
	if height < sc.initHeight {
		return blockStateProcessed
	} else if state, ok := sc.blockStates[height]; ok {
		return state
	} else {
		return blockStateUnknown
	}
}

func (sc *schedule) getPeersAtHeight(height int64) []*scPeer {
	peers := []*scPeer{}
	for _, peer := range sc.peers {
		if peer.height >= height {
			peers = append(peers, peer)
		}
	}

	return peers
}

func (sc *schedule) peersInactiveSince(duration time.Duration, now time.Time) []p2p.ID {
	peers := []p2p.ID{}
	for _, peer := range sc.peers {
		if now.Sub(peer.lastTouched) > duration {
			peers = append(peers, peer.peerID)
		}
	}

	return peers
}

func (sc *schedule) peersSlowerThan(minSpeed int64) []p2p.ID {
	peers := []p2p.ID{}
	for _, peer := range sc.peers {
		if peer.lastRate < minSpeed {
			peers = append(peers, peer.peerID)
		}
	}

	return peers
}

func (sc *schedule) setStateAtHeight(height int64, state blockState) {
	sc.blockStates[height] = state
}

func (sc *schedule) markReceived(peerID p2p.ID, height int64, size int64, now time.Time) error {
	peer, ok := sc.peers[peerID]
	if !ok {
		return fmt.Errorf("Can't find peer %s", peerID)
	}

	if peer.state == peerStateRemoved {
		return fmt.Errorf("Cannot receive blocks from removed peer %s", peerID)
	}

	if state := sc.getStateAtHeight(height); state != blockStatePending || sc.pendingBlocks[height] != peerID {
		return fmt.Errorf("Received block %d from peer %s without being requested", height, peerID)
	}

	pendingTime, ok := sc.pendingTime[height]
	if !ok || now.Sub(pendingTime) <= 0 {
		return fmt.Errorf("Clock error. Block %d received at %s but requested at %s",
			height, pendingTime, now)
	}

	peer.lastRate = size / int64(now.Sub(pendingTime).Seconds())

	sc.setStateAtHeight(height, blockStateReceived)
	delete(sc.pendingBlocks, height)
	delete(sc.pendingTime, height)

	sc.receivedBlocks[height] = peerID

	return nil
}
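One caveat worth noting in markReceived: int64(now.Sub(pendingTime).Seconds()) truncates toward zero, so a block answered in under one second makes the divisor zero. A hedged sketch of a guard a caller (or a future revision) could apply; the round-up-to-one-second choice is an assumption, not what the code above does:

// safeRate mirrors the lastRate computation above but refuses to divide by
// zero: sub-second responses are counted as one full second. Illustrative
// numbers: 1048576 bytes over 2s yields 524288 bytes/s.
func safeRate(size int64, requested, received time.Time) int64 {
	elapsed := int64(received.Sub(requested).Seconds()) // truncates toward zero
	if elapsed < 1 {
		elapsed = 1
	}
	return size / elapsed
}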
func (sc *schedule) markPending(peerID p2p.ID, height int64, time time.Time) error {
	peer, ok := sc.peers[peerID]
	if !ok {
		return fmt.Errorf("Can't find peer %s", peerID)
	}

	state := sc.getStateAtHeight(height)
	if state != blockStateNew {
		return fmt.Errorf("Block %d should be in blockStateNew but was %s", height, state)
	}

	if peer.state != peerStateReady {
		return fmt.Errorf("Cannot schedule %d from %s in %s", height, peerID, peer.state)
	}

	if height > peer.height {
		return fmt.Errorf("Cannot request height %d from peer %s who is at height %d",
			height, peerID, peer.height)
	}

	sc.setStateAtHeight(height, blockStatePending)
	sc.pendingBlocks[height] = peerID
	// XXX: to make this more accurate we can introduce a message from
	// the IO routine which indicates the time the request was put on the wire
	sc.pendingTime[height] = time

	return nil
}

func (sc *schedule) markProcessed(height int64) error {
	state := sc.getStateAtHeight(height)
	if state != blockStateReceived {
		return fmt.Errorf("Can't mark height %d processed from block state %s", height, state)
	}

	delete(sc.receivedBlocks, height)

	sc.setStateAtHeight(height, blockStateProcessed)

	return nil
}

// allBlocksProcessed returns true if all blocks are in blockStateProcessed and
// determines if the schedule has been completed
func (sc *schedule) allBlocksProcessed() bool {
	for _, state := range sc.blockStates {
		if state != blockStateProcessed {
			return false
		}
	}
	return true
}

// highest block | state == blockStateNew
func (sc *schedule) maxHeight() int64 {
	var max int64 = 0
	for height, state := range sc.blockStates {
		if state == blockStateNew && height > max {
			max = height
		}
	}

	return max
}

// lowest block | state == blockStateNew
func (sc *schedule) minHeight() int64 {
	var min int64 = math.MaxInt64
	for height, state := range sc.blockStates {
		if state == blockStateNew && height < min {
			min = height
		}
	}

	return min
}

func (sc *schedule) pendingFrom(peerID p2p.ID) []int64 {
	heights := []int64{}
	for height, pendingPeerID := range sc.pendingBlocks {
		if pendingPeerID == peerID {
			heights = append(heights, height)
		}
	}
	return heights
}

func (sc *schedule) selectPeer(peers []*scPeer) *scPeer {
	// FIXME: properPeerSelector
	s := rand.NewSource(time.Now().Unix())
	r := rand.New(s)

	return peers[r.Intn(len(peers))]
}

// XXX: this duplicates the logic of peersInactiveSince and peersSlowerThan
func (sc *schedule) prunablePeers(peerTimout time.Duration, minRecvRate int64, now time.Time) []p2p.ID {
	prunable := []p2p.ID{}
	for peerID, peer := range sc.peers {
		if now.Sub(peer.lastTouched) > peerTimout || peer.lastRate < minRecvRate {
			prunable = append(prunable, peerID)
		}
	}

	return prunable
}
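As the XXX note says, prunablePeers re-implements the two filters defined earlier. A sketch of one way to fold the shared predicate out, assuming the same scPeer fields; this is a refactoring idea, not code from the changeset:

// prunable reports whether a peer has been quiet longer than peerTimeout or
// has been serving blocks slower than minRecvRate bytes/sec; both
// prunablePeers and the single-criterion filters could be built on it.
func prunable(peer *scPeer, peerTimeout time.Duration, minRecvRate int64, now time.Time) bool {
	return now.Sub(peer.lastTouched) > peerTimeout || peer.lastRate < minRecvRate
}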
func (sc *schedule) numBlockInState(targetState blockState) uint32 {
	var num uint32 = 0
	for _, state := range sc.blockStates {
		if state == targetState {
			num++
		}
	}
	return num
}
272  blockchain/v2/schedule_test.go  (Normal file)
@@ -0,0 +1,272 @@
package v2

import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/tendermint/tendermint/p2p"
)

func TestScheduleInit(t *testing.T) {
	var (
		initHeight int64 = 5
		sc               = newSchedule(initHeight)
	)

	assert.Equal(t, blockStateNew, sc.getStateAtHeight(initHeight))
	assert.Equal(t, blockStateProcessed, sc.getStateAtHeight(initHeight-1))
	assert.Equal(t, blockStateUnknown, sc.getStateAtHeight(initHeight+1))
}

func TestAddPeer(t *testing.T) {
	var (
		initHeight int64  = 5
		peerID     p2p.ID = "1"
		peerIDTwo  p2p.ID = "2"
		sc                = newSchedule(initHeight)
	)

	assert.Nil(t, sc.addPeer(peerID))
	assert.Nil(t, sc.addPeer(peerIDTwo))
	assert.Error(t, sc.addPeer(peerID))
}

func TestTouchPeer(t *testing.T) {
	var (
		initHeight int64  = 5
		peerID     p2p.ID = "1"
		sc                = newSchedule(initHeight)
		now               = time.Now()
	)

	assert.Error(t, sc.touchPeer(peerID, now),
		"Touching an unknown peer should return errPeerNotFound")

	assert.Nil(t, sc.addPeer(peerID),
		"Adding a peer should return no error")
	assert.Nil(t, sc.touchPeer(peerID, now),
		"Touching a peer should return no error")

	threshold := 10 * time.Second
	assert.Empty(t, sc.peersInactiveSince(threshold, now.Add(9*time.Second)),
		"Expected no peers to have been touched over 9 seconds")
	assert.Containsf(t, sc.peersInactiveSince(threshold, now.Add(11*time.Second)), peerID,
		"Expected one %s to have been touched over 10 seconds ago", peerID)
}

func TestPeerHeight(t *testing.T) {
	var (
		initHeight int64  = 5
		peerID     p2p.ID = "1"
		peerHeight int64  = 20
		sc                = newSchedule(initHeight)
	)

	assert.NoError(t, sc.addPeer(peerID),
		"Adding a peer should return no error")
	assert.NoError(t, sc.setPeerHeight(peerID, peerHeight))
	for i := initHeight; i <= peerHeight; i++ {
		assert.Equal(t, sc.getStateAtHeight(i), blockStateNew,
			"Expected all blocks to be in blockStateNew")
		peerIDs := []p2p.ID{}
		for _, peer := range sc.getPeersAtHeight(i) {
			peerIDs = append(peerIDs, peer.peerID)
		}

		assert.Containsf(t, peerIDs, peerID,
			"Expected %s to have block %d", peerID, i)
	}
}

func TestTransitionPending(t *testing.T) {
	var (
		initHeight int64  = 5
		peerID     p2p.ID = "1"
		peerIDTwo  p2p.ID = "2"
		peerHeight int64  = 20
		sc                = newSchedule(initHeight)
		now               = time.Now()
	)

	assert.NoError(t, sc.addPeer(peerID),
		"Adding a peer should return no error")
	assert.Nil(t, sc.addPeer(peerIDTwo),
		"Adding a peer should return no error")

	assert.Error(t, sc.markPending(peerID, peerHeight, now),
		"Expected scheduling a block from a peer in peerStateNew to fail")

	assert.NoError(t, sc.setPeerHeight(peerID, peerHeight),
		"Expected setPeerHeight to return no error")
	assert.NoError(t, sc.setPeerHeight(peerIDTwo, peerHeight),
		"Expected setPeerHeight to return no error")

	assert.NoError(t, sc.markPending(peerID, peerHeight, now),
		"Expected markPending on a new block to succeed")
	assert.Error(t, sc.markPending(peerIDTwo, peerHeight, now),
		"Expected markPending by a second peer to fail")

	assert.Equal(t, blockStatePending, sc.getStateAtHeight(peerHeight),
		"Expected the block to be in blockStatePending")

	assert.NoError(t, sc.removePeer(peerID),
		"Expected removePeer to return no error")

	assert.Equal(t, blockStateNew, sc.getStateAtHeight(peerHeight),
		"Expected the block to be in blockStateNew")

	assert.Error(t, sc.markPending(peerID, peerHeight, now),
		"Expected markPending on a removed peer to fail")

	assert.NoError(t, sc.markPending(peerIDTwo, peerHeight, now),
		"Expected markPending on a ready peer to succeed")

	assert.Equal(t, blockStatePending, sc.getStateAtHeight(peerHeight),
		"Expected the block to be in blockStatePending")
}

func TestTransitionReceived(t *testing.T) {
	var (
		initHeight int64  = 5
		peerID     p2p.ID = "1"
		peerIDTwo  p2p.ID = "2"
		peerHeight int64  = 20
		blockSize  int64  = 1024
		sc                = newSchedule(initHeight)
		now               = time.Now()
		receivedAt        = now.Add(1 * time.Second)
	)

	assert.NoError(t, sc.addPeer(peerID),
		"Expected adding peer %s to succeed", peerID)
	assert.NoError(t, sc.addPeer(peerIDTwo),
		"Expected adding peer %s to succeed", peerIDTwo)
	assert.NoError(t, sc.setPeerHeight(peerID, peerHeight),
		"Expected setPeerHeight to return no error")
	assert.NoErrorf(t, sc.setPeerHeight(peerIDTwo, peerHeight),
		"Expected setPeerHeight on %s to %d to succeed", peerIDTwo, peerHeight)
	assert.NoError(t, sc.markPending(peerID, initHeight, now),
		"Expected markPending on a new block to succeed")

	assert.Error(t, sc.markReceived(peerIDTwo, initHeight, blockSize, receivedAt),
		"Expected markReceived from a non requesting peer to fail")

	assert.NoError(t, sc.markReceived(peerID, initHeight, blockSize, receivedAt),
		"Expected markReceived on a pending block to succeed")

	assert.Error(t, sc.markReceived(peerID, initHeight, blockSize, receivedAt),
		"Expected markReceived on a received block to fail")

	assert.Equalf(t, blockStateReceived, sc.getStateAtHeight(initHeight),
		"Expected block %d to be blockStateReceived", initHeight)

	assert.NoErrorf(t, sc.removePeer(peerID),
		"Expected removePeer removing %s to succeed", peerID)

	assert.Equalf(t, blockStateNew, sc.getStateAtHeight(initHeight),
		"Expected block %d to be blockStateNew", initHeight)

	assert.NoErrorf(t, sc.markPending(peerIDTwo, initHeight, now),
		"Expected markPending %d from %s to succeed", initHeight, peerIDTwo)
	assert.NoErrorf(t, sc.markReceived(peerIDTwo, initHeight, blockSize, receivedAt),
		"Expected markReceived %d from %s to succeed", initHeight, peerIDTwo)
	assert.Equalf(t, blockStateReceived, sc.getStateAtHeight(initHeight),
		"Expected block %d to be blockStateReceived", initHeight)
}

func TestTransitionProcessed(t *testing.T) {
	var (
		initHeight int64  = 5
		peerID     p2p.ID = "1"
		peerHeight int64  = 20
		blockSize  int64  = 1024
		sc                = newSchedule(initHeight)
		now               = time.Now()
		receivedAt        = now.Add(1 * time.Second)
	)

	assert.NoError(t, sc.addPeer(peerID),
		"Expected adding peer %s to succeed", peerID)
	assert.NoErrorf(t, sc.setPeerHeight(peerID, peerHeight),
		"Expected setPeerHeight on %s to %d to succeed", peerID, peerHeight)
	assert.NoError(t, sc.markPending(peerID, initHeight, now),
		"Expected markPending on a new block to succeed")
	assert.NoError(t, sc.markReceived(peerID, initHeight, blockSize, receivedAt),
		"Expected markReceived on a pending block to succeed")

	assert.Error(t, sc.markProcessed(initHeight+1),
		"Expected marking %d as processed to fail", initHeight+1)
	assert.NoError(t, sc.markProcessed(initHeight),
		"Expected marking %d as processed to succeed", initHeight)

	assert.Equalf(t, blockStateProcessed, sc.getStateAtHeight(initHeight),
		"Expected block %d to be blockStateProcessed", initHeight)

	assert.NoError(t, sc.removePeer(peerID),
		"Expected removing peer %s to succeed", peerID)

	assert.Equalf(t, blockStateProcessed, sc.getStateAtHeight(initHeight),
		"Expected block %d to be blockStateProcessed", initHeight)
}

func TestMinMaxHeight(t *testing.T) {
	var (
		initHeight int64  = 5
		peerID     p2p.ID = "1"
		peerHeight int64  = 20
		sc                = newSchedule(initHeight)
		now               = time.Now()
	)

	assert.Equal(t, initHeight, sc.minHeight(),
		"Expected min height to be the initialized height")

	assert.Equal(t, initHeight, sc.maxHeight(),
		"Expected max height to be the initialized height")

	assert.NoError(t, sc.addPeer(peerID),
		"Adding a peer should return no error")

	assert.NoError(t, sc.setPeerHeight(peerID, peerHeight),
		"Expected setPeerHeight to return no error")

	assert.Equal(t, peerHeight, sc.maxHeight(),
		"Expected max height to increase to peerHeight")

	assert.Nil(t, sc.markPending(peerID, initHeight, now.Add(1*time.Second)),
		"Expected marking initHeight as pending to return no error")

	assert.Equal(t, initHeight+1, sc.minHeight(),
		"Expected marking initHeight as pending to move minHeight forward")
}

func TestPeersSlowerThan(t *testing.T) {
	var (
		initHeight int64  = 5
		peerID     p2p.ID = "1"
		peerHeight int64  = 20
		blockSize  int64  = 1024
		sc                = newSchedule(initHeight)
		now               = time.Now()
		receivedAt        = now.Add(1 * time.Second)
	)

	assert.NoError(t, sc.addPeer(peerID),
		"Adding a peer should return no error")

	assert.NoError(t, sc.setPeerHeight(peerID, peerHeight),
		"Expected setPeerHeight to return no error")

	assert.NoError(t, sc.markPending(peerID, peerHeight, now),
		"Expected markPending to return no error")

	assert.NoError(t, sc.markReceived(peerID, peerHeight, blockSize, receivedAt),
		"Expected markReceived to return no error")

	assert.Empty(t, sc.peersSlowerThan(blockSize-1),
		"expected no peers to be slower than blockSize-1 bytes/sec")

	assert.Containsf(t, sc.peersSlowerThan(blockSize+1), peerID,
		"expected %s to be slower than blockSize+1 bytes/sec", peerID)
}
@@ -48,15 +48,17 @@ func main() {
		os.Exit(1)
	}

-	rs := privval.NewSignerServiceEndpoint(logger, *chainID, pv, dialer)
-	err := rs.Start()
+	sd := privval.NewSignerDialerEndpoint(logger, dialer)
+	ss := privval.NewSignerServer(sd, *chainID, pv)
+
+	err := ss.Start()
	if err != nil {
		panic(err)
	}

	// Stop upon receiving SIGTERM or CTRL-C.
	cmn.TrapSignal(logger, func() {
-		err := rs.Stop()
+		err := ss.Stop()
		if err != nil {
			panic(err)
		}
@@ -4,6 +4,7 @@ import (
	"fmt"
	"net/url"

+	"github.com/pkg/errors"
	"github.com/spf13/cobra"

	cmn "github.com/tendermint/tendermint/libs/common"
@@ -80,7 +81,7 @@ func runProxy(cmd *cobra.Command, args []string) error {
	logger.Info("Constructing Verifier...")
	cert, err := proxy.NewVerifier(chainID, home, node, logger, cacheSize)
	if err != nil {
-		return cmn.ErrorWrap(err, "constructing Verifier")
+		return errors.Wrap(err, "constructing Verifier")
	}
	cert.SetLogger(logger)
	sc := proxy.SecureClient(node, cert)
@@ -88,7 +89,7 @@ func runProxy(cmd *cobra.Command, args []string) error {
	logger.Info("Starting proxy...")
	err = proxy.StartProxy(sc, listenAddr, logger, maxOpenConnections)
	if err != nil {
-		return cmn.ErrorWrap(err, "starting proxy")
+		return errors.Wrap(err, "starting proxy")
	}

	// Run forever
@@ -165,7 +165,7 @@ func TestRootConfig(t *testing.T) {
 func WriteConfigVals(dir string, vals map[string]string) error {
	data := ""
	for k, v := range vals {
-		data = data + fmt.Sprintf("%s = \"%s\"\n", k, v)
+		data += fmt.Sprintf("%s = \"%s\"\n", k, v)
	}
	cfile := filepath.Join(dir, "config.toml")
	return ioutil.WriteFile(cfile, []byte(data), 0666)
@@ -19,7 +19,7 @@ func AddNodeFlags(cmd *cobra.Command) {
	cmd.Flags().String("priv_validator_laddr", config.PrivValidatorListenAddr, "Socket address to listen on for connections from external priv_validator process")

	// node flags
-	cmd.Flags().Bool("fast_sync", config.FastSync, "Fast blockchain syncing")
+	cmd.Flags().Bool("fast_sync", config.FastSyncMode, "Fast blockchain syncing")

	// abci flags
	cmd.Flags().String("proxy_app", config.ProxyApp, "Proxy app address, or one of: 'kvstore', 'persistent_kvstore', 'counter', 'counter_serial' or 'noop' for local testing.")
@@ -64,6 +64,7 @@ type Config struct {
	RPC             *RPCConfig             `mapstructure:"rpc"`
	P2P             *P2PConfig             `mapstructure:"p2p"`
	Mempool         *MempoolConfig         `mapstructure:"mempool"`
+	FastSync        *FastSyncConfig        `mapstructure:"fastsync"`
	Consensus       *ConsensusConfig       `mapstructure:"consensus"`
	TxIndex         *TxIndexConfig         `mapstructure:"tx_index"`
	Instrumentation *InstrumentationConfig `mapstructure:"instrumentation"`
@@ -76,6 +77,7 @@ func DefaultConfig() *Config {
		RPC:             DefaultRPCConfig(),
		P2P:             DefaultP2PConfig(),
		Mempool:         DefaultMempoolConfig(),
+		FastSync:        DefaultFastSyncConfig(),
		Consensus:       DefaultConsensusConfig(),
		TxIndex:         DefaultTxIndexConfig(),
		Instrumentation: DefaultInstrumentationConfig(),
@@ -89,6 +91,7 @@ func TestConfig() *Config {
		RPC:             TestRPCConfig(),
		P2P:             TestP2PConfig(),
		Mempool:         TestMempoolConfig(),
+		FastSync:        TestFastSyncConfig(),
		Consensus:       TestConsensusConfig(),
		TxIndex:         TestTxIndexConfig(),
		Instrumentation: TestInstrumentationConfig(),
@@ -120,6 +123,9 @@ func (cfg *Config) ValidateBasic() error {
	if err := cfg.Mempool.ValidateBasic(); err != nil {
		return errors.Wrap(err, "Error in [mempool] section")
	}
+	if err := cfg.FastSync.ValidateBasic(); err != nil {
+		return errors.Wrap(err, "Error in [fastsync] section")
+	}
	if err := cfg.Consensus.ValidateBasic(); err != nil {
		return errors.Wrap(err, "Error in [consensus] section")
	}
@@ -151,7 +157,7 @@ type BaseConfig struct {
	// If this node is many blocks behind the tip of the chain, FastSync
	// allows them to catchup quickly by downloading blocks in parallel
	// and verifying their commits
-	FastSync bool `mapstructure:"fast_sync"`
+	FastSyncMode bool `mapstructure:"fast_sync"`

	// Database backend: goleveldb | cleveldb | boltdb
	// * goleveldb (github.com/syndtr/goleveldb - most popular implementation)
@@ -216,7 +222,7 @@ func DefaultBaseConfig() BaseConfig {
		LogLevel:          DefaultPackageLogLevels(),
		LogFormat:         LogFormatPlain,
		ProfListenAddress: "",
-		FastSync:          true,
+		FastSyncMode:      true,
		FilterPeers:       false,
		DBBackend:         "goleveldb",
		DBPath:            "data",
@@ -228,7 +234,7 @@ func TestBaseConfig() BaseConfig {
	cfg := DefaultBaseConfig()
	cfg.chainID = "tendermint_test"
	cfg.ProxyApp = "kvstore"
-	cfg.FastSync = false
+	cfg.FastSyncMode = false
	cfg.DBBackend = "memdb"
	return cfg
}
@@ -351,6 +357,12 @@ type RPCConfig struct {
	// See https://github.com/tendermint/tendermint/issues/3435
	TimeoutBroadcastTxCommit time.Duration `mapstructure:"timeout_broadcast_tx_commit"`

+	// Maximum size of request body, in bytes
+	MaxBodyBytes int64 `mapstructure:"max_body_bytes"`
+
+	// Maximum size of request header, in bytes
+	MaxHeaderBytes int `mapstructure:"max_header_bytes"`
+
	// The path to a file containing certificate that is used to create the HTTPS server.
	// Might be either an absolute path or a path relative to tendermint's config directory.
	//
@@ -385,6 +397,9 @@ func DefaultRPCConfig() *RPCConfig {
		MaxSubscriptionsPerClient: 5,
		TimeoutBroadcastTxCommit:  10 * time.Second,

+		MaxBodyBytes:   int64(1000000), // 1MB
+		MaxHeaderBytes: 1 << 20,        // same as the net/http default

		TLSCertFile: "",
		TLSKeyFile:  "",
	}
@@ -417,6 +432,12 @@ func (cfg *RPCConfig) ValidateBasic() error {
	if cfg.TimeoutBroadcastTxCommit < 0 {
		return errors.New("timeout_broadcast_tx_commit can't be negative")
	}
+	if cfg.MaxBodyBytes < 0 {
+		return errors.New("max_body_bytes can't be negative")
+	}
+	if cfg.MaxHeaderBytes < 0 {
+		return errors.New("max_header_bytes can't be negative")
+	}
	return nil
}
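The two new RPC limits are ordinary struct fields, so an embedding application can tune them before the node starts. A minimal sketch using only the config API shown in this hunk (the 2 MB figure is illustrative):

package main

import (
	"fmt"

	cfg "github.com/tendermint/tendermint/config"
)

func main() {
	c := cfg.DefaultRPCConfig()
	c.MaxBodyBytes = 2 * 1000 * 1000 // raise the request-body cap (illustrative)
	c.MaxHeaderBytes = 1 << 20       // keep the net/http default

	if err := c.ValidateBasic(); err != nil { // negative values are rejected
		fmt.Println("invalid rpc config:", err)
	}
}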
@@ -616,6 +637,7 @@ type MempoolConfig struct {
	Size        int   `mapstructure:"size"`
	MaxTxsBytes int64 `mapstructure:"max_txs_bytes"`
	CacheSize   int   `mapstructure:"cache_size"`
+	MaxTxBytes  int   `mapstructure:"max_tx_bytes"`
}

// DefaultMempoolConfig returns a default configuration for the Tendermint mempool
@@ -629,6 +651,7 @@ func DefaultMempoolConfig() *MempoolConfig {
		Size:        5000,
		MaxTxsBytes: 1024 * 1024 * 1024, // 1GB
		CacheSize:   10000,
+		MaxTxBytes:  1024 * 1024, // 1MB
	}
}
@@ -661,9 +684,44 @@ func (cfg *MempoolConfig) ValidateBasic() error {
	if cfg.CacheSize < 0 {
		return errors.New("cache_size can't be negative")
	}
+	if cfg.MaxTxBytes < 0 {
+		return errors.New("max_tx_bytes can't be negative")
+	}
	return nil
}

//-----------------------------------------------------------------------------
// FastSyncConfig

// FastSyncConfig defines the configuration for the Tendermint fast sync service
type FastSyncConfig struct {
	Version string `mapstructure:"version"`
}

// DefaultFastSyncConfig returns a default configuration for the fast sync service
func DefaultFastSyncConfig() *FastSyncConfig {
	return &FastSyncConfig{
		Version: "v0",
	}
}

// TestFastSyncConfig returns a default configuration for the fast sync.
func TestFastSyncConfig() *FastSyncConfig {
	return DefaultFastSyncConfig()
}

// ValidateBasic performs basic validation.
func (cfg *FastSyncConfig) ValidateBasic() error {
	switch cfg.Version {
	case "v0":
		return nil
	case "v1":
		return nil
	default:
		return fmt.Errorf("unknown fastsync version %s", cfg.Version)
	}
}

//-----------------------------------------------------------------------------
// ConsensusConfig
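The new [fastsync] section above is the switch for the experimental v1 reactor mentioned in the changelog. A minimal sketch of opting in programmatically, assuming only the constructors defined in this hunk (setting version in config.toml has the same effect):

package main

import (
	"fmt"

	cfg "github.com/tendermint/tendermint/config"
)

func main() {
	c := cfg.DefaultConfig()
	c.FastSync.Version = "v1" // "v0" (the default) is the legacy implementation

	if err := c.FastSync.ValidateBasic(); err != nil { // any other value fails
		fmt.Println("invalid fastsync config:", err)
	}
}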
@@ -1,6 +1,7 @@
 package config

 import (
+	"reflect"
	"testing"
	"time"

@@ -52,3 +53,116 @@ func TestTLSConfiguration(t *testing.T) {
	cfg.RPC.TLSKeyFile = "/abs/path/to/file.key"
	assert.Equal("/abs/path/to/file.key", cfg.RPC.KeyFile())
}

func TestBaseConfigValidateBasic(t *testing.T) {
	cfg := TestBaseConfig()
	assert.NoError(t, cfg.ValidateBasic())

	// tamper with log format
	cfg.LogFormat = "invalid"
	assert.Error(t, cfg.ValidateBasic())
}

func TestRPCConfigValidateBasic(t *testing.T) {
	cfg := TestRPCConfig()
	assert.NoError(t, cfg.ValidateBasic())

	fieldsToTest := []string{
		"GRPCMaxOpenConnections",
		"MaxOpenConnections",
		"MaxSubscriptionClients",
		"MaxSubscriptionsPerClient",
		"TimeoutBroadcastTxCommit",
		"MaxBodyBytes",
		"MaxHeaderBytes",
	}

	for _, fieldName := range fieldsToTest {
		reflect.ValueOf(cfg).Elem().FieldByName(fieldName).SetInt(-1)
		assert.Error(t, cfg.ValidateBasic())
		reflect.ValueOf(cfg).Elem().FieldByName(fieldName).SetInt(0)
	}
}
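This test, and the ones that follow, drive each numeric field negative via reflection and expect ValidateBasic to object. A hedged sketch of the same loop extracted into a reusable helper; the helper is hypothetical, not part of this changeset, and assumes every named field has an integer kind, as they do here:

// assertNegativeFieldsRejected sets each named field to -1, expects
// ValidateBasic to fail, then restores the field to 0.
func assertNegativeFieldsRejected(t *testing.T, cfg interface{ ValidateBasic() error }, fields []string) {
	for _, name := range fields {
		f := reflect.ValueOf(cfg).Elem().FieldByName(name)
		f.SetInt(-1)
		assert.Error(t, cfg.ValidateBasic(), "field %s: negative value should fail validation", name)
		f.SetInt(0)
	}
}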
func TestP2PConfigValidateBasic(t *testing.T) {
	cfg := TestP2PConfig()
	assert.NoError(t, cfg.ValidateBasic())

	fieldsToTest := []string{
		"MaxNumInboundPeers",
		"MaxNumOutboundPeers",
		"FlushThrottleTimeout",
		"MaxPacketMsgPayloadSize",
		"SendRate",
		"RecvRate",
	}

	for _, fieldName := range fieldsToTest {
		reflect.ValueOf(cfg).Elem().FieldByName(fieldName).SetInt(-1)
		assert.Error(t, cfg.ValidateBasic())
		reflect.ValueOf(cfg).Elem().FieldByName(fieldName).SetInt(0)
	}
}

func TestMempoolConfigValidateBasic(t *testing.T) {
	cfg := TestMempoolConfig()
	assert.NoError(t, cfg.ValidateBasic())

	fieldsToTest := []string{
		"Size",
		"MaxTxsBytes",
		"CacheSize",
		"MaxTxBytes",
	}

	for _, fieldName := range fieldsToTest {
		reflect.ValueOf(cfg).Elem().FieldByName(fieldName).SetInt(-1)
		assert.Error(t, cfg.ValidateBasic())
		reflect.ValueOf(cfg).Elem().FieldByName(fieldName).SetInt(0)
	}
}

func TestFastSyncConfigValidateBasic(t *testing.T) {
	cfg := TestFastSyncConfig()
	assert.NoError(t, cfg.ValidateBasic())

	// tamper with version
	cfg.Version = "v1"
	assert.NoError(t, cfg.ValidateBasic())

	cfg.Version = "invalid"
	assert.Error(t, cfg.ValidateBasic())
}

func TestConsensusConfigValidateBasic(t *testing.T) {
	cfg := TestConsensusConfig()
	assert.NoError(t, cfg.ValidateBasic())

	fieldsToTest := []string{
		"TimeoutPropose",
		"TimeoutProposeDelta",
		"TimeoutPrevote",
		"TimeoutPrevoteDelta",
		"TimeoutPrecommit",
		"TimeoutPrecommitDelta",
		"TimeoutCommit",
		"CreateEmptyBlocksInterval",
		"PeerGossipSleepDuration",
		"PeerQueryMaj23SleepDuration",
	}

	for _, fieldName := range fieldsToTest {
		reflect.ValueOf(cfg).Elem().FieldByName(fieldName).SetInt(-1)
		assert.Error(t, cfg.ValidateBasic())
		reflect.ValueOf(cfg).Elem().FieldByName(fieldName).SetInt(0)
	}
}

func TestInstrumentationConfigValidateBasic(t *testing.T) {
	cfg := TestInstrumentationConfig()
	assert.NoError(t, cfg.ValidateBasic())

	// tamper with maximum open connections
	cfg.MaxOpenConnections = -1
	assert.Error(t, cfg.ValidateBasic())
}
@@ -79,7 +79,7 @@ moniker = "{{ .BaseConfig.Moniker }}"
 # If this node is many blocks behind the tip of the chain, FastSync
 # allows them to catchup quickly by downloading blocks in parallel
 # and verifying their commits
-fast_sync = {{ .BaseConfig.FastSync }}
+fast_sync = {{ .BaseConfig.FastSyncMode }}

 # Database backend: goleveldb | cleveldb | boltdb
 # * goleveldb (github.com/syndtr/goleveldb - most popular implementation)
@@ -192,6 +192,12 @@ max_subscriptions_per_client = {{ .RPC.MaxSubscriptionsPerClient }}
 # See https://github.com/tendermint/tendermint/issues/3435
 timeout_broadcast_tx_commit = "{{ .RPC.TimeoutBroadcastTxCommit }}"

+# Maximum size of request body, in bytes
+max_body_bytes = {{ .RPC.MaxBodyBytes }}
+
+# Maximum size of request header, in bytes
+max_header_bytes = {{ .RPC.MaxHeaderBytes }}
+
 # The path to a file containing certificate that is used to create the HTTPS server.
 # Might be either an absolute path or a path relative to tendermint's config directory.
 # If the certificate is signed by a certificate authority,
@@ -288,6 +294,18 @@ max_txs_bytes = {{ .Mempool.MaxTxsBytes }}
 # Size of the cache (used to filter transactions we saw earlier) in transactions
 cache_size = {{ .Mempool.CacheSize }}

+# Maximum size of a single transaction.
+# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes} + {amino overhead}.
+max_tx_bytes = {{ .Mempool.MaxTxBytes }}
+
+##### fast sync configuration options #####
+[fastsync]
+
+# Fast Sync version to use:
+# 1) "v0" (default) - the legacy fast sync implementation
+# 2) "v1" - refactor of v0 version for better testability
+version = "{{ .FastSync.Version }}"
+
 ##### consensus configuration options #####
 [consensus]
@@ -20,19 +20,19 @@ import (
	"github.com/tendermint/tendermint/abci/example/counter"
	"github.com/tendermint/tendermint/abci/example/kvstore"
	abci "github.com/tendermint/tendermint/abci/types"
-	bc "github.com/tendermint/tendermint/blockchain"
	cfg "github.com/tendermint/tendermint/config"
	cstypes "github.com/tendermint/tendermint/consensus/types"
	cmn "github.com/tendermint/tendermint/libs/common"
-	dbm "github.com/tendermint/tendermint/libs/db"
	"github.com/tendermint/tendermint/libs/log"
	tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
	mempl "github.com/tendermint/tendermint/mempool"
	"github.com/tendermint/tendermint/p2p"
	"github.com/tendermint/tendermint/privval"
	sm "github.com/tendermint/tendermint/state"
+	"github.com/tendermint/tendermint/store"
	"github.com/tendermint/tendermint/types"
	tmtime "github.com/tendermint/tendermint/types/time"
+	dbm "github.com/tendermint/tm-db"
 )

 const (
@@ -280,7 +280,7 @@ func newConsensusStateWithConfig(thisConfig *cfg.Config, state sm.State, pv type

 func newConsensusStateWithConfigAndBlockStore(thisConfig *cfg.Config, state sm.State, pv types.PrivValidator, app abci.Application, blockDB dbm.DB) *ConsensusState {
	// Get BlockStore
-	blockStore := bc.NewBlockStore(blockDB)
+	blockStore := store.NewBlockStore(blockDB)

	// one for mempool, one for consensus
	mtx := new(sync.Mutex)
@@ -11,10 +11,10 @@ import (

	"github.com/tendermint/tendermint/abci/example/code"
	abci "github.com/tendermint/tendermint/abci/types"
-	dbm "github.com/tendermint/tendermint/libs/db"
	mempl "github.com/tendermint/tendermint/mempool"
	sm "github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/types"
+	dbm "github.com/tendermint/tm-db"
 )

 // for testing
@@ -82,14 +82,14 @@ func TestMempoolProgressInHigherRound(t *testing.T) {
	ensureNewRound(newRoundCh, height, round) // first round at first height
	ensureNewEventOnChannel(newBlockCh)       // first block gets committed

-	height = height + 1 // moving to the next height
+	height++ // moving to the next height
	round = 0

	ensureNewRound(newRoundCh, height, round) // first round at next height
	deliverTxsRange(cs, 0, 1)                 // we deliver txs, but dont set a proposal so we get the next round
	ensureNewTimeout(timeoutCh, height, round, cs.config.TimeoutPropose.Nanoseconds())

-	round = round + 1 // moving to the next round
+	round++ // moving to the next round
	ensureNewRound(newRoundCh, height, round) // wait for the next round
	ensureNewEventOnChannel(newBlockCh)       // now we can commit the block
}
@@ -155,12 +155,14 @@ func TestMempoolRmBadTx(t *testing.T) {
	// and the tx should get removed from the pool
	err := assertMempool(cs.txNotifier).CheckTx(txBytes, func(r *abci.Response) {
		if r.GetCheckTx().Code != code.CodeTypeBadNonce {
-			t.Fatalf("expected checktx to return bad nonce, got %v", r)
+			t.Errorf("expected checktx to return bad nonce, got %v", r)
+			return
		}
		checkTxRespCh <- struct{}{}
	})
	if err != nil {
-		t.Fatalf("Error after CheckTx: %v", err)
+		t.Errorf("Error after CheckTx: %v", err)
+		return
	}

	// check for the tx
@@ -180,7 +182,8 @@ func TestMempoolRmBadTx(t *testing.T) {
	case <-checkTxRespCh:
		// success
	case <-ticker:
-		t.Fatalf("Timed out waiting for tx to return")
+		t.Errorf("Timed out waiting for tx to return")
+		return
	}

	// Wait until the tx is removed
@@ -189,7 +192,8 @@ func TestMempoolRmBadTx(t *testing.T) {
	case <-emptyMempoolCh:
		// success
	case <-ticker:
-		t.Fatalf("Timed out waiting for tx to be removed")
+		t.Errorf("Timed out waiting for tx to be removed")
+		return
	}
}
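The Fatalf-to-Errorf change above is not cosmetic: some of these failure paths run inside the CheckTx callback, i.e. off the test goroutine, and the testing package only permits FailNow/Fatal(f) from the goroutine running the test function, since they call runtime.Goexit. A self-contained sketch of the safe pattern, independent of Tendermint's API:

package example

import "testing"

func TestCallbackFailure(t *testing.T) {
	done := make(chan struct{})
	go func() {
		defer close(done)
		if got := 2 + 2; got != 4 {
			// t.Errorf is safe from any goroutine; t.Fatalf is not, because
			// it would call runtime.Goexit on the wrong goroutine.
			t.Errorf("unexpected sum: %d", got)
			return
		}
	}()
	<-done
}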
@@ -17,29 +17,31 @@ import (
	abcicli "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/example/kvstore"
	abci "github.com/tendermint/tendermint/abci/types"
-	bc "github.com/tendermint/tendermint/blockchain"
	cfg "github.com/tendermint/tendermint/config"
-	dbm "github.com/tendermint/tendermint/libs/db"
	cstypes "github.com/tendermint/tendermint/consensus/types"
	cmn "github.com/tendermint/tendermint/libs/common"
	"github.com/tendermint/tendermint/libs/log"
	mempl "github.com/tendermint/tendermint/mempool"
	"github.com/tendermint/tendermint/p2p"
	"github.com/tendermint/tendermint/p2p/mock"
	sm "github.com/tendermint/tendermint/state"
+	"github.com/tendermint/tendermint/store"
	"github.com/tendermint/tendermint/types"
+	dbm "github.com/tendermint/tm-db"
 )

 //----------------------------------------------
 // in-process testnets

-func startConsensusNet(t *testing.T, css []*ConsensusState, N int) (
+func startConsensusNet(t *testing.T, css []*ConsensusState, n int) (
	[]*ConsensusReactor,
	[]types.Subscription,
	[]*types.EventBus,
 ) {
-	reactors := make([]*ConsensusReactor, N)
+	reactors := make([]*ConsensusReactor, n)
	blocksSubs := make([]types.Subscription, 0)
-	eventBuses := make([]*types.EventBus, N)
-	for i := 0; i < N; i++ {
+	eventBuses := make([]*types.EventBus, n)
+	for i := 0; i < n; i++ {
		/*logger, err := tmflags.ParseLogLevel("consensus:info,*:error", logger, "info")
		if err != nil { t.Fatal(err)}*/
		reactors[i] = NewConsensusReactor(css[i], true) // so we dont start the consensus states
@@ -58,7 +60,7 @@ func startConsensusNet(t *testing.T, css []*ConsensusState, N int) (
		}
	}
	// make connected switches and start all reactors
-	p2p.MakeConnectedSwitches(config.P2P, N, func(i int, s *p2p.Switch) *p2p.Switch {
+	p2p.MakeConnectedSwitches(config.P2P, n, func(i int, s *p2p.Switch) *p2p.Switch {
		s.AddReactor("CONSENSUS", reactors[i])
		s.SetLogger(reactors[i].conS.Logger.With("module", "p2p"))
		return s
@@ -68,7 +70,7 @@ func startConsensusNet(t *testing.T, css []*ConsensusState, N int) (
	// If we started the state machines before everyone was connected,
	// we'd block when the cs fires NewBlockEvent and the peers are trying to start their reactors
	// TODO: is this still true with new pubsub?
-	for i := 0; i < N; i++ {
+	for i := 0; i < n; i++ {
		s := reactors[i].conS.GetState()
		reactors[i].SwitchToConsensus(s, 0)
	}
@@ -133,7 +135,7 @@ func TestReactorWithEvidence(t *testing.T) {
	// css[i] = newConsensusStateWithConfig(thisConfig, state, privVals[i], app)

	blockDB := dbm.NewMemDB()
-	blockStore := bc.NewBlockStore(blockDB)
+	blockStore := store.NewBlockStore(blockDB)

	// one for mempool, one for consensus
	mtx := new(sync.Mutex)
@@ -235,7 +237,7 @@ func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) {

	// send a tx
	if err := assertMempool(css[3].txNotifier).CheckTx([]byte{1, 2, 3}, nil); err != nil {
-		//t.Fatal(err)
+		t.Error(err)
	}

	// wait till everyone makes the first new block
@@ -632,3 +634,253 @@ func capture() {
	count := runtime.Stack(trace, true)
	fmt.Printf("Stack of %d bytes: %s\n", count, trace)
}

//-------------------------------------------------------------
// Ensure basic validation of structs is functioning

func TestNewRoundStepMessageValidateBasic(t *testing.T) {
	testCases := []struct {
		testName               string
		messageHeight          int64
		messageRound           int
		messageStep            cstypes.RoundStepType
		messageLastCommitRound int
		expectErr              bool
	}{
		{"Valid Message", 0, 0, 0x01, 1, false},
		{"Invalid Message", -1, 0, 0x01, 1, true},
		{"Invalid Message", 0, -1, 0x01, 1, true},
		{"Invalid Message", 0, 0, 0x00, 1, true},
		{"Invalid Message", 0, 0, 0x00, 0, true},
		{"Invalid Message", 1, 0, 0x01, 0, true},
	}

	for _, tc := range testCases {
		t.Run(tc.testName, func(t *testing.T) {
			message := NewRoundStepMessage{
				Height:          tc.messageHeight,
				Round:           tc.messageRound,
				Step:            tc.messageStep,
				LastCommitRound: tc.messageLastCommitRound,
			}

			assert.Equal(t, tc.expectErr, message.ValidateBasic() != nil, "Validate Basic had an unexpected result")
		})
	}
}

func TestNewValidBlockMessageValidateBasic(t *testing.T) {
	testBitArray := cmn.NewBitArray(1)
	testCases := []struct {
		testName          string
		messageHeight     int64
		messageRound      int
		messageBlockParts *cmn.BitArray
		expectErr         bool
	}{
		{"Valid Message", 0, 0, testBitArray, false},
		{"Invalid Message", -1, 0, testBitArray, true},
		{"Invalid Message", 0, -1, testBitArray, true},
		{"Invalid Message", 0, 0, cmn.NewBitArray(0), true},
	}

	for _, tc := range testCases {
		t.Run(tc.testName, func(t *testing.T) {
			message := NewValidBlockMessage{
				Height:     tc.messageHeight,
				Round:      tc.messageRound,
				BlockParts: tc.messageBlockParts,
			}

			message.BlockPartsHeader.Total = 1

			assert.Equal(t, tc.expectErr, message.ValidateBasic() != nil, "Validate Basic had an unexpected result")
		})
	}
}

func TestProposalPOLMessageValidateBasic(t *testing.T) {
	testBitArray := cmn.NewBitArray(1)
	testCases := []struct {
		testName                string
		messageHeight           int64
		messageProposalPOLRound int
		messageProposalPOL      *cmn.BitArray
		expectErr               bool
	}{
		{"Valid Message", 0, 0, testBitArray, false},
		{"Invalid Message", -1, 0, testBitArray, true},
		{"Invalid Message", 0, -1, testBitArray, true},
		{"Invalid Message", 0, 0, cmn.NewBitArray(0), true},
	}

	for _, tc := range testCases {
		t.Run(tc.testName, func(t *testing.T) {
			message := ProposalPOLMessage{
				Height:           tc.messageHeight,
				ProposalPOLRound: tc.messageProposalPOLRound,
				ProposalPOL:      tc.messageProposalPOL,
			}

			assert.Equal(t, tc.expectErr, message.ValidateBasic() != nil, "Validate Basic had an unexpected result")
		})
	}
}

func TestBlockPartMessageValidateBasic(t *testing.T) {
	testPart := new(types.Part)
	testCases := []struct {
		testName      string
		messageHeight int64
		messageRound  int
		messagePart   *types.Part
		expectErr     bool
	}{
		{"Valid Message", 0, 0, testPart, false},
		{"Invalid Message", -1, 0, testPart, true},
		{"Invalid Message", 0, -1, testPart, true},
	}

	for _, tc := range testCases {
		t.Run(tc.testName, func(t *testing.T) {
			message := BlockPartMessage{
				Height: tc.messageHeight,
				Round:  tc.messageRound,
				Part:   tc.messagePart,
			}

			assert.Equal(t, tc.expectErr, message.ValidateBasic() != nil, "Validate Basic had an unexpected result")
		})
	}

	message := BlockPartMessage{Height: 0, Round: 0, Part: new(types.Part)}
	message.Part.Index = -1

	assert.Equal(t, true, message.ValidateBasic() != nil, "Validate Basic had an unexpected result")
}

func TestHasVoteMessageValidateBasic(t *testing.T) {
	const (
		validSignedMsgType   types.SignedMsgType = 0x01
		invalidSignedMsgType types.SignedMsgType = 0x03
	)

	testCases := []struct {
		testName      string
		messageHeight int64
		messageRound  int
		messageType   types.SignedMsgType
		messageIndex  int
		expectErr     bool
	}{
		{"Valid Message", 0, 0, validSignedMsgType, 0, false},
		{"Invalid Message", -1, 0, validSignedMsgType, 0, true},
		{"Invalid Message", 0, -1, validSignedMsgType, 0, true},
		{"Invalid Message", 0, 0, invalidSignedMsgType, 0, true},
		{"Invalid Message", 0, 0, validSignedMsgType, -1, true},
	}

	for _, tc := range testCases {
		t.Run(tc.testName, func(t *testing.T) {
			message := HasVoteMessage{
				Height: tc.messageHeight,
				Round:  tc.messageRound,
				Type:   tc.messageType,
				Index:  tc.messageIndex,
			}

			assert.Equal(t, tc.expectErr, message.ValidateBasic() != nil, "Validate Basic had an unexpected result")
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestVoteSetMaj23MessageValidateBasic(t *testing.T) {
|
||||
const (
|
||||
validSignedMsgType types.SignedMsgType = 0x01
|
||||
invalidSignedMsgType types.SignedMsgType = 0x03
|
||||
)
|
||||
|
||||
validBlockID := types.BlockID{}
|
||||
invalidBlockID := types.BlockID{
|
||||
Hash: cmn.HexBytes{},
|
||||
PartsHeader: types.PartSetHeader{
|
||||
Total: -1,
|
||||
Hash: cmn.HexBytes{},
|
||||
},
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
testName string
|
||||
messageHeight int64
|
||||
messageRound int
|
||||
messageType types.SignedMsgType
|
||||
messageBlockID types.BlockID
|
||||
expectErr bool
|
||||
}{
|
||||
{"Valid Message", 0, 0, validSignedMsgType, validBlockID, false},
|
||||
{"Invalid Message", -1, 0, validSignedMsgType, validBlockID, true},
|
||||
{"Invalid Message", 0, -1, validSignedMsgType, validBlockID, true},
|
||||
{"Invalid Message", 0, 0, invalidSignedMsgType, validBlockID, true},
|
||||
{"Invalid Message", 0, 0, validSignedMsgType, invalidBlockID, true},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.testName, func(t *testing.T) {
|
||||
message := VoteSetMaj23Message{
|
||||
Height: tc.messageHeight,
|
||||
Round: tc.messageRound,
|
||||
Type: tc.messageType,
|
||||
BlockID: tc.messageBlockID,
|
||||
}
|
||||
|
||||
assert.Equal(t, tc.expectErr, message.ValidateBasic() != nil, "Validate Basic had an unexpected result")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestVoteSetBitsMessageValidateBasic(t *testing.T) {
|
||||
const (
|
||||
validSignedMsgType types.SignedMsgType = 0x01
|
||||
invalidSignedMsgType types.SignedMsgType = 0x03
|
||||
)
|
||||
|
||||
validBlockID := types.BlockID{}
|
||||
invalidBlockID := types.BlockID{
|
||||
Hash: cmn.HexBytes{},
|
||||
PartsHeader: types.PartSetHeader{
|
||||
Total: -1,
|
||||
Hash: cmn.HexBytes{},
|
||||
},
|
||||
}
|
||||
testBitArray := cmn.NewBitArray(1)
|
||||
|
||||
testCases := []struct {
|
||||
testName string
|
||||
messageHeight int64
|
||||
messageRound int
|
||||
messageType types.SignedMsgType
|
||||
messageBlockID types.BlockID
|
||||
messageVotes *cmn.BitArray
|
||||
expectErr bool
|
||||
}{
|
||||
{"Valid Message", 0, 0, validSignedMsgType, validBlockID, testBitArray, false},
|
||||
{"Invalid Message", -1, 0, validSignedMsgType, validBlockID, testBitArray, true},
|
||||
{"Invalid Message", 0, -1, validSignedMsgType, validBlockID, testBitArray, true},
|
||||
{"Invalid Message", 0, 0, invalidSignedMsgType, validBlockID, testBitArray, true},
|
||||
{"Invalid Message", 0, 0, validSignedMsgType, invalidBlockID, testBitArray, true},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.testName, func(t *testing.T) {
|
||||
message := VoteSetBitsMessage{
|
||||
Height: tc.messageHeight,
|
||||
Round: tc.messageRound,
|
||||
Type: tc.messageType,
|
||||
// Votes: tc.messageVotes,
|
||||
BlockID: tc.messageBlockID,
|
||||
}
|
||||
|
||||
assert.Equal(t, tc.expectErr, message.ValidateBasic() != nil, "Validate Basic had an unexpected result")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
@@ -13,8 +13,8 @@ import (

	abci "github.com/tendermint/tendermint/abci/types"
	//auto "github.com/tendermint/tendermint/libs/autofile"
	dbm "github.com/tendermint/tm-db"

	dbm "github.com/tendermint/tendermint/libs/db"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/mock"
	"github.com/tendermint/tendermint/proxy"
@@ -141,14 +141,16 @@ func (cs *ConsensusState) catchupReplay(csHeight int64) error {
	var msg *TimedWALMessage
	dec := WALDecoder{gr}

LOOP:
	for {
		msg, err = dec.Decode()
		if err == io.EOF {
			break
		} else if IsDataCorruptionError(err) {
		switch {
		case err == io.EOF:
			break LOOP
		case IsDataCorruptionError(err):
			cs.Logger.Error("data has been corrupted in last height of consensus WAL", "err", err, "height", csHeight)
			return err
		} else if err != nil {
		case err != nil:
			return err
		}

@@ -320,11 +322,9 @@ func (h *Handshaker) ReplayBlocks(
		}
		state.Validators = types.NewValidatorSet(vals)
		state.NextValidators = types.NewValidatorSet(vals)
	} else {
	} else if len(h.genDoc.Validators) == 0 {
		// If validator set is not set in genesis and still empty after InitChain, exit.
		if len(h.genDoc.Validators) == 0 {
			return nil, fmt.Errorf("validator set is nil in genesis and still empty after InitChain")
		}
		return nil, fmt.Errorf("validator set is nil in genesis and still empty after InitChain")
	}

	if res.ConsensusParams != nil {
@@ -335,19 +335,20 @@ func (h *Handshaker) ReplayBlocks(
	}

	// First handle edge cases and constraints on the storeBlockHeight.
	if storeBlockHeight == 0 {
	switch {
	case storeBlockHeight == 0:
		assertAppHashEqualsOneFromState(appHash, state)
		return appHash, nil

	} else if storeBlockHeight < appBlockHeight {
	case storeBlockHeight < appBlockHeight:
		// the app should never be ahead of the store (but this is under app's control)
		return appHash, sm.ErrAppBlockHeightTooHigh{CoreHeight: storeBlockHeight, AppHeight: appBlockHeight}

	} else if storeBlockHeight < stateBlockHeight {
	case storeBlockHeight < stateBlockHeight:
		// the state should never be ahead of the store (this is under tendermint's control)
		panic(fmt.Sprintf("StateBlockHeight (%d) > StoreBlockHeight (%d)", stateBlockHeight, storeBlockHeight))

	} else if storeBlockHeight > stateBlockHeight+1 {
	case storeBlockHeight > stateBlockHeight+1:
		// store should be at most one ahead of the state (this is under tendermint's control)
		panic(fmt.Sprintf("StoreBlockHeight (%d) > StateBlockHeight + 1 (%d)", storeBlockHeight, stateBlockHeight+1))
	}
@@ -371,12 +372,13 @@ func (h *Handshaker) ReplayBlocks(
	} else if storeBlockHeight == stateBlockHeight+1 {
		// We saved the block in the store but haven't updated the state,
		// so we'll need to replay a block using the WAL.
		if appBlockHeight < stateBlockHeight {
		switch {
		case appBlockHeight < stateBlockHeight:
			// the app is further behind than it should be, so replay blocks
			// but leave the last block to go through the WAL
			return h.replayBlocks(state, proxyApp, appBlockHeight, storeBlockHeight, true)

		} else if appBlockHeight == stateBlockHeight {
		case appBlockHeight == stateBlockHeight:
			// We haven't run Commit (both the state and app are one block behind),
			// so replayBlock with the real app.
			// NOTE: We could instead use the cs.WAL on cs.Start,
@@ -385,7 +387,7 @@ func (h *Handshaker) ReplayBlocks(
			state, err = h.replayBlock(state, storeBlockHeight, proxyApp.Consensus())
			return state.AppHash, err

		} else if appBlockHeight == storeBlockHeight {
		case appBlockHeight == storeBlockHeight:
			// We ran Commit, but didn't save the state, so replayBlock with mock app.
			abciResponses, err := sm.LoadABCIResponses(h.stateDB, storeBlockHeight)
			if err != nil {
@@ -10,15 +10,15 @@ import (
	"strings"

	"github.com/pkg/errors"
	dbm "github.com/tendermint/tm-db"

	bc "github.com/tendermint/tendermint/blockchain"
	cfg "github.com/tendermint/tendermint/config"
	cmn "github.com/tendermint/tendermint/libs/common"
	dbm "github.com/tendermint/tendermint/libs/db"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/mock"
	"github.com/tendermint/tendermint/proxy"
	sm "github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/store"
	"github.com/tendermint/tendermint/types"
)

@@ -231,10 +231,8 @@ func (pb *playback) replayConsoleLoop() int {
				fmt.Println("back takes an integer argument")
			} else if i > pb.count {
				fmt.Printf("argument to back must not be larger than the current count (%d)\n", pb.count)
			} else {
				if err := pb.replayReset(i, newStepSub); err != nil {
					pb.cs.Logger.Error("Replay reset error", "err", err)
				}
			} else if err := pb.replayReset(i, newStepSub); err != nil {
				pb.cs.Logger.Error("Replay reset error", "err", err)
			}
		}

@@ -280,7 +278,7 @@ func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusCo
	dbType := dbm.DBBackendType(config.DBBackend)
	// Get BlockStore
	blockStoreDB := dbm.NewDB("blockstore", dbType, config.DBDir())
	blockStore := bc.NewBlockStore(blockStoreDB)
	blockStore := store.NewBlockStore(blockStoreDB)

	// Get State
	stateDB := dbm.NewDB("state", dbType, config.DBDir())
@@ -22,15 +22,14 @@ import (
	cfg "github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/crypto"
	cmn "github.com/tendermint/tendermint/libs/common"
	dbm "github.com/tendermint/tendermint/libs/db"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/mock"
	"github.com/tendermint/tendermint/privval"
	"github.com/tendermint/tendermint/proxy"
	sm "github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/types"
	tmtime "github.com/tendermint/tendermint/types/time"
	"github.com/tendermint/tendermint/version"
	dbm "github.com/tendermint/tm-db"
)

func TestMain(m *testing.M) {
@@ -546,8 +545,7 @@ func TestMockProxyApp(t *testing.T) {
	abciRes.DeliverTx = make([]*abci.ResponseDeliverTx, len(loadedAbciRes.DeliverTx))
	// Execute transactions and get hash.
	proxyCb := func(req *abci.Request, res *abci.Response) {
		switch r := res.Value.(type) {
		case *abci.Response_DeliverTx:
		if r, ok := res.Value.(*abci.Response_DeliverTx); ok {
			// TODO: make use of res.Log
			// TODO: make use of this info
			// Blocks may include invalid txs.
@@ -849,31 +847,14 @@ func makeBlocks(n int, state *sm.State, privVal types.PrivValidator) []*types.Bl
	return blocks
}

func makeVote(header *types.Header, blockID types.BlockID, valset *types.ValidatorSet, privVal types.PrivValidator) *types.Vote {
	addr := privVal.GetPubKey().Address()
	idx, _ := valset.GetByAddress(addr)
	vote := &types.Vote{
		ValidatorAddress: addr,
		ValidatorIndex:   idx,
		Height:           header.Height,
		Round:            1,
		Timestamp:        tmtime.Now(),
		Type:             types.PrecommitType,
		BlockID:          blockID,
	}

	privVal.SignVote(header.ChainID, vote)

	return vote
}

func makeBlock(state sm.State, lastBlock *types.Block, lastBlockMeta *types.BlockMeta,
	privVal types.PrivValidator, height int64) (*types.Block, *types.PartSet) {

	lastCommit := types.NewCommit(types.BlockID{}, nil)
	if height > 1 {
		vote := makeVote(&lastBlock.Header, lastBlockMeta.BlockID, state.Validators, privVal).CommitSig()
		lastCommit = types.NewCommit(lastBlockMeta.BlockID, []*types.CommitSig{vote})
		vote, _ := types.MakeVote(lastBlock.Header.Height, lastBlockMeta.BlockID, state.Validators, privVal, lastBlock.Header.ChainID)
		voteCommitSig := vote.CommitSig()
		lastCommit = types.NewCommit(lastBlockMeta.BlockID, []*types.CommitSig{voteCommitSig})
	}

	return state.MakeBlock(height, []types.Tx{}, lastCommit, nil, state.Validators.GetProposer().Address)
@@ -690,13 +690,13 @@ func (cs *ConsensusState) handleMsg(mi msgInfo) {
			cs.statsMsgQueue <- mi
		}

		if err == ErrAddingVote {
			// TODO: punish peer
			// We probably don't want to stop the peer here. The vote does not
			// necessarily comes from a malicious peer but can be just broadcasted by
			// a typical peer.
			// https://github.com/tendermint/tendermint/issues/1281
		}
		// if err == ErrAddingVote {
		// TODO: punish peer
		// We probably don't want to stop the peer here. The vote does not
		// necessarily comes from a malicious peer but can be just broadcasted by
		// a typical peer.
		// https://github.com/tendermint/tendermint/issues/1281
		// }

		// NOTE: the vote is broadcast to peers by the reactor listening
		// for vote events
@@ -709,7 +709,7 @@ func (cs *ConsensusState) handleMsg(mi msgInfo) {
		return
	}

	if err != nil {
	if err != nil { // nolint:staticcheck
		// Causes TestReactorValidatorSetChanges to timeout
		// https://github.com/tendermint/tendermint/issues/3406
		// cs.Logger.Error("Error with msg", "height", cs.Height, "round", cs.Round,
@@ -924,10 +924,8 @@ func (cs *ConsensusState) defaultDecideProposal(height int64, round int) {
		}
		cs.Logger.Info("Signed proposal", "height", height, "round", round, "proposal", proposal)
		cs.Logger.Debug(fmt.Sprintf("Signed proposal block: %v", block))
	} else {
		if !cs.replayMode {
			cs.Logger.Error("enterPropose: Error signing proposal", "height", height, "round", round, "err", err)
		}
	} else if !cs.replayMode {
		cs.Logger.Error("enterPropose: Error signing proposal", "height", height, "round", round, "err", err)
	}
}

@@ -954,14 +952,15 @@ func (cs *ConsensusState) isProposalComplete() bool {
// NOTE: keep it side-effect free for clarity.
func (cs *ConsensusState) createProposalBlock() (block *types.Block, blockParts *types.PartSet) {
	var commit *types.Commit
	if cs.Height == 1 {
	switch {
	case cs.Height == 1:
		// We're creating a proposal for the first block.
		// The commit is empty, but not nil.
		commit = types.NewCommit(types.BlockID{}, nil)
	} else if cs.LastCommit.HasTwoThirdsMajority() {
	case cs.LastCommit.HasTwoThirdsMajority():
		// Make the commit from LastCommit
		commit = cs.LastCommit.MakeCommit()
	} else {
	default:
		// This shouldn't happen.
		cs.Logger.Error("enterPropose: Cannot propose anything: No commit for the previous block.")
		return
@@ -1227,9 +1226,10 @@ func (cs *ConsensusState) enterCommit(height int64, commitRound int) {
			cs.ProposalBlockParts = types.NewPartSetFromHeader(blockID.PartsHeader)
			cs.eventBus.PublishEventValidBlock(cs.RoundStateEvent())
			cs.evsw.FireEvent(types.EventValidBlock, &cs.RoundState)
		} else {
			// We just need to keep waiting.
		}
		// else {
		// We just need to keep waiting.
		// }
	}
}

@@ -1518,9 +1518,11 @@ func (cs *ConsensusState) tryAddVote(vote *types.Vote, peerID p2p.ID) (bool, err
		cs.evpool.AddEvidence(voteErr.DuplicateVoteEvidence)
		return added, err
	} else {
		// Probably an invalid signature / Bad peer.
		// Seems this can also err sometimes with "Unexpected step" - perhaps not from a bad peer ?
		cs.Logger.Error("Error attempting to add vote", "err", err)
		// Either
		// 1) bad peer OR
		// 2) not a bad peer? this can also err sometimes with "Unexpected step" OR
		// 3) tmkms use with multiple validators connecting to a single tmkms instance (https://github.com/tendermint/tendermint/issues/3839).
		cs.Logger.Info("Error attempting to add vote", "err", err)
		return added, ErrAddingVote
	}
}
@@ -1629,17 +1631,18 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool,
	}

	// If +2/3 prevotes for *anything* for future round:
	if cs.Round < vote.Round && prevotes.HasTwoThirdsAny() {
	switch {
	case cs.Round < vote.Round && prevotes.HasTwoThirdsAny():
		// Round-skip if there is any 2/3+ of votes ahead of us
		cs.enterNewRound(height, vote.Round)
	} else if cs.Round == vote.Round && cstypes.RoundStepPrevote <= cs.Step { // current round
	case cs.Round == vote.Round && cstypes.RoundStepPrevote <= cs.Step: // current round
		blockID, ok := prevotes.TwoThirdsMajority()
		if ok && (cs.isProposalComplete() || len(blockID.Hash) == 0) {
			cs.enterPrecommit(height, vote.Round)
		} else if prevotes.HasTwoThirdsAny() {
			cs.enterPrevoteWait(height, vote.Round)
		}
	} else if cs.Proposal != nil && 0 <= cs.Proposal.POLRound && cs.Proposal.POLRound == vote.Round {
	case cs.Proposal != nil && 0 <= cs.Proposal.POLRound && cs.Proposal.POLRound == vote.Round:
		// If the proposal is now complete, enter prevote of cs.Round.
		if cs.isProposalComplete() {
			cs.enterPrevote(height, cs.Round)
@@ -181,7 +181,7 @@ func TestStateBadProposal(t *testing.T) {
	propBlock, _ := cs1.createProposalBlock() //changeProposer(t, cs1, vs2)

	// make the second validator the proposer by incrementing round
	round = round + 1
	round++
	incrementRound(vss[1:]...)

	// make the block bad by tampering with statehash
@@ -374,7 +374,7 @@ func TestStateLockNoPOL(t *testing.T) {

	///

	round = round + 1 // moving to the next round
	round++ // moving to the next round
	ensureNewRound(newRoundCh, height, round)
	t.Log("#### ONTO ROUND 1")
	/*
@@ -418,7 +418,7 @@ func TestStateLockNoPOL(t *testing.T) {
	// then we enterPrecommitWait and timeout into NewRound
	ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds())

	round = round + 1 // entering new round
	round++ // entering new round
	ensureNewRound(newRoundCh, height, round)
	t.Log("#### ONTO ROUND 2")
	/*
@@ -460,7 +460,7 @@ func TestStateLockNoPOL(t *testing.T) {

	incrementRound(vs2)

	round = round + 1 // entering new round
	round++ // entering new round
	ensureNewRound(newRoundCh, height, round)
	t.Log("#### ONTO ROUND 3")
	/*
@@ -544,7 +544,7 @@ func TestStateLockPOLRelock(t *testing.T) {
	// timeout to new round
	ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds())

	round = round + 1 // moving to the next round
	round++ // moving to the next round
	//XXX: this isnt guaranteed to get there before the timeoutPropose ...
	if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil {
		t.Fatal(err)
@@ -621,8 +621,6 @@ func TestStateLockPOLUnlock(t *testing.T) {
	// the proposed block should now be locked and our precommit added
	validatePrecommit(t, cs1, round, round, vss[0], theBlockHash, theBlockHash)

	rs = cs1.GetRoundState()

	// add precommits from the rest
	signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2, vs4)
	signAddVotes(cs1, types.PrecommitType, theBlockHash, theBlockParts, vs3)
@@ -637,7 +635,7 @@ func TestStateLockPOLUnlock(t *testing.T) {
	lockedBlockHash := rs.LockedBlock.Hash()

	incrementRound(vs2, vs3, vs4)
	round = round + 1 // moving to the next round
	round++ // moving to the next round

	ensureNewRound(newRoundCh, height, round)
	t.Log("#### ONTO ROUND 1")
@@ -720,7 +718,7 @@ func TestStateLockPOLSafety1(t *testing.T) {

	incrementRound(vs2, vs3, vs4)

	round = round + 1 // moving to the next round
	round++ // moving to the next round
	ensureNewRound(newRoundCh, height, round)

	//XXX: this isnt guaranteed to get there before the timeoutPropose ...
@@ -757,7 +755,7 @@ func TestStateLockPOLSafety1(t *testing.T) {
	ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds())

	incrementRound(vs2, vs3, vs4)
	round = round + 1 // moving to the next round
	round++ // moving to the next round

	ensureNewRound(newRoundCh, height, round)

@@ -823,7 +821,7 @@ func TestStateLockPOLSafety2(t *testing.T) {

	incrementRound(vs2, vs3, vs4)

	round = round + 1 // moving to the next round
	round++ // moving to the next round
	t.Log("### ONTO Round 1")
	// jump in at round 1
	startTestRound(cs1, height, round)
@@ -852,7 +850,7 @@ func TestStateLockPOLSafety2(t *testing.T) {
	// timeout of precommit wait to new round
	ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds())

	round = round + 1 // moving to the next round
	round++ // moving to the next round
	// in round 2 we see the polkad block from round 0
	newProp := types.NewProposal(height, round, 0, propBlockID0)
	if err := vs3.SignProposal(config.ChainID(), newProp); err != nil {
@@ -922,7 +920,7 @@ func TestProposeValidBlock(t *testing.T) {
	ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds())

	incrementRound(vs2, vs3, vs4)
	round = round + 1 // moving to the next round
	round++ // moving to the next round

	ensureNewRound(newRoundCh, height, round)

@@ -947,14 +945,14 @@ func TestProposeValidBlock(t *testing.T) {

	signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4)

	round = round + 2 // moving to the next round
	round += 2 // moving to the next round

	ensureNewRound(newRoundCh, height, round)
	t.Log("### ONTO ROUND 3")

	ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds())

	round = round + 1 // moving to the next round
	round++ // moving to the next round

	ensureNewRound(newRoundCh, height, round)

@@ -1046,7 +1044,7 @@ func TestSetValidBlockOnDelayedProposal(t *testing.T) {
	voteCh := subscribeToVoter(cs1, addr)
	proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)

	round = round + 1 // move to round in which P0 is not proposer
	round++ // move to round in which P0 is not proposer
	incrementRound(vs2, vs3, vs4)

	startTestRound(cs1, cs1.Height, round)
@@ -1125,7 +1123,7 @@ func TestWaitingTimeoutProposeOnNewRound(t *testing.T) {
	incrementRound(vss[1:]...)
	signAddVotes(cs1, types.PrevoteType, nil, types.PartSetHeader{}, vs2, vs3, vs4)

	round = round + 1 // moving to the next round
	round++ // moving to the next round
	ensureNewRound(newRoundCh, height, round)

	rs := cs1.GetRoundState()
@@ -1159,7 +1157,7 @@ func TestRoundSkipOnNilPolkaFromHigherRound(t *testing.T) {
	incrementRound(vss[1:]...)
	signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4)

	round = round + 1 // moving to the next round
	round++ // moving to the next round
	ensureNewRound(newRoundCh, height, round)

	ensurePrecommit(voteCh, height, round)
@@ -1167,7 +1165,7 @@ func TestRoundSkipOnNilPolkaFromHigherRound(t *testing.T) {

	ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds())

	round = round + 1 // moving to the next round
	round++ // moving to the next round
	ensureNewRound(newRoundCh, height, round)
}

@@ -1317,8 +1315,6 @@ func TestStartNextHeightCorrectly(t *testing.T) {
	// the proposed block should now be locked and our precommit added
	validatePrecommit(t, cs1, round, round, vss[0], theBlockHash, theBlockHash)

	rs = cs1.GetRoundState()

	// add precommits
	signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2)
	signAddVotes(cs1, types.PrecommitType, theBlockHash, theBlockParts, vs3)
@@ -1370,8 +1366,6 @@ func TestResetTimeoutPrecommitUponNewHeight(t *testing.T) {
	ensurePrecommit(voteCh, height, round)
	validatePrecommit(t, cs1, round, round, vss[0], theBlockHash, theBlockHash)

	rs = cs1.GetRoundState()

	// add precommits
	signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2)
	signAddVotes(cs1, types.PrecommitType, theBlockHash, theBlockParts, vs3)
@@ -1517,7 +1511,7 @@ func TestStateHalt1(t *testing.T) {
	// timeout to new round
	ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds())

	round = round + 1 // moving to the next round
	round++ // moving to the next round

	ensureNewRound(newRoundCh, height, round)
	rs = cs1.GetRoundState()
@@ -12,16 +12,16 @@ import (
	"github.com/pkg/errors"

	"github.com/tendermint/tendermint/abci/example/kvstore"
	bc "github.com/tendermint/tendermint/blockchain"
	cfg "github.com/tendermint/tendermint/config"
	cmn "github.com/tendermint/tendermint/libs/common"
	"github.com/tendermint/tendermint/libs/db"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/mock"
	"github.com/tendermint/tendermint/privval"
	"github.com/tendermint/tendermint/proxy"
	sm "github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/store"
	"github.com/tendermint/tendermint/types"
	db "github.com/tendermint/tm-db"
)

// WALGenerateNBlocks generates a consensus WAL. It does this by spinning up a
@@ -55,7 +55,8 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) {
	}
	state.Version.Consensus.App = kvstore.ProtocolVersion
	sm.SaveState(stateDB, state)
	blockStore := bc.NewBlockStore(blockStoreDB)
	blockStore := store.NewBlockStore(blockStoreDB)

	proxyApp := proxy.NewAppConns(proxy.NewLocalClientCreator(app))
	proxyApp.SetLogger(logger.With("module", "proxy"))
	if err := proxyApp.Start(); err != nil {
@@ -54,7 +54,7 @@ func (privKey PrivKeyEd25519) Bytes() []byte {
// incorrect signature.
func (privKey PrivKeyEd25519) Sign(msg []byte) ([]byte, error) {
	signatureBytes := ed25519.Sign(privKey[:], msg)
	return signatureBytes[:], nil
	return signatureBytes, nil
}

// PubKey gets the corresponding public key from the private key.
@@ -100,7 +100,7 @@ func GenPrivKey() PrivKeyEd25519 {
// genPrivKey generates a new ed25519 private key using the provided reader.
func genPrivKey(rand io.Reader) PrivKeyEd25519 {
	seed := make([]byte, 32)
	_, err := io.ReadFull(rand, seed[:])
	_, err := io.ReadFull(rand, seed)
	if err != nil {
		panic(err)
	}
@@ -24,10 +24,10 @@ func (zeroReader) Read(buf []byte) (int, error) {

// BenchmarkKeyGeneration benchmarks the given key generation algorithm using
// a dummy reader.
func BenchmarkKeyGeneration(b *testing.B, GenerateKey func(reader io.Reader) crypto.PrivKey) {
func BenchmarkKeyGeneration(b *testing.B, generateKey func(reader io.Reader) crypto.PrivKey) {
	var zero zeroReader
	for i := 0; i < b.N; i++ {
		GenerateKey(zero)
		generateKey(zero)
	}
}

@@ -3,7 +3,7 @@ package merkle
import (
	"bytes"

	cmn "github.com/tendermint/tendermint/libs/common"
	"github.com/pkg/errors"
)

//----------------------------------------
@@ -44,11 +44,11 @@ func (poz ProofOperators) Verify(root []byte, keypath string, args [][]byte) (er
		key := op.GetKey()
		if len(key) != 0 {
			if len(keys) == 0 {
				return cmn.NewError("Key path has insufficient # of parts: expected no more keys but got %+v", string(key))
				return errors.Errorf("Key path has insufficient # of parts: expected no more keys but got %+v", string(key))
			}
			lastKey := keys[len(keys)-1]
			if !bytes.Equal(lastKey, key) {
				return cmn.NewError("Key mismatch on operation #%d: expected %+v but got %+v", i, string(lastKey), string(key))
				return errors.Errorf("Key mismatch on operation #%d: expected %+v but got %+v", i, string(lastKey), string(key))
			}
			keys = keys[:len(keys)-1]
		}
@@ -58,10 +58,10 @@ func (poz ProofOperators) Verify(root []byte, keypath string, args [][]byte) (er
		}
	}
	if !bytes.Equal(root, args[0]) {
		return cmn.NewError("Calculated root hash is invalid: expected %+v but got %+v", root, args[0])
		return errors.Errorf("Calculated root hash is invalid: expected %+v but got %+v", root, args[0])
	}
	if len(keys) != 0 {
		return cmn.NewError("Keypath not consumed all")
		return errors.New("Keypath not consumed all")
	}
	return nil
}
@@ -92,7 +92,7 @@ func (prt *ProofRuntime) RegisterOpDecoder(typ string, dec OpDecoder) {
func (prt *ProofRuntime) Decode(pop ProofOp) (ProofOperator, error) {
	decoder := prt.decoders[pop.Type]
	if decoder == nil {
		return nil, cmn.NewError("unrecognized proof type %v", pop.Type)
		return nil, errors.Errorf("unrecognized proof type %v", pop.Type)
	}
	return decoder(pop)
}
@@ -102,7 +102,7 @@ func (prt *ProofRuntime) DecodeProof(proof *Proof) (ProofOperators, error) {
	for _, pop := range proof.Ops {
		operator, err := prt.Decode(pop)
		if err != nil {
			return nil, cmn.ErrorWrap(err, "decoding a proof operator")
			return nil, errors.Wrap(err, "decoding a proof operator")
		}
		poz = append(poz, operator)
	}
@@ -122,7 +122,7 @@ func (prt *ProofRuntime) VerifyAbsence(proof *Proof, root []byte, keypath string
func (prt *ProofRuntime) Verify(proof *Proof, root []byte, keypath string, args [][]byte) (err error) {
	poz, err := prt.DecodeProof(proof)
	if err != nil {
		return cmn.ErrorWrap(err, "decoding proof")
		return errors.Wrap(err, "decoding proof")
	}
	return poz.Verify(root, keypath, args)
}
@@ -6,7 +6,7 @@ import (
	"net/url"
	"strings"

	cmn "github.com/tendermint/tendermint/libs/common"
	"github.com/pkg/errors"
)

/*
@@ -87,7 +87,7 @@ func (pth KeyPath) String() string {
// Each key must use a known encoding.
func KeyPathToKeys(path string) (keys [][]byte, err error) {
	if path == "" || path[0] != '/' {
		return nil, cmn.NewError("key path string must start with a forward slash '/'")
		return nil, errors.New("key path string must start with a forward slash '/'")
	}
	parts := strings.Split(path[1:], "/")
	keys = make([][]byte, len(parts))
@@ -96,13 +96,13 @@ func KeyPathToKeys(path string) (keys [][]byte, err error) {
			hexPart := part[2:]
			key, err := hex.DecodeString(hexPart)
			if err != nil {
				return nil, cmn.ErrorWrap(err, "decoding hex-encoded part #%d: /%s", i, part)
				return nil, errors.Wrapf(err, "decoding hex-encoded part #%d: /%s", i, part)
			}
			keys[i] = key
		} else {
			key, err := url.PathUnescape(part)
			if err != nil {
				return nil, cmn.ErrorWrap(err, "decoding url-encoded part #%d: /%s", i, part)
				return nil, errors.Wrapf(err, "decoding url-encoded part #%d: /%s", i, part)
			}
			keys[i] = []byte(key) // TODO Test this with random bytes, I'm not sure that it works for arbitrary bytes...
		}
@@ -4,8 +4,9 @@ import (
	"bytes"
	"fmt"

	"github.com/pkg/errors"

	"github.com/tendermint/tendermint/crypto/tmhash"
	cmn "github.com/tendermint/tendermint/libs/common"
)

const ProofOpSimpleValue = "simple:v"
@@ -39,12 +40,12 @@ func NewSimpleValueOp(key []byte, proof *SimpleProof) SimpleValueOp {

func SimpleValueOpDecoder(pop ProofOp) (ProofOperator, error) {
	if pop.Type != ProofOpSimpleValue {
		return nil, cmn.NewError("unexpected ProofOp.Type; got %v, want %v", pop.Type, ProofOpSimpleValue)
		return nil, errors.Errorf("unexpected ProofOp.Type; got %v, want %v", pop.Type, ProofOpSimpleValue)
	}
	var op SimpleValueOp // a bit strange as we'll discard this, but it works.
	err := cdc.UnmarshalBinaryLengthPrefixed(pop.Data, &op)
	if err != nil {
		return nil, cmn.ErrorWrap(err, "decoding ProofOp.Data into SimpleValueOp")
		return nil, errors.Wrap(err, "decoding ProofOp.Data into SimpleValueOp")
	}
	return NewSimpleValueOp(pop.Key, op.Proof), nil
}
@@ -64,7 +65,7 @@ func (op SimpleValueOp) String() string {

func (op SimpleValueOp) Run(args [][]byte) ([][]byte, error) {
	if len(args) != 1 {
		return nil, cmn.NewError("expected 1 arg, got %v", len(args))
		return nil, errors.Errorf("expected 1 arg, got %v", len(args))
	}
	value := args[0]
	hasher := tmhash.New()
@@ -78,7 +79,7 @@ func (op SimpleValueOp) Run(args [][]byte) ([][]byte, error) {
	kvhash := leafHash(bz.Bytes())

	if !bytes.Equal(kvhash, op.Proof.LeafHash) {
		return nil, cmn.NewError("leaf hash mismatch: want %X got %X", op.Proof.LeafHash, kvhash)
		return nil, errors.Errorf("leaf hash mismatch: want %X got %X", op.Proof.LeafHash, kvhash)
	}

	return [][]byte{
@@ -3,9 +3,9 @@ package merkle
import (
	"testing"

	"github.com/pkg/errors"
	"github.com/stretchr/testify/assert"
	amino "github.com/tendermint/go-amino"
	cmn "github.com/tendermint/tendermint/libs/common"
)

const ProofOpDomino = "test:domino"
@@ -34,7 +34,7 @@ func DominoOpDecoder(pop ProofOp) (ProofOperator, error) {
	var op DominoOp // a bit strange as we'll discard this, but it works.
	err := amino.UnmarshalBinaryLengthPrefixed(pop.Data, &op)
	if err != nil {
		return nil, cmn.ErrorWrap(err, "decoding ProofOp.Data into SimpleValueOp")
		return nil, errors.Wrap(err, "decoding ProofOp.Data into SimpleValueOp")
	}
	return NewDominoOp(string(pop.Key), op.Input, op.Output), nil
}
@@ -50,10 +50,10 @@ func (dop DominoOp) ProofOp() ProofOp {

func (dop DominoOp) Run(input [][]byte) (output [][]byte, err error) {
	if len(input) != 1 {
		return nil, cmn.NewError("Expected input of length 1")
		return nil, errors.New("Expected input of length 1")
	}
	if string(input[0]) != dop.Input {
		return nil, cmn.NewError("Expected input %v, got %v",
		return nil, errors.Errorf("Expected input %v, got %v",
			dop.Input, string(input[0]))
	}
	return [][]byte{[]byte(dop.Output)}, nil
@@ -2,10 +2,9 @@ package merkle

import (
	"bytes"
	"errors"
	"fmt"

	cmn "github.com/tendermint/tendermint/libs/common"
	"github.com/pkg/errors"
)

// SimpleProof represents a simple Merkle proof.
@@ -75,11 +74,11 @@ func (sp *SimpleProof) Verify(rootHash []byte, leaf []byte) error {
		return errors.New("Proof index cannot be negative")
	}
	if !bytes.Equal(sp.LeafHash, leafHash) {
		return cmn.NewError("invalid leaf hash: wanted %X got %X", leafHash, sp.LeafHash)
		return errors.Errorf("invalid leaf hash: wanted %X got %X", leafHash, sp.LeafHash)
	}
	computedHash := sp.ComputeRootHash()
	if !bytes.Equal(computedHash, rootHash) {
		return cmn.NewError("invalid root hash: wanted %X got %X", rootHash, computedHash)
		return errors.Errorf("invalid root hash: wanted %X got %X", rootHash, computedHash)
	}
	return nil
}
@@ -162,11 +161,12 @@ func (spn *SimpleProofNode) FlattenAunts() [][]byte {
	// Nonrecursive impl.
	innerHashes := [][]byte{}
	for spn != nil {
		if spn.Left != nil {
		switch {
		case spn.Left != nil:
			innerHashes = append(innerHashes, spn.Left.Hash)
		} else if spn.Right != nil {
		case spn.Right != nil:
			innerHashes = append(innerHashes, spn.Right.Hash)
		} else {
		default:
			break
		}
		spn = spn.Parent
@@ -30,6 +30,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// nolint:gocritic
package secp256k1

import (
@@ -34,6 +34,14 @@ module.exports = {
          "/introduction/what-is-tendermint"
        ]
      },
      {
        title: "Guides",
        collapsable: false,
        children: [
          "/guides/go-built-in",
          "/guides/go"
        ]
      },
      {
        title: "Apps",
        collapsable: false,
@@ -20,3 +20,41 @@ it stands today.
If recorded decisions turned out to be lacking, convene a discussion, record the new decisions here, and then modify the code to match.

Note the context/background should be written in the present tense.

### Table of Contents:

- [ADR-001-Logging](./adr-001-logging.md)
- [ADR-002-Event-Subscription](./adr-002-event-subscription.md)
- [ADR-003-ABCI-APP-RPC](./adr-003-abci-app-rpc.md)
- [ADR-004-Historical-Validators](./adr-004-historical-validators.md)
- [ADR-005-Consensus-Params](./adr-005-consensus-params.md)
- [ADR-006-Trust-Metric](./adr-006-trust-metric.md)
- [ADR-007-Trust-Metric-Usage](./adr-007-trust-metric-usage.md)
- [ADR-008-Priv-Validator](./adr-008-priv-validator.md)
- [ADR-009-ABCI-Design](./adr-009-abci-design.md)
- [ADR-010-Crypto-Changes](./adr-010-crypto-changes.md)
- [ADR-011-Monitoring](./adr-011-monitoring.md)
- [ADR-012-Peer-Transport](./adr-012-peer-transport.md)
- [ADR-013-Symmetric-Crypto](./adr-013-symmetric-crypto.md)
- [ADR-014-Secp-Malleability](./adr-014-secp-malleability.md)
- [ADR-015-Crypto-Encoding](./adr-015-crypto-encoding.md)
- [ADR-016-Protocol-Versions](./adr-016-protocol-versions.md)
- [ADR-017-Chain-Versions](./adr-017-chain-versions.md)
- [ADR-018-ABCI-Validators](./adr-018-abci-validators.md)
- [ADR-019-Multisigs](./adr-019-multisigs.md)
- [ADR-020-Block-Size](./adr-020-block-size.md)
- [ADR-021-ABCI-Events](./adr-021-abci-events.md)
- [ADR-022-ABCI-Errors](./adr-022-abci-errors.md)
- [ADR-023-ABCI-Propose-tx](./adr-023-ABCI-propose-tx.md)
- [ADR-024-Sign-Bytes](./adr-024-sign-bytes.md)
- [ADR-025-Commit](./adr-025-commit.md)
- [ADR-026-General-Merkle-Proof](./adr-026-general-merkle-proof.md)
- [ADR-029-Check-Tx-Consensus](./adr-029-check-tx-consensus.md)
- [ADR-030-Consensus-Refactor](./adr-030-consensus-refactor.md)
- [ADR-033-Pubsub](./adr-033-pubsub.md)
- [ADR-034-Priv-Validator-File-Structure](./adr-034-priv-validator-file-structure.md)
- [ADR-035-Documentation](./adr-035-documentation.md)
- [ADR-037-Deliver-Block](./adr-037-deliver-block.md)
- [ADR-039-Peer-Behaviour](./adr-039-peer-behaviour.md)
- [ADR-041-Proposer-Selection-via-ABCI](./adr-041-proposer-selection-via-abci.md)
- [ADR-043-Blockchain-RiRi-Org](./adr-043-blockchain-riri-org.md)
239	docs/architecture/adr-042-state-sync.md	Normal file
@@ -0,0 +1,239 @@
# ADR 042: State Sync Design

## Changelog

2019-06-27: Init by EB
2019-07-04: Follow up by brapse

## Context

StateSync is a feature which would allow a new node to receive a
snapshot of the application state without downloading blocks or going
through consensus. Once downloaded, the node could switch to FastSync
and eventually participate in consensus. The goal of StateSync is to
facilitate setting up a new node as quickly as possible.

## Considerations

Because Tendermint doesn't know anything about the application state,
StateSync will broker messages between nodes and through
the ABCI to an opaque application. The implementation will have multiple
touch points on both the tendermint code base and ABCI application.

* A StateSync reactor to facilitate peer communication - Tendermint
* A set of ABCI messages to transmit application state to the reactor - Tendermint
* A set of MultiStore APIs for exposing snapshot data to the ABCI - ABCI application
* A storage format with validation and performance considerations - ABCI application
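As a rough illustration of the second touch point, the ABCI message set might take a shape like the following. This is a minimal sketch only: the message names and fields are assumptions made for illustration, not actual ABCI types.

```go
// Hypothetical ABCI snapshot messages (illustrative only; names and
// fields are assumptions, not part of the current ABCI spec).
package types

// RequestSnapshotChunk asks the application for one chunk of the
// snapshot taken at the given height.
type RequestSnapshotChunk struct {
	Height uint64 // height the snapshot was taken at
	Chunk  uint32 // zero-based chunk index within the snapshot
}

// ResponseSnapshotChunk carries the raw chunk bytes back to the
// StateSync reactor, which forwards them to the requesting peer.
type ResponseSnapshotChunk struct {
	Chunk []byte // serialized chunk in the agreed snapshot format
	Final bool   // true when this is the last chunk of the snapshot
}
```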
### Implementation Properties

Beyond the approach, any implementation of StateSync can be evaluated
across different criteria:

* Speed: Expected throughput of producing and consuming snapshots
* Safety: Cost of pushing invalid snapshots to a node
* Liveness: Cost of preventing a node from receiving/constructing a snapshot
* Effort: How much effort does an implementation require

### Implementation Questions

* What is the format of a snapshot
  * Complete snapshot
  * Ordered IAVL key ranges
  * Individually compressed chunks which can be validated
* How is data validated
  * Trust a peer with its data blindly
  * Trust a majority of peers
  * Use light client validation to validate each chunk against a
    consensus-produced merkle tree root
* What are the performance characteristics
  * Random vs sequential reads
  * How parallelizable is the scheduling algorithm

### Proposals

Broadly speaking there are two approaches to this problem which have had
varying degrees of discussion and progress. These approaches can be
summarized as:

**Lazy:** Where snapshots are produced dynamically at request time. This
solution would use the existing data structure.
**Eager:** Where snapshots are produced periodically and served from disk at
request time. This solution would create an auxiliary data structure
optimized for batch read/writes.

Additionally the proposals tend to vary on how they provide safety
properties.

**LightClient** Where a client can acquire the merkle root from the block
headers synchronized from a trusted validator set. Subsets of the application state,
called chunks, can therefore be validated on receipt to ensure each chunk
is part of the merkle root.

**Majority of Peers** Where manifests of chunks along with checksums are
downloaded and compared against versions provided by a majority of
peers.

#### Lazy StateSync

An [initial specification](https://docs.google.com/document/d/15MFsQtNA0MGBv7F096FFWRDzQ1vR6_dics5Y49vF8JU/edit?ts=5a0f3629) was published by Alexis Sellier.
In this design, the state has a given `size` of primitive elements (like
keys or nodes), each element is assigned a number from 0 to `size-1`,
and chunks consist of a range of such elements. Ackratos raised
[some concerns](https://docs.google.com/document/d/1npGTAa1qxe8EQZ1wG0a0Sip9t5oX2vYZNUDwr_LVRR4/edit)
about this design, somewhat specific to the IAVL tree, and mainly concerning
performance of random reads and of iterating through the tree to determine element numbers
(ie. elements aren't indexed by the element number).

An alternative design was suggested by Jae Kwon in
[#3639](https://github.com/tendermint/tendermint/issues/3639) where chunking
happens lazily and in a dynamic way: nodes request key ranges from their peers,
and peers respond with some subset of the
requested range and with notes on how to request the rest in parallel from other
peers. Unlike chunk numbers, keys can be verified directly. And if some keys in the
range are omitted, proofs for the range will fail to verify.
This way a node can start by requesting the entire tree from one peer,
and that peer can respond with, say, the first few keys and the ranges to request
from other peers.

Additionally, per chunk validation tends to come more naturally to the
Lazy approach since it tends to use the existing structure of the tree
(ie. keys or nodes) rather than state-sync specific chunks. Such a
design for tendermint was originally tracked in
[#828](https://github.com/tendermint/tendermint/issues/828).
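A minimal sketch of the message shapes such a lazy, key-range based protocol might use follows. The type and field names here are assumptions made for illustration; they are not defined by the proposal itself.

```go
// Hypothetical messages for the lazy, key-range based design described
// above (illustrative only; not an actual Tendermint API).
package statesync

// KeyRangeRequest asks a peer for all state under [Start, End).
type KeyRangeRequest struct {
	Height int64  // height whose state we are syncing
	Start  []byte // inclusive lower bound of the key range
	End    []byte // exclusive upper bound of the key range
}

// KeyRangeResponse returns a verifiable prefix of the requested range,
// plus the sub-ranges the requester should fetch from other peers.
type KeyRangeResponse struct {
	Keys      [][]byte          // keys actually served, in order
	Values    [][]byte          // corresponding values
	Proof     []byte            // range proof against the app hash
	Remaining []KeyRangeRequest // leftover ranges to fetch elsewhere
}
```

Because the response carries a range proof, a peer that silently drops keys from the served range causes verification to fail, which is what gives this design its per-request safety.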
#### Eager StateSync

Parity's ["Warp Sync"](https://wiki.parity.io/Warp-Sync-Snapshot-Format.html) is used to rapidly
download both blocks and state snapshots from peers. Data is carved into ~4MB
chunks and snappy compressed. Hashes of snappy compressed chunks are stored in a
manifest file which co-ordinates the state-sync. Obtaining a correct manifest
file seems to require an honest majority of peers. This means you may not find
out the state is incorrect until you download the whole thing and compare it
with a verified block header.

A similar solution was implemented by Binance in
[#3594](https://github.com/tendermint/tendermint/pull/3594)
based on their initial implementation in
[PR #3243](https://github.com/tendermint/tendermint/pull/3243)
and [some learnings](https://docs.google.com/document/d/1npGTAa1qxe8EQZ1wG0a0Sip9t5oX2vYZNUDwr_LVRR4/edit).
Note this still requires the honest majority peer assumption.

As an eager protocol, warp-sync can efficiently compress larger, more
predictable chunks once per snapshot and service many new peers. By
comparison lazy chunkers would have to compress each chunk at request
time.
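The coordinating manifest in such an eager design might look roughly like this. This is a sketch under stated assumptions: the field names are illustrative and do not reflect Parity's or Binance's actual on-disk format.

```go
// Hypothetical manifest for an eager, warp-sync style snapshot
// (illustrative only; not the actual Parity or Binance format).
package statesync

import "crypto/sha256"

// Manifest describes one snapshot as an ordered list of chunk hashes.
type Manifest struct {
	Height      int64               // height the snapshot was taken at
	AppHash     []byte              // app hash the restored state must match
	ChunkHashes [][sha256.Size]byte // checksum of each compressed chunk, in order
}

// VerifyChunk checks a downloaded chunk against the manifest before
// applying it.
func (m *Manifest) VerifyChunk(index int, chunk []byte) bool {
	if index < 0 || index >= len(m.ChunkHashes) {
		return false
	}
	return sha256.Sum256(chunk) == m.ChunkHashes[index]
}
```

Note that checksums only tie chunks to the manifest; the trustworthiness of the manifest itself is exactly the security question discussed below.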
### Analysis of Lazy vs Eager

Lazy and Eager have more in common than they differ. Both require
reactors on the tendermint side, a set of ABCI messages and a method for
serializing/deserializing snapshots facilitated by a SnapshotFormat.

The biggest difference between Lazy and Eager proposals is in the
read/write patterns necessitated by serving a snapshot chunk.
Specifically, Lazy State Sync performs random reads to the underlying data
structure while Eager can optimize for sequential reads.

This distinction between approaches was demonstrated by Binance's
[ackratos](https://github.com/ackratos) in their implementation of [Lazy
State sync](https://github.com/tendermint/tendermint/pull/3243), the
[analysis](https://docs.google.com/document/d/1npGTAa1qxe8EQZ1wG0a0Sip9t5oX2vYZNUDwr_LVRR4/)
of its performance, and the follow-up implementation of [Warp
Sync](http://github.com/tendermint/tendermint/pull/3594).

#### Comparing Security Models

There are several different security models which have been
discussed/proposed in the past but generally fall into two categories.

Light client validation: The node receiving data is expected to
first perform a light client sync and have all the necessary block
headers. With a trusted block header (trusted in the sense of coming from a
validator set subject to [weak
subjectivity](https://github.com/tendermint/tendermint/pull/3795)), the node
can compare any subset of keys, called a chunk, against the merkle root.
The advantage of light client validation is that the block headers are
signed by validators, who have something to lose for malicious
behaviour. If a validator were to provide an invalid proof, they can be
slashed.
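In code, per-chunk light client validation might look roughly like this. This is a sketch under stated assumptions: the proof type and verification call stand in for whatever merkle proof format the snapshot format ultimately defines.

```go
// Hypothetical per-chunk light client validation (illustrative only).
package statesync

import "errors"

// Proof verifies that a set of key/value pairs is included under a
// given merkle root; its concrete format is left to the snapshot format.
type Proof interface {
	Verify(root []byte, keys, values [][]byte) error
}

// VerifyChunk checks one chunk against the AppHash taken from a block
// header obtained via light client sync.
func VerifyChunk(trustedAppHash []byte, keys, values [][]byte, proof Proof) error {
	if len(keys) != len(values) {
		return errors.New("keys/values length mismatch")
	}
	// The chunk is accepted only if its contents prove up to the
	// consensus-produced merkle root from the trusted header.
	return proof.Verify(trustedAppHash, keys, values)
}
```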
Majority of peer validation: A manifest file containing a list of chunks
along with checksums of each chunk is downloaded from a
trusted source. That source can be a community resource similar to
[sum.golang.org](https://sum.golang.org) or downloaded from the majority
of peers. One disadvantage of the majority of peer security model is the
vulnerability to eclipse attacks, in which a malicious user looks to
saturate a target node's peer list and produce a manufactured picture of
majority.

A third option would be to include snapshot related data in the
block header. This could include the manifest with related checksums and be
secured through consensus. One challenge of this approach is to
ensure that creating snapshots does not put undue burden on block
proposers by synchronizing snapshot creation and block creation. One
approach to minimizing the burden is for the snapshot for height
`H` to be included in block `H+n`, where `n` is some number of blocks away,
giving the block proposer enough time to complete the snapshot
asynchronously.

## Proposal: Eager StateSync With Per Chunk Light Client Validation

The conclusion after some consideration of the advantages/disadvantages of
eager/lazy and different security models is to produce a state sync
which eagerly produces snapshots and uses light client validation. This
approach has the performance advantages of pre-computing efficient
snapshots which can be streamed to new nodes on demand using sequential IO.
Secondly, by using light client validation we can validate each chunk on
receipt and avoid the potential eclipse attack of majority of peer based
security.

### Implementation

Tendermint is responsible for downloading and verifying chunks of
AppState from peers. The ABCI application is responsible for taking
AppStateChunk objects from TM and constructing a valid state tree whose
root corresponds with the AppHash of the syncing block. In particular we
will need to implement:

* Build a new StateSync reactor which brokers message transmission between the peers
  and the ABCI application
* A set of ABCI messages
* Design SnapshotFormat as an interface (see the sketch after this list) which can:
  * validate chunks
  * read/write chunks from file
  * read/write chunks to/from application state store
  * convert manifests into chunkRequest ABCI messages
* Implement SnapshotFormat for cosmos-hub with concrete implementation for:
  * read/write chunks in a way which can be:
    * parallelized across peers
    * validated on receipt
  * read/write to/from IAVL+ tree
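A minimal sketch of the SnapshotFormat interface implied by the list above follows; the method names and signatures are assumptions made for illustration, not a committed API.

```go
// Hypothetical SnapshotFormat interface implied by the bullets above
// (illustrative only; method names and signatures are assumptions).
package statesync

import "io"

// Chunk is one serialized, individually verifiable piece of a snapshot.
type Chunk []byte

// ChunkRequest identifies one chunk to fetch via ABCI.
type ChunkRequest struct {
	Height uint64 // height the snapshot was taken at
	Index  uint32 // zero-based chunk index
}

// SnapshotFormat abstracts how the application serializes, stores and
// validates snapshot chunks.
type SnapshotFormat interface {
	// ValidateChunk checks a chunk against the trusted app hash.
	ValidateChunk(appHash []byte, c Chunk) error

	// ReadChunk and WriteChunk move chunks to and from disk.
	ReadChunk(r io.Reader) (Chunk, error)
	WriteChunk(w io.Writer, c Chunk) error

	// ApplyChunk writes a verified chunk into the application
	// state store (e.g. an IAVL+ tree).
	ApplyChunk(c Chunk) error

	// ChunkRequests converts a manifest into the ABCI requests
	// needed to fetch every chunk of the snapshot.
	ChunkRequests(manifest []byte) ([]ChunkRequest, error)
}
```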

|
||||
|
||||
## Implementation Path
|
||||
* Create StateSync reactor based on [#3753](https://github.com/tendermint/tendermint/pull/3753)
|
||||
* Design SnapshotFormat with an eye towards cosmos-hub implementation
|
||||
* ABCI message to send/receive SnapshotFormat
|
||||
* IAVL+ changes to support SnapshotFormat
|
||||
* Deliver Warp sync (no chunk validation)
|
||||
* light client implementation for weak subjectivity
|
||||
* Deliver StateSync with chunk validation
|
||||
|
||||
## Status
|
||||
|
||||
Proposed
|
||||
|
||||
## Concequences
|
||||
|
||||
### Neutral
|
||||
|
||||
### Positive
|
||||
* Safe & performant state sync design substantiated with real world implementation experience
|
||||
* General interfaces allowing application specific innovation
|
||||
* Parallizable implementation trajectory with reasonable engineering effort
|
||||
|
||||
### Negative
|
||||
* Static Scheduling lacks opportunity for real time chunk availability optimizations
|
||||
|
||||
## References
|
||||
[sync: Sync current state without full replay for Applications](https://github.com/tendermint/tendermint/issues/828) - original issue
|
||||
[tendermint state sync proposal](https://docs.google.com/document/d/15MFsQtNA0MGBv7F096FFWRDzQ1vR6_dics5Y49vF8JU/edit?ts=5a0f3629) - Cloudhead proposal
|
||||
[tendermint state sync proposal 2](https://docs.google.com/document/d/1npGTAa1qxe8EQZ1wG0a0Sip9t5oX2vYZNUDwr_LVRR4/edit) - ackratos proposal
|
||||
[proposal 2 implementation](https://github.com/tendermint/tendermint/pull/3243) - ackratos implementation
|
||||
[WIP General/Lazy State-Sync pseudo-spec](https://github.com/tendermint/tendermint/issues/3639) - Jae Proposal
|
||||
[Warp Sync Implementation](https://github.com/tendermint/tendermint/pull/3594) - ackratos
|
||||
[Chunk Proposal](https://github.com/tendermint/tendermint/pull/3799) - Bucky proposed
|
||||
|
||||
|
BIN
docs/architecture/img/state-sync.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 99 KiB |
@@ -1,4 +1,6 @@
|
||||
# 1 Guide Assumptions
|
||||
# Creating a built-in application in Go
|
||||
|
||||
## Guide assumptions
|
||||
|
||||
This guide is designed for beginners who want to get started with a Tendermint
|
||||
Core application from scratch. It does not assume that you have any prior
|
||||
@@ -17,7 +19,7 @@ yourself with the syntax.
|
||||
By following along with this guide, you'll create a Tendermint Core project
|
||||
called kvstore, a (very) simple distributed BFT key-value store.
|
||||
|
||||
# 1 Creating a built-in application in Go
|
||||
## Built-in app vs external app
|
||||
|
||||
Running your application inside the same process as Tendermint Core will give
|
||||
you the best possible performance.
|
||||
@@ -628,3 +630,10 @@ $ curl -s 'localhost:26657/abci_query?data="tendermint"'
|
||||
|
||||
"dGVuZGVybWludA==" and "cm9ja3M=" are the base64-encoding of the ASCII of
|
||||
"tendermint" and "rocks" accordingly.
|
||||
|
||||
## Outro
|
||||
|
||||
I hope everything went smoothly and your first, but hopefully not the last,
|
||||
Tendermint Core application is up and running. If not, please [open an issue on
|
||||
Github](https://github.com/tendermint/tendermint/issues/new/choose). To dig
|
||||
deeper, read [the docs](https://tendermint.com/docs/).
|
||||
|
@@ -1,4 +1,6 @@
|
||||
# 1 Guide Assumptions
|
||||
# Creating an application in Go
|
||||
|
||||
## Guide Assumptions
|
||||
|
||||
This guide is designed for beginners who want to get started with a Tendermint
|
||||
Core application from scratch. It does not assume that you have any prior
|
||||
@@ -17,7 +19,7 @@ yourself with the syntax.
|
||||
By following along with this guide, you'll create a Tendermint Core project
|
||||
called kvstore, a (very) simple distributed BFT key-value store.
|
||||
|
||||
# 1 Creating an application in Go
|
||||
## Built-in app vs external app
|
||||
|
||||
To get maximum performance it is better to run your application alongside
|
||||
Tendermint Core. [Cosmos SDK](https://github.com/cosmos/cosmos-sdk) is written
|
||||
@@ -512,3 +514,10 @@ $ curl -s 'localhost:26657/abci_query?data="tendermint"'
|
||||
|
||||
"dGVuZGVybWludA==" and "cm9ja3M=" are the base64-encoding of the ASCII of
|
||||
"tendermint" and "rocks" accordingly.
|
||||
|
||||
## Outro
|
||||
|
||||
I hope everything went smoothly and your first, but hopefully not the last,
|
||||
Tendermint Core application is up and running. If not, please [open an issue on
|
||||
Github](https://github.com/tendermint/tendermint/issues/new/choose). To dig
|
||||
deeper, read [the docs](https://tendermint.com/docs/).
|
||||
|
600
docs/guides/java.md
Normal file
@@ -0,0 +1,600 @@
|
||||
# Creating an application in Java
|
||||
|
||||
## Guide Assumptions
|
||||
|
||||
This guide is designed for beginners who want to get started with a Tendermint
|
||||
Core application from scratch. It does not assume that you have any prior
|
||||
experience with Tendermint Core.
|
||||
|
||||
Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state
|
||||
transition machine (your application) - written in any programming language - and securely
|
||||
replicates it on many machines.
|
||||
|
||||
By following along with this guide, you'll create a Tendermint Core project
|
||||
called kvstore, a (very) simple distributed BFT key-value store. The application (which should
|
||||
implement the blockchain interface (ABCI)) will be written in Java.
|
||||
|
||||
This guide assumes that you are not new to the JVM world. If you are new, please see [JVM Minimal Survival Guide](https://hadihariri.com/2013/12/29/jvm-minimal-survival-guide-for-the-dotnet-developer/#java-the-language-java-the-ecosystem-java-the-jvm) and [Gradle Docs](https://docs.gradle.org/current/userguide/userguide.html).
|
||||
|
||||
## Built-in app vs external app
|
||||
|
||||
If you use Golang, you can run your app and Tendermint Core in the same process to get maximum performance.
|
||||
[Cosmos SDK](https://github.com/cosmos/cosmos-sdk) is written this way.
|
||||
Please refer to [Writing a built-in Tendermint Core application in Go](./go-built-in.md) guide for details.
|
||||
|
||||
If you choose another language, like we did in this guide, you have to write a separate app,
|
||||
which will communicate with Tendermint Core via a socket (UNIX or TCP) or gRPC.
|
||||
This guide will show you how to build an external application using the gRPC server.
|
||||
|
||||
Having a separate application might give you better security guarantees as two
|
||||
processes would be communicating via an established binary protocol. Tendermint
|
||||
Core will not have access to the application's state.
|
||||
|
||||
## 1.1 Installing Java and Gradle
|
||||
|
||||
Please refer to [the Oracle's guide for installing JDK](https://www.oracle.com/technetwork/java/javase/downloads/index.html).
|
||||
|
||||
Verify that you have installed Java successfully:
|
||||
|
||||
```sh
|
||||
$ java -version
|
||||
java version "12.0.2" 2019-07-16
|
||||
Java(TM) SE Runtime Environment (build 12.0.2+10)
|
||||
Java HotSpot(TM) 64-Bit Server VM (build 12.0.2+10, mixed mode, sharing)
|
||||
```
|
||||
|
||||
You can use any version of Java 8 or higher.
|
||||
This guide is written using Java SE Development Kit 12.
|
||||
|
||||
Make sure you have `$JAVA_HOME` environment variable set:
|
||||
|
||||
```sh
|
||||
$ echo $JAVA_HOME
|
||||
/Library/Java/JavaVirtualMachines/jdk-12.0.2.jdk/Contents/Home
|
||||
```
|
||||
|
||||
For Gradle installation, please refer to [their official guide](https://gradle.org/install/).
|
||||
|
||||
## 1.2 Creating a new Java project
|
||||
|
||||
We'll start by creating a new Gradle project.
|
||||
|
||||
```sh
|
||||
$ export KVSTORE_HOME=~/kvstore
|
||||
$ mkdir $KVSTORE_HOME
|
||||
$ cd $KVSTORE_HOME
|
||||
```
|
||||
|
||||
Inside the example directory run:
|
||||
```sh
|
||||
gradle init --dsl groovy --package io.example --project-name example --type java-application --test-framework junit
|
||||
```
|
||||
This will create a new project for you. The tree of files should look like:
|
||||
```sh
|
||||
$ tree
|
||||
.
|
||||
|-- build.gradle
|
||||
|-- gradle
|
||||
| `-- wrapper
|
||||
| |-- gradle-wrapper.jar
|
||||
| `-- gradle-wrapper.properties
|
||||
|-- gradlew
|
||||
|-- gradlew.bat
|
||||
|-- settings.gradle
|
||||
`-- src
|
||||
|-- main
|
||||
| |-- java
|
||||
| | `-- io
|
||||
| | `-- example
|
||||
| | `-- App.java
|
||||
| `-- resources
|
||||
`-- test
|
||||
|-- java
|
||||
| `-- io
|
||||
| `-- example
|
||||
| `-- AppTest.java
|
||||
`-- resources
|
||||
```
|
||||
|
||||
When run, this should print "Hello world." to the standard output.
|
||||
|
||||
```sh
|
||||
$ ./gradlew run
|
||||
> Task :run
|
||||
Hello world.
|
||||
```
|
||||
|
||||
## 1.3 Writing a Tendermint Core application
|
||||
|
||||
Tendermint Core communicates with the application through the Application
|
||||
BlockChain Interface (ABCI). All message types are defined in the [protobuf
|
||||
file](https://github.com/tendermint/tendermint/blob/develop/abci/types/types.proto).
|
||||
This allows Tendermint Core to run applications written in any programming
|
||||
language.
|
||||
|
||||
### 1.3.1 Compile .proto files
|
||||
|
||||
Add the following piece to the top of the `build.gradle`:
|
||||
```groovy
|
||||
buildscript {
|
||||
repositories {
|
||||
mavenCentral()
|
||||
}
|
||||
dependencies {
|
||||
classpath 'com.google.protobuf:protobuf-gradle-plugin:0.8.8'
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Enable the protobuf plugin in the `plugins` section of the `build.gradle`:
|
||||
```groovy
|
||||
plugins {
|
||||
id 'com.google.protobuf' version '0.8.8'
|
||||
}
|
||||
```
|
||||
|
||||
Add the following code to `build.gradle`:
|
||||
```groovy
|
||||
protobuf {
|
||||
protoc {
|
||||
artifact = "com.google.protobuf:protoc:3.7.1"
|
||||
}
|
||||
plugins {
|
||||
grpc {
|
||||
artifact = 'io.grpc:protoc-gen-grpc-java:1.22.1'
|
||||
}
|
||||
}
|
||||
generateProtoTasks {
|
||||
all()*.plugins {
|
||||
grpc {}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Now we should be ready to compile the `*.proto` files.
|
||||
|
||||
|
||||
Copy the necessary `.proto` files to your project:
|
||||
```sh
|
||||
mkdir -p \
|
||||
$KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/abci/types \
|
||||
$KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/crypto/merkle \
|
||||
$KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/libs/common \
|
||||
$KVSTORE_HOME/src/main/proto/github.com/gogo/protobuf/gogoproto
|
||||
|
||||
cp $GOPATH/src/github.com/tendermint/tendermint/abci/types/types.proto \
|
||||
$KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/abci/types/types.proto
|
||||
cp $GOPATH/src/github.com/tendermint/tendermint/crypto/merkle/merkle.proto \
|
||||
$KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/crypto/merkle/merkle.proto
|
||||
cp $GOPATH/src/github.com/tendermint/tendermint/libs/common/types.proto \
|
||||
$KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/libs/common/types.proto
|
||||
cp $GOPATH/src/github.com/gogo/protobuf/gogoproto/gogo.proto \
|
||||
$KVSTORE_HOME/src/main/proto/github.com/gogo/protobuf/gogoproto/gogo.proto
|
||||
```
|
||||
|
||||
Add these dependencies to `build.gradle`:
|
||||
```groovy
|
||||
dependencies {
|
||||
implementation 'io.grpc:grpc-protobuf:1.22.1'
|
||||
implementation 'io.grpc:grpc-netty-shaded:1.22.1'
|
||||
implementation 'io.grpc:grpc-stub:1.22.1'
|
||||
}
|
||||
```
|
||||
|
||||
To generate all protobuf-type classes run:
|
||||
```sh
|
||||
./gradlew generateProto
|
||||
```
|
||||
To verify that everything went smoothly, you can inspect the `build/generated/` directory:
|
||||
```sh
|
||||
$ tree build/generated/
|
||||
build/generated/
|
||||
|-- source
|
||||
| `-- proto
|
||||
| `-- main
|
||||
| |-- grpc
|
||||
| | `-- types
|
||||
| | `-- ABCIApplicationGrpc.java
|
||||
| `-- java
|
||||
| |-- com
|
||||
| | `-- google
|
||||
| | `-- protobuf
|
||||
| | `-- GoGoProtos.java
|
||||
| |-- common
|
||||
| | `-- Types.java
|
||||
| |-- merkle
|
||||
| | `-- Merkle.java
|
||||
| `-- types
|
||||
| `-- Types.java
|
||||
```
|
||||
|
||||
### 1.3.2 Implementing ABCI
|
||||
|
||||
The resulting `$KVSTORE_HOME/build/generated/source/proto/main/grpc/types/ABCIApplicationGrpc.java` file
|
||||
contains the abstract class `ABCIApplicationImplBase`, which is an interface we'll need to implement.
|
||||
|
||||
Create `$KVSTORE_HOME/src/main/java/io/example/KVStoreApp.java` file with the following content:
|
||||
```java
|
||||
package io.example;
|
||||
|
||||
import io.grpc.stub.StreamObserver;
|
||||
import types.ABCIApplicationGrpc;
|
||||
import types.Types.*;
|
||||
|
||||
class KVStoreApp extends ABCIApplicationGrpc.ABCIApplicationImplBase {
|
||||
|
||||
// methods implementation
|
||||
|
||||
}
|
||||
```
|
||||
|
||||
Now I will go through each method of `ABCIApplicationImplBase` explaining when it's called and adding
|
||||
required business logic.
|
||||
|
||||
### 1.3.3 CheckTx
|
||||
|
||||
When a new transaction is added to Tendermint Core, it will ask the
|
||||
application to check it (validate the format, signatures, etc.).
|
||||
|
||||
```java
|
||||
@Override
|
||||
public void checkTx(RequestCheckTx req, StreamObserver<ResponseCheckTx> responseObserver) {
|
||||
var tx = req.getTx();
|
||||
int code = validate(tx);
|
||||
var resp = ResponseCheckTx.newBuilder()
|
||||
.setCode(code)
|
||||
.setGasWanted(1)
|
||||
.build();
|
||||
responseObserver.onNext(resp);
|
||||
responseObserver.onCompleted();
|
||||
}
|
||||
|
||||
private int validate(ByteString tx) {
|
||||
List<byte[]> parts = split(tx, '=');
|
||||
if (parts.size() != 2) {
|
||||
return 1;
|
||||
}
|
||||
byte[] key = parts.get(0);
|
||||
byte[] value = parts.get(1);
|
||||
|
||||
// check if the same key=value already exists
|
||||
var stored = getPersistedValue(key);
|
||||
if (stored != null && Arrays.equals(stored, value)) {
|
||||
return 2;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
private List<byte[]> split(ByteString tx, char separator) {
|
||||
var arr = tx.toByteArray();
|
||||
int i;
|
||||
for (i = 0; i < tx.size(); i++) {
|
||||
if (arr[i] == (byte)separator) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (i == tx.size()) {
|
||||
return Collections.emptyList();
|
||||
}
|
||||
return List.of(
|
||||
tx.substring(0, i).toByteArray(),
|
||||
tx.substring(i + 1).toByteArray()
|
||||
);
|
||||
}
|
||||
```
|
||||
|
||||
Don't worry if this does not compile yet; it also relies on `com.google.protobuf.ByteString`, `java.util.Arrays`, `java.util.Collections`, and `java.util.List` being imported.
|
||||
|
||||
If the transaction does not have the form `{bytes}={bytes}`, we return code `1`.
|
||||
When the same key=value pair already exists (same key and value), we return code
|
||||
`2`. For all others, we return a zero code indicating that they are valid.
|
||||
|
||||
Note that anything with non-zero code will be considered invalid (`-1`, `100`,
|
||||
etc.) by Tendermint Core.
|
||||
|
||||
Valid transactions will eventually be committed given they are not too big and
|
||||
have enough gas. To learn more about gas, check out ["the
|
||||
specification"](https://tendermint.com/docs/spec/abci/apps.html#gas).
|
||||
|
||||
For the underlying key-value store we'll use
|
||||
[JetBrains Xodus](https://github.com/JetBrains/xodus), which is a transactional schema-less embedded high-performance database written in Java.
|
||||
|
||||
`build.gradle`:
|
||||
```groovy
|
||||
dependencies {
|
||||
implementation 'org.jetbrains.xodus:xodus-environment:1.3.91'
|
||||
}
|
||||
```
|
||||
|
||||
```java
|
||||
...
|
||||
import jetbrains.exodus.ArrayByteIterable;
|
||||
import jetbrains.exodus.ByteIterable;
|
||||
import jetbrains.exodus.env.Environment;
|
||||
import jetbrains.exodus.env.Store;
|
||||
import jetbrains.exodus.env.StoreConfig;
|
||||
import jetbrains.exodus.env.Transaction;
|
||||
|
||||
class KVStoreApp extends ABCIApplicationGrpc.ABCIApplicationImplBase {
|
||||
private Environment env;
|
||||
private Transaction txn = null;
|
||||
private Store store = null;
|
||||
|
||||
KVStoreApp(Environment env) {
|
||||
this.env = env;
|
||||
}
|
||||
|
||||
...
|
||||
|
||||
private byte[] getPersistedValue(byte[] k) {
|
||||
return env.computeInReadonlyTransaction(txn -> {
|
||||
var store = env.openStore("store", StoreConfig.WITHOUT_DUPLICATES, txn);
|
||||
ByteIterable byteIterable = store.get(txn, new ArrayByteIterable(k));
|
||||
if (byteIterable == null) {
|
||||
return null;
|
||||
}
|
||||
return byteIterable.getBytesUnsafe();
|
||||
});
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### 1.3.4 BeginBlock -> DeliverTx -> EndBlock -> Commit
|
||||
|
||||
When Tendermint Core has decided on the block, it's transferred to the
|
||||
application in 3 parts: `BeginBlock`, one `DeliverTx` per transaction and
|
||||
`EndBlock` in the end. `DeliverTx` are being transferred asynchronously, but the
|
||||
responses are expected to come in order.
|
||||
|
||||
```java
|
||||
@Override
|
||||
public void beginBlock(RequestBeginBlock req, StreamObserver<ResponseBeginBlock> responseObserver) {
|
||||
txn = env.beginTransaction();
|
||||
store = env.openStore("store", StoreConfig.WITHOUT_DUPLICATES, txn);
|
||||
var resp = ResponseBeginBlock.newBuilder().build();
|
||||
responseObserver.onNext(resp);
|
||||
responseObserver.onCompleted();
|
||||
}
|
||||
```
|
||||
Here we begin a new transaction, which will accumulate the block's transactions and open the corresponding store.
|
||||
|
||||
```java
|
||||
@Override
|
||||
public void deliverTx(RequestDeliverTx req, StreamObserver<ResponseDeliverTx> responseObserver) {
|
||||
var tx = req.getTx();
|
||||
int code = validate(tx);
|
||||
if (code == 0) {
|
||||
List<byte[]> parts = split(tx, '=');
|
||||
var key = new ArrayByteIterable(parts.get(0));
|
||||
var value = new ArrayByteIterable(parts.get(1));
|
||||
store.put(txn, key, value);
|
||||
}
|
||||
var resp = ResponseDeliverTx.newBuilder()
|
||||
.setCode(code)
|
||||
.build();
|
||||
responseObserver.onNext(resp);
|
||||
responseObserver.onCompleted();
|
||||
}
|
||||
```
|
||||
|
||||
If the transaction is badly formatted or the same key=value pair already exists, we
|
||||
again return a non-zero code. Otherwise, we add it to the store.
|
||||
|
||||
In the current design, a block can include incorrect transactions (those that
|
||||
passed `CheckTx`, but failed `DeliverTx` or transactions included by the proposer
|
||||
directly). This is done for performance reasons.
|
||||
|
||||
Note we can't commit transactions inside `DeliverTx`, because in that case
|
||||
`Query`, which may be called in parallel, will return inconsistent data (i.e.
|
||||
it will report that some value already exists even when the actual block was not
|
||||
yet committed).
|
||||
|
||||
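`EndBlock` is called after the last `DeliverTx` of the block. Our application does not change the validator set or the consensus parameters, so a minimal handler (a sketch using the generated `RequestEndBlock`/`ResponseEndBlock` types) can simply return an empty response:

```java
@Override
public void endBlock(RequestEndBlock req, StreamObserver<ResponseEndBlock> responseObserver) {
    // no validator set or consensus parameter updates
    var resp = ResponseEndBlock.newBuilder().build();
    responseObserver.onNext(resp);
    responseObserver.onCompleted();
}
```

Note that Tendermint Core also calls the other ABCI methods (`Echo`, `Info`, `Flush`, etc.), which need similar handling in a complete application; see the full example project linked at the end of this guide.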
`Commit` instructs the application to persist the new state.
|
||||
|
||||
```java
|
||||
@Override
|
||||
public void commit(RequestCommit req, StreamObserver<ResponseCommit> responseObserver) {
|
||||
txn.commit();
|
||||
var resp = ResponseCommit.newBuilder()
|
||||
.setData(ByteString.copyFrom(new byte[8]))
|
||||
.build();
|
||||
responseObserver.onNext(resp);
|
||||
responseObserver.onCompleted();
|
||||
}
|
||||
```
|
||||
|
||||
### 1.3.5 Query
|
||||
|
||||
Now, when the client wants to know whether a particular key/value pair exists, it
|
||||
will call Tendermint Core RPC `/abci_query` endpoint, which in turn will call
|
||||
the application's `Query` method.
|
||||
|
||||
Applications are free to provide their own APIs. But by using Tendermint Core
|
||||
as a proxy, clients (including [light client
|
||||
package](https://godoc.org/github.com/tendermint/tendermint/lite)) can leverage
|
||||
the unified API across different applications. Plus they won't have to call the
|
||||
otherwise separate Tendermint Core API for additional proofs.
|
||||
|
||||
Note we don't include a proof here.
|
||||
|
||||
```java
|
||||
@Override
|
||||
public void query(RequestQuery req, StreamObserver<ResponseQuery> responseObserver) {
|
||||
var k = req.getData().toByteArray();
|
||||
var v = getPersistedValue(k);
|
||||
var builder = ResponseQuery.newBuilder();
|
||||
if (v == null) {
|
||||
builder.setLog("does not exist");
|
||||
} else {
|
||||
builder.setLog("exists");
|
||||
builder.setKey(ByteString.copyFrom(k));
|
||||
builder.setValue(ByteString.copyFrom(v));
|
||||
}
|
||||
responseObserver.onNext(builder.build());
|
||||
responseObserver.onCompleted();
|
||||
}
|
||||
```
|
||||
|
||||
The complete specification can be found
|
||||
[here](https://tendermint.com/docs/spec/abci/).
|
||||
|
||||
## 1.4 Starting the application and a Tendermint Core instance
|
||||
|
||||
Put the following code into the `$KVSTORE_HOME/src/main/java/io/example/App.java` file:
|
||||
|
||||
```java
|
||||
package io.example;
|
||||
|
||||
import jetbrains.exodus.env.Environment;
|
||||
import jetbrains.exodus.env.Environments;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
public class App {
|
||||
public static void main(String[] args) throws IOException, InterruptedException {
|
||||
try (Environment env = Environments.newInstance("tmp/storage")) {
|
||||
var app = new KVStoreApp(env);
|
||||
var server = new GrpcServer(app, 26658);
|
||||
server.start();
|
||||
server.blockUntilShutdown();
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
It is the entry point of the application.
|
||||
Here we create a special object `Environment`, which knows where to store the application state.
|
||||
Then we create and start the gRPC server to handle Tendermint Core requests.
|
||||
|
||||
Create the `$KVSTORE_HOME/src/main/java/io/example/GrpcServer.java` file with the following content:
|
||||
```java
|
||||
package io.example;
|
||||
|
||||
import io.grpc.BindableService;
|
||||
import io.grpc.Server;
|
||||
import io.grpc.ServerBuilder;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
class GrpcServer {
|
||||
private Server server;
|
||||
|
||||
GrpcServer(BindableService service, int port) {
|
||||
this.server = ServerBuilder.forPort(port)
|
||||
.addService(service)
|
||||
.build();
|
||||
}
|
||||
|
||||
void start() throws IOException {
|
||||
server.start();
|
||||
System.out.println("gRPC server started, listening on $port");
|
||||
Runtime.getRuntime().addShutdownHook(new Thread(() -> {
|
||||
System.out.println("shutting down gRPC server since JVM is shutting down");
|
||||
GrpcServer.this.stop();
|
||||
System.out.println("server shut down");
|
||||
}));
|
||||
}
|
||||
|
||||
private void stop() {
|
||||
server.shutdown();
|
||||
}
|
||||
|
||||
/**
|
||||
* Await termination on the main thread since the grpc library uses daemon threads.
|
||||
*/
|
||||
void blockUntilShutdown() throws InterruptedException {
|
||||
server.awaitTermination();
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## 1.5 Getting Up and Running
|
||||
|
||||
To create a default configuration, nodeKey and private validator files, let's
|
||||
execute `tendermint init`. But before we do that, we will need to install
|
||||
Tendermint Core.
|
||||
|
||||
```sh
|
||||
$ rm -rf /tmp/example
|
||||
$ cd $GOPATH/src/github.com/tendermint/tendermint
|
||||
$ make install
|
||||
$ TMHOME="/tmp/example" tendermint init
|
||||
|
||||
I[2019-07-16|18:20:36.480] Generated private validator module=main keyFile=/tmp/example/config/priv_validator_key.json stateFile=/tmp/example2/data/priv_validator_state.json
|
||||
I[2019-07-16|18:20:36.481] Generated node key module=main path=/tmp/example/config/node_key.json
|
||||
I[2019-07-16|18:20:36.482] Generated genesis file module=main path=/tmp/example/config/genesis.json
|
||||
```
|
||||
|
||||
Feel free to explore the generated files, which can be found at
|
||||
`/tmp/example/config` directory. Documentation on the config can be found
|
||||
[here](https://tendermint.com/docs/tendermint-core/configuration.html).
|
||||
|
||||
We are ready to start our application:
|
||||
|
||||
```sh
|
||||
./gradlew run
|
||||
|
||||
gRPC server started, listening on 26658
|
||||
```
|
||||
|
||||
Then we need to start Tendermint Core and point it to our application. Staying
|
||||
within the application directory execute:
|
||||
|
||||
```sh
|
||||
$ TMHOME="/tmp/example" tendermint node --abci grpc --proxy_app tcp://127.0.0.1:26658
|
||||
|
||||
I[2019-07-28|15:44:53.632] Version info module=main software=0.32.1 block=10 p2p=7
|
||||
I[2019-07-28|15:44:53.677] Starting Node module=main impl=Node
|
||||
I[2019-07-28|15:44:53.681] Started node module=main nodeInfo="{ProtocolVersion:{P2P:7 Block:10 App:0} ID_:7639e2841ccd47d5ae0f5aad3011b14049d3f452 ListenAddr:tcp://0.0.0.0:26656 Network:test-chain-Nhl3zk Version:0.32.1 Channels:4020212223303800 Moniker:Ivans-MacBook-Pro.local Other:{TxIndex:on RPCAddress:tcp://127.0.0.1:26657}}"
|
||||
I[2019-07-28|15:44:54.801] Executed block module=state height=8 validTxs=0 invalidTxs=0
|
||||
I[2019-07-28|15:44:54.814] Committed state module=state height=8 txs=0 appHash=0000000000000000
|
||||
```
|
||||
|
||||
Now open another tab in your terminal and try sending a transaction:
|
||||
|
||||
```sh
|
||||
$ curl -s 'localhost:26657/broadcast_tx_commit?tx="tendermint=rocks"'
|
||||
{
|
||||
"jsonrpc": "2.0",
|
||||
"id": "",
|
||||
"result": {
|
||||
"check_tx": {
|
||||
"gasWanted": "1"
|
||||
},
|
||||
"deliver_tx": {},
|
||||
"hash": "CDD3C6DFA0A08CAEDF546F9938A2EEC232209C24AA0E4201194E0AFB78A2C2BB",
|
||||
"height": "33"
|
||||
}
|
||||
```
|
||||
|
||||
The response should contain the height at which this transaction was committed.
|
||||
|
||||
Now let's check if the given key exists and what its value is:
|
||||
|
||||
```sh
|
||||
$ curl -s 'localhost:26657/abci_query?data="tendermint"'
|
||||
{
|
||||
"jsonrpc": "2.0",
|
||||
"id": "",
|
||||
"result": {
|
||||
"response": {
|
||||
"log": "exists",
|
||||
"key": "dGVuZGVybWludA==",
|
||||
"value": "cm9ja3My"
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
`dGVuZGVybWludA==` and `cm9ja3M=` are the base64 encodings of the ASCII strings `tendermint` and `rocks`, respectively.
|
||||
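You can verify the encoding yourself:

```sh
$ echo -n "tendermint" | base64
dGVuZGVybWludA==
```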
|
||||
## Outro
|
||||
|
||||
I hope everything went smoothly and your first, but hopefully not the last,
|
||||
Tendermint Core application is up and running. If not, please [open an issue on
|
||||
Github](https://github.com/tendermint/tendermint/issues/new/choose). To dig
|
||||
deeper, read [the docs](https://tendermint.com/docs/).
|
||||
|
||||
The full source code of this example project can be found [here](https://github.com/climber73/tendermint-abci-grpc-java).
|
574
docs/guides/kotlin.md
Normal file
@@ -0,0 +1,574 @@
|
||||
# Creating an application in Kotlin
|
||||
|
||||
## Guide Assumptions
|
||||
|
||||
This guide is designed for beginners who want to get started with a Tendermint
|
||||
Core application from scratch. It does not assume that you have any prior
|
||||
experience with Tendermint Core.
|
||||
|
||||
Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state
|
||||
transition machine (your application) - written in any programming language - and securely
|
||||
replicates it on many machines.
|
||||
|
||||
By following along with this guide, you'll create a Tendermint Core project
|
||||
called kvstore, a (very) simple distributed BFT key-value store. The application (which should
|
||||
implement the blockchain interface (ABCI)) will be written in Kotlin.
|
||||
|
||||
This guide assumes that you are not new to the JVM world. If you are new, please see [JVM Minimal Survival Guide](https://hadihariri.com/2013/12/29/jvm-minimal-survival-guide-for-the-dotnet-developer/#java-the-language-java-the-ecosystem-java-the-jvm) and [Gradle Docs](https://docs.gradle.org/current/userguide/userguide.html).
|
||||
|
||||
## Built-in app vs external app
|
||||
|
||||
If you use Golang, you can run your app and Tendermint Core in the same process to get maximum performance.
|
||||
[Cosmos SDK](https://github.com/cosmos/cosmos-sdk) is written this way.
|
||||
Please refer to [Writing a built-in Tendermint Core application in Go](./go-built-in.md) guide for details.
|
||||
|
||||
If you choose another language, like we did in this guide, you have to write a separate app,
|
||||
which will communicate with Tendermint Core via a socket (UNIX or TCP) or gRPC.
|
||||
This guide will show you how to build an external application using the gRPC server.
|
||||
|
||||
Having a separate application might give you better security guarantees as two
|
||||
processes would be communicating via an established binary protocol. Tendermint
|
||||
Core will not have access to the application's state.
|
||||
|
||||
## 1.1 Installing Java and Gradle
|
||||
|
||||
Please refer to [the Oracle's guide for installing JDK](https://www.oracle.com/technetwork/java/javase/downloads/index.html).
|
||||
|
||||
Verify that you have installed Java successfully:
|
||||
|
||||
```sh
|
||||
$ java -version
|
||||
java version "1.8.0_162"
|
||||
Java(TM) SE Runtime Environment (build 1.8.0_162-b12)
|
||||
Java HotSpot(TM) 64-Bit Server VM (build 25.162-b12, mixed mode)
|
||||
```
|
||||
|
||||
You can use any version of Java 8 or higher.
|
||||
In my case it is Java SE Development Kit 8.
|
||||
|
||||
Make sure you have `$JAVA_HOME` environment variable set:
|
||||
|
||||
```sh
|
||||
$ echo $JAVA_HOME
|
||||
/Library/Java/JavaVirtualMachines/jdk1.8.0_162.jdk/Contents/Home
|
||||
```
|
||||
|
||||
For Gradle installation, please refer to [their official guide](https://gradle.org/install/).
|
||||
|
||||
## 1.2 Creating a new Kotlin project
|
||||
|
||||
We'll start by creating a new Gradle project.
|
||||
|
||||
```sh
|
||||
$ export KVSTORE_HOME=~/kvstore
|
||||
$ mkdir $KVSTORE_HOME
|
||||
$ cd $KVSTORE_HOME
|
||||
```
|
||||
|
||||
Inside the example directory run:
|
||||
```sh
|
||||
gradle init --dsl groovy --package io.example --project-name example --type kotlin-application
|
||||
```
|
||||
This will create a new project for you. The tree of files should look like:
|
||||
```sh
|
||||
$ tree
|
||||
.
|
||||
|-- build.gradle
|
||||
|-- gradle
|
||||
| `-- wrapper
|
||||
| |-- gradle-wrapper.jar
|
||||
| `-- gradle-wrapper.properties
|
||||
|-- gradlew
|
||||
|-- gradlew.bat
|
||||
|-- settings.gradle
|
||||
`-- src
|
||||
|-- main
|
||||
| |-- kotlin
|
||||
| | `-- io
|
||||
| | `-- example
|
||||
| | `-- App.kt
|
||||
| `-- resources
|
||||
`-- test
|
||||
|-- kotlin
|
||||
| `-- io
|
||||
| `-- example
|
||||
| `-- AppTest.kt
|
||||
`-- resources
|
||||
```
|
||||
|
||||
When run, this should print "Hello world." to the standard output.
|
||||
|
||||
```sh
|
||||
$ ./gradlew run
|
||||
> Task :run
|
||||
Hello world.
|
||||
```
|
||||
|
||||
## 1.3 Writing a Tendermint Core application
|
||||
|
||||
Tendermint Core communicates with the application through the Application
|
||||
BlockChain Interface (ABCI). All message types are defined in the [protobuf
|
||||
file](https://github.com/tendermint/tendermint/blob/develop/abci/types/types.proto).
|
||||
This allows Tendermint Core to run applications written in any programming
|
||||
language.
|
||||
|
||||
### 1.3.1 Compile .proto files
|
||||
|
||||
Add the following piece to the top of the `build.gradle`:
|
||||
```groovy
|
||||
buildscript {
|
||||
repositories {
|
||||
mavenCentral()
|
||||
}
|
||||
dependencies {
|
||||
classpath 'com.google.protobuf:protobuf-gradle-plugin:0.8.8'
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Enable the protobuf plugin in the `plugins` section of the `build.gradle`:
|
||||
```groovy
|
||||
plugins {
|
||||
id 'com.google.protobuf' version '0.8.8'
|
||||
}
|
||||
```
|
||||
|
||||
Add the following code to `build.gradle`:
|
||||
```groovy
|
||||
protobuf {
|
||||
protoc {
|
||||
artifact = "com.google.protobuf:protoc:3.7.1"
|
||||
}
|
||||
plugins {
|
||||
grpc {
|
||||
artifact = 'io.grpc:protoc-gen-grpc-java:1.22.1'
|
||||
}
|
||||
}
|
||||
generateProtoTasks {
|
||||
all()*.plugins {
|
||||
grpc {}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Now we should be ready to compile the `*.proto` files.
|
||||
|
||||
|
||||
Copy the necessary `.proto` files to your project:
|
||||
```sh
|
||||
mkdir -p \
|
||||
$KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/abci/types \
|
||||
$KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/crypto/merkle \
|
||||
$KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/libs/common \
|
||||
$KVSTORE_HOME/src/main/proto/github.com/gogo/protobuf/gogoproto
|
||||
|
||||
cp $GOPATH/src/github.com/tendermint/tendermint/abci/types/types.proto \
|
||||
$KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/abci/types/types.proto
|
||||
cp $GOPATH/src/github.com/tendermint/tendermint/crypto/merkle/merkle.proto \
|
||||
$KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/crypto/merkle/merkle.proto
|
||||
cp $GOPATH/src/github.com/tendermint/tendermint/libs/common/types.proto \
|
||||
$KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/libs/common/types.proto
|
||||
cp $GOPATH/src/github.com/gogo/protobuf/gogoproto/gogo.proto \
|
||||
$KVSTORE_HOME/src/main/proto/github.com/gogo/protobuf/gogoproto/gogo.proto
|
||||
```
|
||||
|
||||
Add these dependencies to `build.gradle`:
|
||||
```groovy
|
||||
dependencies {
|
||||
implementation 'io.grpc:grpc-protobuf:1.22.1'
|
||||
implementation 'io.grpc:grpc-netty-shaded:1.22.1'
|
||||
implementation 'io.grpc:grpc-stub:1.22.1'
|
||||
}
|
||||
```
|
||||
|
||||
To generate all protobuf-type classes run:
|
||||
```sh
|
||||
./gradlew generateProto
|
||||
```
|
||||
To verify that everything went smoothly, you can inspect the `build/generated/` directory:
|
||||
```sh
|
||||
$ tree build/generated/
|
||||
build/generated/
|
||||
`-- source
|
||||
`-- proto
|
||||
`-- main
|
||||
|-- grpc
|
||||
| `-- types
|
||||
| `-- ABCIApplicationGrpc.java
|
||||
`-- java
|
||||
|-- com
|
||||
| `-- google
|
||||
| `-- protobuf
|
||||
| `-- GoGoProtos.java
|
||||
|-- common
|
||||
| `-- Types.java
|
||||
|-- merkle
|
||||
| `-- Merkle.java
|
||||
`-- types
|
||||
`-- Types.java
|
||||
```
|
||||
|
||||
### 1.3.2 Implementing ABCI
|
||||
|
||||
The resulting `$KVSTORE_HOME/build/generated/source/proto/main/grpc/types/ABCIApplicationGrpc.java` file
|
||||
contains the abstract class `ABCIApplicationImplBase`, which is an interface we'll need to implement.
|
||||
|
||||
Create `$KVSTORE_HOME/src/main/kotlin/io/example/KVStoreApp.kt` file with the following content:
|
||||
```kotlin
|
||||
package io.example
|
||||
|
||||
import io.grpc.stub.StreamObserver
|
||||
import types.ABCIApplicationGrpc
|
||||
import types.Types.*
|
||||
|
||||
class KVStoreApp : ABCIApplicationGrpc.ABCIApplicationImplBase() {
|
||||
|
||||
// methods implementation
|
||||
|
||||
}
|
||||
```
|
||||
|
||||
Now I will go through each method of `ABCIApplicationImplBase` explaining when it's called and adding
|
||||
required business logic.
|
||||
|
||||
### 1.3.3 CheckTx
|
||||
|
||||
When a new transaction is added to Tendermint Core, it will ask the
|
||||
application to check it (validate the format, signatures, etc.).
|
||||
|
||||
```kotlin
|
||||
override fun checkTx(req: RequestCheckTx, responseObserver: StreamObserver<ResponseCheckTx>) {
|
||||
val code = req.tx.validate()
|
||||
val resp = ResponseCheckTx.newBuilder()
|
||||
.setCode(code)
|
||||
.setGasWanted(1)
|
||||
.build()
|
||||
responseObserver.onNext(resp)
|
||||
responseObserver.onCompleted()
|
||||
}
|
||||
|
||||
private fun ByteString.validate(): Int {
|
||||
val parts = this.split('=')
|
||||
if (parts.size != 2) {
|
||||
return 1
|
||||
}
|
||||
val key = parts[0]
|
||||
val value = parts[1]
|
||||
|
||||
// check if the same key=value already exists
|
||||
val stored = getPersistedValue(key)
|
||||
if (stored != null && stored.contentEquals(value)) {
|
||||
return 2
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
private fun ByteString.split(separator: Char): List<ByteArray> {
|
||||
val arr = this.toByteArray()
|
||||
val i = (0 until this.size()).firstOrNull { arr[it] == separator.toByte() }
|
||||
?: return emptyList()
|
||||
return listOf(
|
||||
this.substring(0, i).toByteArray(),
|
||||
this.substring(i + 1).toByteArray()
|
||||
)
|
||||
}
|
||||
```
|
||||
|
||||
Don't worry if this does not compile yet; it also relies on `com.google.protobuf.ByteString` being imported.
|
||||
|
||||
If the transaction does not have the form `{bytes}={bytes}`, we return code `1`.
|
||||
When the same key=value pair already exists (same key and value), we return code
|
||||
`2`. For all others, we return a zero code indicating that they are valid.
|
||||
|
||||
Note that anything with non-zero code will be considered invalid (`-1`, `100`,
|
||||
etc.) by Tendermint Core.
|
||||
|
||||
Valid transactions will eventually be committed given they are not too big and
|
||||
have enough gas. To learn more about gas, check out ["the
|
||||
specification"](https://tendermint.com/docs/spec/abci/apps.html#gas).
|
||||
|
||||
For the underlying key-value store we'll use
|
||||
[JetBrains Xodus](https://github.com/JetBrains/xodus), which is a transactional schema-less embedded high-performance database written in Java.
|
||||
|
||||
`build.gradle`:
|
||||
```groovy
|
||||
dependencies {
|
||||
implementation 'org.jetbrains.xodus:xodus-environment:1.3.91'
|
||||
}
|
||||
```
|
||||
|
||||
```kotlin
|
||||
...
|
||||
import jetbrains.exodus.ArrayByteIterable
|
||||
import jetbrains.exodus.env.Environment
|
||||
import jetbrains.exodus.env.Store
|
||||
import jetbrains.exodus.env.StoreConfig
|
||||
import jetbrains.exodus.env.Transaction
|
||||
|
||||
class KVStoreApp(
|
||||
private val env: Environment
|
||||
) : ABCIApplicationGrpc.ABCIApplicationImplBase() {
|
||||
|
||||
private var txn: Transaction? = null
|
||||
private var store: Store? = null
|
||||
|
||||
...
|
||||
|
||||
private fun getPersistedValue(k: ByteArray): ByteArray? {
|
||||
return env.computeInReadonlyTransaction { txn ->
|
||||
val store = env.openStore("store", StoreConfig.WITHOUT_DUPLICATES, txn)
|
||||
store.get(txn, ArrayByteIterable(k))?.bytesUnsafe
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### 1.3.4 BeginBlock -> DeliverTx -> EndBlock -> Commit
|
||||
|
||||
When Tendermint Core has decided on the block, it's transferred to the
|
||||
application in 3 parts: `BeginBlock`, one `DeliverTx` per transaction and
|
||||
`EndBlock` in the end. `DeliverTx` are being transferred asynchronously, but the
|
||||
responses are expected to come in order.
|
||||
|
||||
```kotlin
|
||||
override fun beginBlock(req: RequestBeginBlock, responseObserver: StreamObserver<ResponseBeginBlock>) {
|
||||
txn = env.beginTransaction()
|
||||
store = env.openStore("store", StoreConfig.WITHOUT_DUPLICATES, txn!!)
|
||||
val resp = ResponseBeginBlock.newBuilder().build()
|
||||
responseObserver.onNext(resp)
|
||||
responseObserver.onCompleted()
|
||||
}
|
||||
```
|
||||
Here we begin a new transaction, which will accumulate the block's transactions and open the corresponding store.
|
||||
|
||||
```kotlin
|
||||
override fun deliverTx(req: RequestDeliverTx, responseObserver: StreamObserver<ResponseDeliverTx>) {
|
||||
val code = req.tx.validate()
|
||||
if (code == 0) {
|
||||
val parts = req.tx.split('=')
|
||||
val key = ArrayByteIterable(parts[0])
|
||||
val value = ArrayByteIterable(parts[1])
|
||||
store!!.put(txn!!, key, value)
|
||||
}
|
||||
val resp = ResponseDeliverTx.newBuilder()
|
||||
.setCode(code)
|
||||
.build()
|
||||
responseObserver.onNext(resp)
|
||||
responseObserver.onCompleted()
|
||||
}
|
||||
```
|
||||
|
||||
If the transaction is badly formatted or the same key=value pair already exists, we
|
||||
again return a non-zero code. Otherwise, we add it to the store.
|
||||
|
||||
In the current design, a block can include incorrect transactions (those that
|
||||
passed `CheckTx`, but failed `DeliverTx` or transactions included by the proposer
|
||||
directly). This is done for performance reasons.
|
||||
|
||||
Note we can't commit transactions inside `DeliverTx`, because in that case
|
||||
`Query`, which may be called in parallel, will return inconsistent data (i.e.
|
||||
it will report that some value already exists even when the actual block was not
|
||||
yet committed).
|
||||
|
||||
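`EndBlock` is called after the last `DeliverTx` of the block. Our application does not change the validator set or the consensus parameters, so a minimal handler (a sketch using the generated `RequestEndBlock`/`ResponseEndBlock` types) can simply return an empty response:

```kotlin
override fun endBlock(req: RequestEndBlock, responseObserver: StreamObserver<ResponseEndBlock>) {
    // no validator set or consensus parameter updates
    val resp = ResponseEndBlock.newBuilder().build()
    responseObserver.onNext(resp)
    responseObserver.onCompleted()
}
```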
`Commit` instructs the application to persist the new state.
|
||||
|
||||
```kotlin
|
||||
override fun commit(req: RequestCommit, responseObserver: StreamObserver<ResponseCommit>) {
|
||||
txn!!.commit()
|
||||
val resp = ResponseCommit.newBuilder()
|
||||
.setData(ByteString.copyFrom(ByteArray(8)))
|
||||
.build()
|
||||
responseObserver.onNext(resp)
|
||||
responseObserver.onCompleted()
|
||||
}
|
||||
```
|
||||
|
||||
### 1.3.5 Query
|
||||
|
||||
Now, when the client wants to know whether a particular key/value pair exists, it
|
||||
will call Tendermint Core RPC `/abci_query` endpoint, which in turn will call
|
||||
the application's `Query` method.
|
||||
|
||||
Applications are free to provide their own APIs. But by using Tendermint Core
|
||||
as a proxy, clients (including [light client
|
||||
package](https://godoc.org/github.com/tendermint/tendermint/lite)) can leverage
|
||||
the unified API across different applications. Plus they won't have to call the
|
||||
otherwise separate Tendermint Core API for additional proofs.
|
||||
|
||||
Note we don't include a proof here.
|
||||
|
||||
```kotlin
|
||||
override fun query(req: RequestQuery, responseObserver: StreamObserver<ResponseQuery>) {
|
||||
val k = req.data.toByteArray()
|
||||
val v = getPersistedValue(k)
|
||||
val builder = ResponseQuery.newBuilder()
|
||||
if (v == null) {
|
||||
builder.log = "does not exist"
|
||||
} else {
|
||||
builder.log = "exists"
|
||||
builder.key = ByteString.copyFrom(k)
|
||||
builder.value = ByteString.copyFrom(v)
|
||||
}
|
||||
responseObserver.onNext(builder.build())
|
||||
responseObserver.onCompleted()
|
||||
}
|
||||
```
|
||||
|
||||
The complete specification can be found
|
||||
[here](https://tendermint.com/docs/spec/abci/).
|
||||
|
||||
## 1.4 Starting the application and a Tendermint Core instance
|
||||
|
||||
Put the following code into the `$KVSTORE_HOME/src/main/kotlin/io/example/App.kt` file:
|
||||
|
||||
```kotlin
|
||||
package io.example
|
||||
|
||||
import jetbrains.exodus.env.Environments
|
||||
|
||||
fun main() {
|
||||
Environments.newInstance("tmp/storage").use { env ->
|
||||
val app = KVStoreApp(env)
|
||||
val server = GrpcServer(app, 26658)
|
||||
server.start()
|
||||
server.blockUntilShutdown()
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
It is the entry point of the application.
|
||||
Here we create a special object `Environment`, which knows where to store the application state.
|
||||
Then we create and start the gRPC server to handle Tendermint Core requests.
|
||||
|
||||
Create `$KVSTORE_HOME/src/main/kotlin/io/example/GrpcServer.kt` file with the following content:
|
||||
```kotlin
|
||||
package io.example
|
||||
|
||||
import io.grpc.BindableService
|
||||
import io.grpc.ServerBuilder
|
||||
|
||||
class GrpcServer(
|
||||
private val service: BindableService,
|
||||
private val port: Int
|
||||
) {
|
||||
private val server = ServerBuilder
|
||||
.forPort(port)
|
||||
.addService(service)
|
||||
.build()
|
||||
|
||||
fun start() {
|
||||
server.start()
|
||||
println("gRPC server started, listening on $port")
|
||||
Runtime.getRuntime().addShutdownHook(object : Thread() {
|
||||
override fun run() {
|
||||
println("shutting down gRPC server since JVM is shutting down")
|
||||
this@GrpcServer.stop()
|
||||
println("server shut down")
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
fun stop() {
|
||||
server.shutdown()
|
||||
}
|
||||
|
||||
/**
|
||||
* Await termination on the main thread since the grpc library uses daemon threads.
|
||||
*/
|
||||
fun blockUntilShutdown() {
|
||||
server.awaitTermination()
|
||||
}
|
||||
|
||||
}
|
||||
```
|
||||
|
||||
## 1.5 Getting Up and Running
|
||||
|
||||
To create a default configuration, nodeKey and private validator files, let's
|
||||
execute `tendermint init`. But before we do that, we will need to install
|
||||
Tendermint Core.
|
||||
|
||||
```sh
|
||||
$ rm -rf /tmp/example
|
||||
$ cd $GOPATH/src/github.com/tendermint/tendermint
|
||||
$ make install
|
||||
$ TMHOME="/tmp/example" tendermint init
|
||||
|
||||
I[2019-07-16|18:20:36.480] Generated private validator module=main keyFile=/tmp/example/config/priv_validator_key.json stateFile=/tmp/example2/data/priv_validator_state.json
|
||||
I[2019-07-16|18:20:36.481] Generated node key module=main path=/tmp/example/config/node_key.json
|
||||
I[2019-07-16|18:20:36.482] Generated genesis file module=main path=/tmp/example/config/genesis.json
|
||||
```
|
||||
|
||||
Feel free to explore the generated files, which can be found at
|
||||
`/tmp/example/config` directory. Documentation on the config can be found
|
||||
[here](https://tendermint.com/docs/tendermint-core/configuration.html).
|
||||
|
||||
We are ready to start our application:
|
||||
|
||||
```sh
|
||||
./gradlew run
|
||||
|
||||
gRPC server started, listening on 26658
|
||||
```
|
||||
|
||||
Then we need to start Tendermint Core and point it to our application. Staying
|
||||
within the application directory execute:
|
||||
|
||||
```sh
|
||||
$ TMHOME="/tmp/example" tendermint node --abci grpc --proxy_app tcp://127.0.0.1:26658
|
||||
|
||||
I[2019-07-28|15:44:53.632] Version info module=main software=0.32.1 block=10 p2p=7
|
||||
I[2019-07-28|15:44:53.677] Starting Node module=main impl=Node
|
||||
I[2019-07-28|15:44:53.681] Started node module=main nodeInfo="{ProtocolVersion:{P2P:7 Block:10 App:0} ID_:7639e2841ccd47d5ae0f5aad3011b14049d3f452 ListenAddr:tcp://0.0.0.0:26656 Network:test-chain-Nhl3zk Version:0.32.1 Channels:4020212223303800 Moniker:Ivans-MacBook-Pro.local Other:{TxIndex:on RPCAddress:tcp://127.0.0.1:26657}}"
|
||||
I[2019-07-28|15:44:54.801] Executed block module=state height=8 validTxs=0 invalidTxs=0
|
||||
I[2019-07-28|15:44:54.814] Committed state module=state height=8 txs=0 appHash=0000000000000000
|
||||
```
|
||||
|
||||
Now open another tab in your terminal and try sending a transaction:
|
||||
|
||||
```sh
|
||||
$ curl -s 'localhost:26657/broadcast_tx_commit?tx="tendermint=rocks"'
|
||||
{
|
||||
"jsonrpc": "2.0",
|
||||
"id": "",
|
||||
"result": {
|
||||
"check_tx": {
|
||||
"gasWanted": "1"
|
||||
},
|
||||
"deliver_tx": {},
|
||||
"hash": "CDD3C6DFA0A08CAEDF546F9938A2EEC232209C24AA0E4201194E0AFB78A2C2BB",
|
||||
"height": "33"
|
||||
}
|
||||
```
|
||||
|
||||
The response should contain the height at which this transaction was committed.
|
||||
|
||||
Now let's check if the given key exists and what its value is:
|
||||
|
||||
```sh
|
||||
$ curl -s 'localhost:26657/abci_query?data="tendermint"'
|
||||
{
|
||||
"jsonrpc": "2.0",
|
||||
"id": "",
|
||||
"result": {
|
||||
"response": {
|
||||
"log": "exists",
|
||||
"key": "dGVuZGVybWludA==",
|
||||
"value": "cm9ja3My"
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
`dGVuZGVybWludA==` and `cm9ja3M=` are the base64 encodings of the ASCII strings `tendermint` and `rocks`, respectively.
|
||||
|
||||
## Outro
|
||||
|
||||
I hope everything went smoothly and your first, but hopefully not the last,
|
||||
Tendermint Core application is up and running. If not, please [open an issue on
|
||||
Github](https://github.com/tendermint/tendermint/issues/new/choose). To dig
|
||||
deeper, read [the docs](https://tendermint.com/docs/).
|
||||
|
||||
The full source code of this example project can be found [here](https://github.com/climber73/tendermint-abci-grpc-kotlin).
|
Binary file not shown.
After Width: | Height: | Size: 43 KiB |
BIN
docs/spec/reactors/block_sync/bcv1/img/bc-reactor-new-fsm.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 41 KiB |
Binary file not shown.
After Width: | Height: | Size: 138 KiB |
237
docs/spec/reactors/block_sync/bcv1/impl-v1.md
Normal file
@@ -0,0 +1,237 @@
|
||||
# Blockchain Reactor v1
|
||||
|
||||
### Data Structures
|
||||
The data structures used are illustrated below.
|
||||
|
||||

|
||||
|
||||
#### BlockchainReactor
|
||||
- is a `p2p.BaseReactor`.
|
||||
- has a `store.BlockStore` for persistence.
|
||||
- executes blocks using an `sm.BlockExecutor`.
|
||||
- starts the FSM and the `poolRoutine()`.
|
||||
- relays the fast-sync responses and switch messages to the FSM.
|
||||
- handles errors from the FSM and, when necessary, reports them to the switch.
|
||||
- implements the blockchain reactor interface used by the FSM to send requests and errors to the switch and to reset state timers.
|
||||
- registers all the concrete types and interfaces for serialisation.
|
||||
|
||||
```go
|
||||
type BlockchainReactor struct {
|
||||
p2p.BaseReactor
|
||||
|
||||
initialState sm.State // immutable
|
||||
state sm.State
|
||||
|
||||
blockExec *sm.BlockExecutor
|
||||
store *store.BlockStore
|
||||
|
||||
fastSync bool
|
||||
|
||||
fsm *BcReactorFSM
|
||||
blocksSynced int
|
||||
|
||||
// Receive goroutine forwards messages to this channel to be processed in the context of the poolRoutine.
|
||||
messagesForFSMCh chan bcReactorMessage
|
||||
|
||||
// Switch goroutine may send RemovePeer to the blockchain reactor. This is an error message that is relayed
|
||||
// to this channel to be processed in the context of the poolRoutine.
|
||||
errorsForFSMCh chan bcReactorMessage
|
||||
|
||||
// This channel is used by the FSM and indirectly the block pool to report errors to the blockchain reactor and
|
||||
// the switch.
|
||||
eventsFromFSMCh chan bcFsmMessage
|
||||
}
|
||||
```
|
||||
|
||||
#### BcReactorFSM
|
||||
- implements a simple finite state machine.
|
||||
- has a state and a state timer.
|
||||
- has a `BlockPool` to keep track of block requests sent to peers and blocks received from peers.
|
||||
- uses an interface to send status requests, block requests and reporting errors. The interface is implemented by the `BlockchainReactor` and tests.
|
||||
|
||||
```go
|
||||
type BcReactorFSM struct {
|
||||
logger log.Logger
|
||||
mtx sync.Mutex
|
||||
|
||||
startTime time.Time
|
||||
|
||||
state *bcReactorFSMState
|
||||
stateTimer *time.Timer
|
||||
pool *BlockPool
|
||||
|
||||
// interface used to call the Blockchain reactor to send StatusRequest, BlockRequest, reporting errors, etc.
|
||||
toBcR bcReactor
|
||||
}
|
||||
```
|
||||
|
||||
#### BlockPool
|
||||
- maintains a peer set, implemented as a map of peer ID to `BpPeer`.
|
||||
- maintains a set of requests made to peers, implemented as a map of block request heights to peer IDs.
|
||||
- maintains a list of future block requests needed to advance the fast-sync. This is a list of block heights.
|
||||
- keeps track of the maximum height of the peers in the set.
|
||||
- uses an interface to send requests and report errors to the reactor (via FSM).
|
||||
|
||||
```go
|
||||
type BlockPool struct {
|
||||
logger log.Logger
|
||||
// Set of peers that have sent status responses, with height bigger than pool.Height
|
||||
peers map[p2p.ID]*BpPeer
|
||||
// Set of block heights and the corresponding peers from where a block response is expected or has been received.
|
||||
blocks map[int64]p2p.ID
|
||||
|
||||
plannedRequests map[int64]struct{} // list of blocks to be assigned peers for blockRequest
|
||||
nextRequestHeight int64 // next height to be added to plannedRequests
|
||||
|
||||
Height int64 // height of next block to execute
|
||||
MaxPeerHeight int64 // maximum height of all peers
|
||||
toBcR bcReactor
|
||||
}
|
||||
```
|
||||
Some reasons for the `BlockPool` data structure content:
|
||||
1. If a peer is removed by the switch, fast access is required to the peer and to the block requests made to that peer, in order to redo them.
|
||||
2. When block verification fails, fast access is required from the block height to the peer and to the block requests made to that peer, in order to redo them.
|
||||
3. The `BlockchainReactor` main routine decides when the block pool is running low and asks the `BlockPool` (via FSM) to make more requests. The `BlockPool` creates a list of requests and triggers the sending of the block requests (via the interface). The reason it maintains a list of requests is the redo operations that may occur during error handling. These are redone when the `BlockchainReactor` requires more blocks.
|
||||
|
||||
#### BpPeer
|
||||
- keeps track of a single peer, with height bigger than the initial height.
|
||||
- maintains the block requests made to the peer and the blocks received from the peer until they are executed.
|
||||
- monitors the peer speed when there are pending requests.
|
||||
- it has an active timer when pending requests are present and reports error on timeout.
|
||||
|
||||
```go
|
||||
type BpPeer struct {
|
||||
logger log.Logger
|
||||
ID p2p.ID
|
||||
|
||||
Height int64 // the peer reported height
|
||||
NumPendingBlockRequests int // number of requests still waiting for block responses
|
||||
blocks map[int64]*types.Block // blocks received or expected to be received from this peer
|
||||
blockResponseTimer *time.Timer
|
||||
recvMonitor *flow.Monitor
|
||||
params *BpPeerParams // parameters for timer and monitor
|
||||
|
||||
onErr func(err error, peerID p2p.ID) // function to call on error
|
||||
}
|
||||
```
|
||||
|
||||
### Concurrency Model
|
||||
|
||||
The diagram below shows the goroutines (depicted by the gray blocks), timers (shown on the left with their values) and channels (colored rectangles). The FSM box shows some of the functionality and it is not a separate goroutine.
|
||||
|
||||
The interface used by the FSM is shown in light red with the `IF` block. This is used to:
|
||||
- send block requests
|
||||
- report peer errors to the switch - this results in the reactor calling `switch.StopPeerForError()` and, if triggered by the peer timeout routine, a `removePeerEv` is sent to the FSM and action is taken from the context of the `poolRoutine()`
|
||||
- ask the reactor to reset the state timers. The timers are owned by the FSM while the timeout routine is defined by the reactor. This was done in order to avoid running timers in tests and will change in the next revision.
|
||||
|
||||
There are two main goroutines implemented by the blockchain reactor. All I/O operations are performed from the `poolRoutine()` context while the CPU intensive operations related to the block execution are performed from the context of the `executeBlocksRoutine()`. All goroutines are detailed in the next sections.
|
||||
|
||||

|
||||
|
||||
#### Receive()
|
||||
Fast-sync messages from peers are received by this goroutine. It performs basic validation and:
|
||||
- in helper mode (i.e. for request message) it replies immediately. This is different than the proposal in adr-040 that specifies having the FSM handling these.
|
||||
- forwards response messages to the `poolRoutine()`.
|
||||
|
||||
#### poolRoutine()
|
||||
(name kept as in the previous reactor).
|
||||
It starts the `executeBlocksRoutine()` and the FSM. It then waits in a loop for events (a simplified sketch of the loop follows this list). These are received from the following channels:
|
||||
- `sendBlockRequestTicker.C` - every 10 msec the reactor asks the FSM to make more block requests, up to a maximum. Note: currently this value is constant but could be changed based on low/high watermark thresholds for the number of blocks received and waiting to be processed, the number of blockResponse messages waiting in messagesForFSMCh, etc.
|
||||
- `statusUpdateTicker.C` - every 10 seconds the reactor broadcasts status requests to peers. While adr-040 specifies this to run within the FSM, at this point this functionality is kept in the reactor.
|
||||
- `messagesForFSMCh` - the `Receive()` goroutine sends status and block response messages to this channel and the reactor calls FSM to handle them.
|
||||
- `errorsForFSMCh` - this channel receives the following events:
|
||||
- peer remove - when the switch removes a peer
|
||||
- state timeout event - when FSM state timers trigger
|
||||
The reactor forwards these messages to the FSM.
|
||||
- `eventsFromFSMCh` - there are two types of events sent over this channel:
|
||||
- `syncFinishedEv` - triggered when FSM enters `finished` state and calls the switchToConsensus() interface function.
|
||||
- `peerErrorEv` - the peer timer expiry goroutine sends this event over the channel for processing from the poolRoutine() context.
|
||||
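A simplified Go sketch of this event loop (the shape is inferred from the description above; handler names are illustrative, not the exact implementation):

```go
// Sketch of the poolRoutine() event loop; shutdown and error paths omitted.
for {
	select {
	case <-sendBlockRequestTicker.C:
		// ask the FSM to create more block requests, up to a maximum
		bcR.fsm.Handle(&bcReactorMessage{event: makeRequestsEv})
	case <-statusUpdateTicker.C:
		// broadcast status requests to peers
		go bcR.sendStatusRequest()
	case msg := <-bcR.messagesForFSMCh:
		// status and block responses forwarded by Receive()
		bcR.fsm.Handle(&msg)
	case errMsg := <-bcR.errorsForFSMCh:
		// peer removals from the switch and state timeout events
		bcR.fsm.Handle(&errMsg)
	case event := <-bcR.eventsFromFSMCh:
		// syncFinishedEv and peerErrorEv emitted by the FSM
		bcR.processBcFsmMessage(&event)
	}
}
```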
|
||||
#### executeBlocksRoutine()
|
||||
Started by the `poolRoutine()`, it retrieves blocks from the pool and executes them:
|
||||
- `processReceivedBlockTicker.C` - a ticker event is received over the channel every 10msec and its handling results in a signal being sent to the doProcessBlockCh channel.
|
||||
- `doProcessBlockCh` - events are received on this channel as described above and, upon processing, blocks are retrieved from the pool and executed.
|
||||
|
||||
|
||||
### FSM
|
||||
|
||||

|
||||
|
||||
#### States
|
||||
##### init (aka unknown)
|
||||
The FSM is created in `unknown` state. When started, by the reactor (`startFSMEv`), it broadcasts Status requests and transitions to `waitForPeer` state.
|
||||
|
||||
##### waitForPeer
|
||||
In this state, the FSM waits for a Status response from a "tall" peer. A timer is running in this state to allow the FSM to finish if there are no useful peers.
|
||||
|
||||
If the timer expires, it moves to `finished` state and calls the reactor to switch to consensus.
|
||||
If a Status response is received from a peer within the timeout, the FSM transitions to `waitForBlock` state.
|
||||
|
||||
##### waitForBlock
|
||||
In this state the FSM makes Block requests (triggered by a ticker in reactor) and waits for Block responses. There is a timer running in this state to detect if a peer is not sending the block at current processing height. If the timer expires, the FSM removes the peer where the request was sent and all requests made to that peer are redone.
|
||||
|
||||
As blocks are received they are stored by the pool. Block execution is independently performed by the reactor and the result reported to the FSM:
|
||||
- if there are no errors, the FSM increases the pool height and resets the state timer.
|
||||
- if there are errors, the peers that delivered the two blocks (at height and height+1) are removed and the requests redone.
|
||||
|
||||
In this state the FSM may receive peer remove events in any of the following scenarios:
|
||||
- the switch is removing a peer
|
||||
- a peer is penalized because it has not responded to some block requests for a long time
|
||||
- a peer is penalized for being slow
|
||||
|
||||
When processing of the last block (the one whose height equals the highest peer height minus one) succeeds, the FSM transitions to the `finished` state.
|
||||
If, after a peer update or removal, the pool height is the same as `maxPeerHeight`, the FSM transitions to the `finished` state (see the sketch below).
|
||||
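A sketch of that termination check, assuming the pool tracks its current height and the maximum height reported by peers under the field names below; `ReachedMaxHeight` is an illustrative name.

```go
// ReachedMaxHeight reports whether fast sync has processed all blocks
// the connected peers can provide (field names are assumptions).
func (pool *BlockPool) ReachedMaxHeight() bool {
	return pool.Height >= pool.MaxPeerHeight
}
```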
|
||||
##### finished
|
||||
When entering this state, the FSM calls the reactor to switch to consensus and performs cleanup.
|
||||
|
||||
#### Events
|
||||
|
||||
The following events are handled by the FSM:
|
||||
|
||||
```go
|
||||
const (
|
||||
startFSMEv = iota + 1
|
||||
statusResponseEv
|
||||
blockResponseEv
|
||||
processedBlockEv
|
||||
makeRequestsEv
|
||||
stopFSMEv
|
||||
peerRemoveEv = iota + 256
|
||||
stateTimeoutEv
|
||||
)
|
||||
```
|
||||
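Note that `peerRemoveEv = iota + 256` leaves a numeric gap that separates the error events from the ordinary message events. Below is a minimal sketch of how a state-table FSM might dispatch these events; the state struct, field names, and `resetStateTimer()` are assumptions, not the actual implementation.

```go
// Illustrative state-table dispatch.
type bcFsmState struct {
	name    string
	handler func(fsm *BcReactorFSM, ev int, data interface{}) (next *bcFsmState, err error)
}

func (fsm *BcReactorFSM) Handle(msg *bcReactorMessage) error {
	fsm.mtx.Lock()
	defer fsm.mtx.Unlock()

	next, err := fsm.state.handler(fsm, msg.event, msg)
	if next != nil && next != fsm.state {
		fsm.state = next
		fsm.resetStateTimer() // state timers drive stateTimeoutEv
	}
	return err
}
```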
|
||||
### Examples of Scenarios and Termination Handling
|
||||
A few scenarios are covered in this section, together with the current/proposed handling.
|
||||
In general, the scenarios involving faulty peers are made worse by the fact that they may quickly be re-added.
|
||||
|
||||
#### 1. No Tall Peers
|
||||
|
||||
S: A node is started and, while status responses are received, none of the peers is at a height higher than this node's.
|
||||
|
||||
H: The FSM times out in the `waitForPeer` state and moves to the `finished` state, where it calls the reactor to switch to consensus.
|
||||
|
||||
#### 2. Typical Fast Sync
|
||||
|
||||
S: A node fast syncs blocks from honest peers and eventually downloads and executes the penultimate block.
|
||||
|
||||
H: The FSM, in the `waitForBlock` state, receives `processedBlockEv` from the reactor and detects that the termination height has been reached.
|
||||
|
||||
#### 3. Peer Claims Big Height but no Blocks
|
||||
|
||||
S: In this scenario a faulty peer claims a big height (for which there are no blocks).
|
||||
|
||||
H: The requests for the non-existent block will time out, the peer is removed, and the pool's `MaxPeerHeight` is updated. The FSM checks whether the termination height has been reached whenever peers are removed.
|
||||
|
||||
#### 4. Highest Peer Removed or Updated to Short
|
||||
|
||||
S: The fast-syncing node is caught up with all peers except one tall peer. The tall peer is then removed, or it sends a status response with a lower height.
|
||||
|
||||
H: The FSM checks the termination condition on peer removals and updates.
|
||||
|
||||
#### 5. Block At Current Height Delayed
|
||||
|
||||
S: A peer can block the progress of fast sync by indefinitely delaying the block response for the current processing height (h1).
|
||||
|
||||
H: Currently, given h1 < h2, there is no enforcement at the peer level that the response for h1 must be received before the one for h2, so a peer would time out only after delivering all blocks except h1. However, the `waitForBlock` state timer fires if the block for the current processing height is not received within a timeout. The peer is then removed, and the requests made to that peer (including the one for the current height) are redone.
|
BIN
docs/spec/reactors/block_sync/img/bc-reactor-routines.png
New binary file (not shown; 265 KiB)
@@ -1,4 +1,6 @@
|
||||
## Blockchain Reactor
|
||||
## Blockchain Reactor v0 Modules
|
||||
|
||||
### Blockchain Reactor
|
||||
|
||||
- coordinates the pool for syncing
|
||||
- coordinates the store for persistence
|
||||
@@ -8,7 +10,7 @@
|
||||
- starts the pool.Start() and its poolRoutine()
|
||||
- registers all the concrete types and interfaces for serialisation
|
||||
|
||||
### poolRoutine
|
||||
#### poolRoutine
|
||||
|
||||
- listens to these channels:
|
||||
- pool requests blocks from a specific peer by posting to requestsCh, block reactor then sends
|
||||
@@ -22,7 +24,7 @@
|
||||
- implements Receive which is called by the switch/peer
|
||||
- calls AddBlock on the pool when it receives a new block from a peer
|
||||
|
||||
## Block Pool
|
||||
### Block Pool
|
||||
|
||||
- responsible for downloading blocks from peers
|
||||
- makeRequestersRoutine()
|
||||
@@ -36,6 +38,7 @@
|
||||
- we receive a block
|
||||
- gotBlockCh is strange
|
||||
|
||||
## Block Store
|
||||
|
||||
- persists blocks to disk
|
||||
### Go Routines in Blockchain Reactor
|
||||
|
||||

|
||||
|
@@ -138,6 +138,12 @@ max_subscriptions_per_client = 5
|
||||
# See https://github.com/tendermint/tendermint/issues/3435
|
||||
timeout_broadcast_tx_commit = "10s"
|
||||
|
||||
# Maximum size of request body, in bytes
|
||||
max_body_bytes = {{ .RPC.MaxBodyBytes }}
|
||||
|
||||
# Maximum size of request header, in bytes
|
||||
max_header_bytes = {{ .RPC.MaxHeaderBytes }}
|
||||
|
||||
# The path to a file containing certificate that is used to create the HTTPS server.
|
||||
# Might be either an absolute path or a path relative to tendermint's config directory.
|
||||
# If the certificate is signed by a certificate authority,
|
||||
@@ -234,6 +240,18 @@ max_txs_bytes = 1073741824
|
||||
# Size of the cache (used to filter transactions we saw earlier) in transactions
|
||||
cache_size = 10000
|
||||
|
||||
# Maximum size of a single transaction.
|
||||
# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes} + {amino overhead}.
|
||||
max_tx_bytes = 1048576
|
||||
|
||||
##### fast sync configuration options #####
|
||||
[fastsync]
|
||||
|
||||
# Fast Sync version to use:
|
||||
# 1) "v0" (default) - the legacy fast sync implementation
|
||||
# 2) "v1" - refactor of v0 version for better testability
|
||||
version = "v0"
|
||||
|
||||
##### consensus configuration options #####
|
||||
[consensus]
|
||||
|
||||
|
@@ -5,8 +5,8 @@ import (
|
||||
"sync"
|
||||
|
||||
clist "github.com/tendermint/tendermint/libs/clist"
|
||||
dbm "github.com/tendermint/tendermint/libs/db"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
dbm "github.com/tendermint/tm-db"
|
||||
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
|
@@ -7,10 +7,10 @@ import (
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
dbm "github.com/tendermint/tendermint/libs/db"
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
tmtime "github.com/tendermint/tendermint/types/time"
|
||||
dbm "github.com/tendermint/tm-db"
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
|
@@ -11,10 +11,10 @@ import (
|
||||
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
"github.com/tendermint/tendermint/crypto/secp256k1"
|
||||
dbm "github.com/tendermint/tendermint/libs/db"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
dbm "github.com/tendermint/tm-db"
|
||||
)
|
||||
|
||||
// evidenceLogger is a TestingLogger which uses a different
|
||||
|
@@ -3,8 +3,8 @@ package evidence
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
dbm "github.com/tendermint/tendermint/libs/db"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
dbm "github.com/tendermint/tm-db"
|
||||
)
|
||||
|
||||
/*
|
||||
|
@@ -4,8 +4,8 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
dbm "github.com/tendermint/tendermint/libs/db"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
dbm "github.com/tendermint/tm-db"
|
||||
)
|
||||
|
||||
//-------------------------------------------
|
||||
|
24
go.mod
@@ -3,30 +3,29 @@ module github.com/tendermint/tendermint
|
||||
go 1.12
|
||||
|
||||
require (
|
||||
github.com/BurntSushi/toml v0.3.1 // indirect
|
||||
github.com/VividCortex/gohistogram v1.0.0 // indirect
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 // indirect
|
||||
github.com/btcsuite/btcd v0.0.0-20190115013929-ed77733ec07d
|
||||
github.com/btcsuite/btcutil v0.0.0-20180706230648-ab6388e0c60a
|
||||
github.com/etcd-io/bbolt v1.3.2
|
||||
github.com/fortytw2/leaktest v1.2.0
|
||||
github.com/go-kit/kit v0.6.0
|
||||
github.com/go-logfmt/logfmt v0.3.0
|
||||
github.com/go-stack/stack v1.8.0 // indirect
|
||||
github.com/gogo/protobuf v1.2.1
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b // indirect
|
||||
github.com/golang/protobuf v1.3.0
|
||||
github.com/golang/protobuf v1.3.2
|
||||
github.com/google/gofuzz v1.0.0 // indirect
|
||||
github.com/google/pprof v0.0.0-20190723021845-34ac40c74b70 // indirect
|
||||
github.com/gorilla/websocket v1.2.0
|
||||
github.com/hashicorp/hcl v1.0.0 // indirect
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.0.0 // indirect
|
||||
github.com/jmhodges/levigo v1.0.0
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 // indirect
|
||||
github.com/libp2p/go-buffer-pool v0.0.1
|
||||
github.com/magiconair/properties v1.8.0
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
|
||||
github.com/mitchellh/mapstructure v1.1.2 // indirect
|
||||
github.com/pelletier/go-toml v1.2.0 // indirect
|
||||
github.com/pkg/errors v0.8.0
|
||||
github.com/pkg/errors v0.8.1
|
||||
github.com/prometheus/client_golang v0.9.1
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 // indirect
|
||||
github.com/prometheus/common v0.0.0-20181020173914-7e9e6cabbd39 // indirect
|
||||
@@ -39,12 +38,11 @@ require (
|
||||
github.com/spf13/jwalterweatherman v1.0.0 // indirect
|
||||
github.com/spf13/pflag v1.0.3 // indirect
|
||||
github.com/spf13/viper v1.0.0
|
||||
github.com/stretchr/testify v1.2.2
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965
|
||||
github.com/stretchr/testify v1.3.0
|
||||
github.com/tendermint/go-amino v0.14.1
|
||||
go.etcd.io/bbolt v1.3.3 // indirect
|
||||
golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd
|
||||
google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2 // indirect
|
||||
google.golang.org/grpc v1.13.0
|
||||
github.com/tendermint/tm-db v0.1.1
|
||||
golang.org/x/arch v0.0.0-20190312162104-788fe5ffcd8c // indirect
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2
|
||||
golang.org/x/net v0.0.0-20190628185345-da137c7871d7
|
||||
google.golang.org/grpc v1.22.0
|
||||
)
|
||||
|
58
go.sum
@@ -1,3 +1,4 @@
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE=
|
||||
@@ -15,11 +16,13 @@ github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVa
|
||||
github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
|
||||
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
|
||||
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/etcd-io/bbolt v1.3.2 h1:RLRQ0TKLX7DlBRXAJHvbmXL17Q3KNnTBtZ9B6Qo+/Y0=
|
||||
github.com/etcd-io/bbolt v1.3.2/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw=
|
||||
github.com/etcd-io/bbolt v1.3.3 h1:gSJmxrs37LgTqR/oyJBWok6k6SvXEUerFTbltIhXkBM=
|
||||
github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw=
|
||||
github.com/fortytw2/leaktest v1.2.0 h1:cj6GCiwJDH7l3tMHLjZDo0QqPtrXJiWSI9JgpeQKw+Q=
|
||||
github.com/fortytw2/leaktest v1.2.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
|
||||
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
|
||||
@@ -34,21 +37,25 @@ github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE=
|
||||
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.0 h1:kbxbvI4Un1LUWKxufD+BiE6AEExYYgkQLQmLFqA1LFk=
|
||||
github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0=
|
||||
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w=
|
||||
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
|
||||
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/pprof v0.0.0-20190723021845-34ac40c74b70 h1:XTnP8fJpa4Kvpw2qARB4KS9izqxPS0Sd92cDlY3uk+w=
|
||||
github.com/google/pprof v0.0.0-20190723021845-34ac40c74b70/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/gorilla/websocket v1.2.0 h1:VJtLvh6VQym50czpZzx07z/kw9EgAxI3x1ZB8taTMQQ=
|
||||
github.com/gorilla/websocket v1.2.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
|
||||
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
|
||||
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6 h1:UDMh68UUwekSh5iP2OMhRRZJiiBccgV7axzUG8vi56c=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
|
||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||
@@ -60,6 +67,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o
|
||||
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/libp2p/go-buffer-pool v0.0.1 h1:9Rrn/H46cXjaA2HQ5Y8lyhOS1NhTkZ4yuEs2r3Eechg=
|
||||
github.com/libp2p/go-buffer-pool v0.0.1/go.mod h1:xtyIz9PMobb13WaxR6Zo1Pd1zXJKYg0a8KiIvDp3TzQ=
|
||||
github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY=
|
||||
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
|
||||
@@ -73,8 +82,8 @@ github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU=
|
||||
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
|
||||
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
||||
github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v0.9.1 h1:K47Rk0v/fkEfwfQet2KWhscE0cJzjgCCDBG2KHZoVno=
|
||||
@@ -101,35 +110,50 @@ github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
|
||||
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/viper v1.0.0 h1:RUA/ghS2i64rlnn4ydTfblY8Og8QzcPtCcHvgMn+w/I=
|
||||
github.com/spf13/viper v1.0.0/go.mod h1:A8kyI5cUJhb8N+3pkfONlcEcZbueH6nhAm0Fq7SrnBM=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/syndtr/goleveldb v0.0.0-20181012014443-6b91fda63f2e h1:91EeXI4y4ShkyzkMqZ7QP/ZTIqwXp3RuDu5WFzxcFAs=
|
||||
github.com/syndtr/goleveldb v0.0.0-20181012014443-6b91fda63f2e/go.mod h1:Z4AUp2Km+PwemOoO/VB5AOx9XSsIItzFjoJlOSiYmn0=
|
||||
github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE=
|
||||
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965 h1:1oFLiOyVl+W7bnBzGhf7BbIv9loSFQcieWWYIjLqcAw=
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965/go.mod h1:9OrXJhf154huy1nPWmuSrkgjPUtUNhA+Zmy+6AESzuA=
|
||||
github.com/tendermint/go-amino v0.14.1 h1:o2WudxNfdLNBwMyl2dqOJxiro5rfrEaU0Ugs6offJMk=
|
||||
github.com/tendermint/go-amino v0.14.1/go.mod h1:i/UKE5Uocn+argJJBb12qTZsCDBcAYMbR92AaJVmKso=
|
||||
github.com/tendermint/tm-db v0.0.0-20190731085305-94017c88bf1d h1:yCHL2COLGLNfb4sA9AlzIHpapb8UATvAQyJulS6Eg6Q=
|
||||
github.com/tendermint/tm-db v0.0.0-20190731085305-94017c88bf1d/go.mod h1:0cPKWu2Mou3IlxecH+MEUSYc1Ch537alLe6CpFrKzgw=
|
||||
github.com/tendermint/tm-db v0.1.1 h1:G3Xezy3sOk9+ekhjZ/kjArYIs1SmwV+1OUgNkj7RgV0=
|
||||
github.com/tendermint/tm-db v0.1.1/go.mod h1:0cPKWu2Mou3IlxecH+MEUSYc1Ch537alLe6CpFrKzgw=
|
||||
go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk=
|
||||
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||
golang.org/x/arch v0.0.0-20190312162104-788fe5ffcd8c h1:Rx/HTKi09myZ25t1SOlDHmHOy/mKxNAcu0hP1oPX9qM=
|
||||
golang.org/x/arch v0.0.0-20190312162104-788fe5ffcd8c/go.mod h1:flIaEI6LNU6xOCD5PaJvn9wGP0agmIOqjrtsKGRguv4=
|
||||
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25 h1:jsG6UpNLt9iAsb0S2AGW28DveNzzgmbXR+ENoPjUeIU=
|
||||
golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190628185345-da137c7871d7 h1:rTIdg5QFRR7XCaK4LCjBiPbx8j4DQRpdYMnGn/bJUEU=
|
||||
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2 h1:67iHsV9djwGdZpdZNbLuQj6FOzCaZe3w+vhLjn5AcFA=
|
||||
google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/grpc v1.13.0 h1:bHIbVsCwmvbArgCJmLdgOdHFXlKqTOVjbibbS19cXHc=
|
||||
google.golang.org/grpc v1.13.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||
google.golang.org/grpc v1.22.0 h1:J0UbZOIrCAl+fpTOf8YLs4dJo8L/owV4LYVtAXQoPkw=
|
||||
google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
|
||||
@@ -138,3 +162,5 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkep
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
|
||||
|
@@ -472,7 +472,8 @@ func (gr *GroupReader) Read(p []byte) (n int, err error) {
|
||||
for {
|
||||
nn, err = gr.curReader.Read(p[n:])
|
||||
n += nn
|
||||
if err == io.EOF {
|
||||
switch {
|
||||
case err == io.EOF:
|
||||
if n >= lenP {
|
||||
return n, nil
|
||||
}
|
||||
@@ -480,9 +481,9 @@ func (gr *GroupReader) Read(p []byte) (n int, err error) {
|
||||
if err1 := gr.openFile(gr.curIndex + 1); err1 != nil {
|
||||
return n, err1
|
||||
}
|
||||
} else if err != nil {
|
||||
case err != nil:
|
||||
return n, err
|
||||
} else if nn == 0 { // empty file
|
||||
case nn == 0: // empty file
|
||||
return n, err
|
||||
}
|
||||
}
|
||||
|
@@ -14,7 +14,7 @@ import (
|
||||
func WriteConfigVals(dir string, vals map[string]string) error {
|
||||
data := ""
|
||||
for k, v := range vals {
|
||||
data = data + fmt.Sprintf("%s = \"%s\"\n", k, v)
|
||||
data += fmt.Sprintf("%s = \"%s\"\n", k, v)
|
||||
}
|
||||
cfile := filepath.Join(dir, "config.toml")
|
||||
return ioutil.WriteFile(cfile, []byte(data), 0666)
|
||||
|
@@ -61,9 +61,10 @@ func (trs *TaskResultSet) Reap() *TaskResultSet {
|
||||
TaskResult: result,
|
||||
OK: true,
|
||||
}
|
||||
} else {
|
||||
// We already wrote it.
|
||||
}
|
||||
// else {
|
||||
// We already wrote it.
|
||||
// }
|
||||
default:
|
||||
// Do nothing.
|
||||
}
|
||||
@@ -83,9 +84,10 @@ func (trs *TaskResultSet) Wait() *TaskResultSet {
|
||||
TaskResult: result,
|
||||
OK: true,
|
||||
}
|
||||
} else {
|
||||
// We already wrote it.
|
||||
}
|
||||
// else {
|
||||
// We already wrote it.
|
||||
// }
|
||||
}
|
||||
return trs
|
||||
}
|
||||
|
@@ -31,18 +31,20 @@ func TestParallel(t *testing.T) {
|
||||
var failedTasks int
|
||||
for i := 0; i < len(tasks); i++ {
|
||||
taskResult, ok := trs.LatestResult(i)
|
||||
if !ok {
|
||||
switch {
|
||||
case !ok:
|
||||
assert.Fail(t, "Task #%v did not complete.", i)
|
||||
failedTasks++
|
||||
} else if taskResult.Error != nil {
|
||||
case taskResult.Error != nil:
|
||||
assert.Fail(t, "Task should not have errored but got %v", taskResult.Error)
|
||||
failedTasks++
|
||||
} else if !assert.Equal(t, -1*i, taskResult.Value.(int)) {
|
||||
case !assert.Equal(t, -1*i, taskResult.Value.(int)):
|
||||
assert.Fail(t, "Task should have returned %v but got %v", -1*i, taskResult.Value.(int))
|
||||
failedTasks++
|
||||
} else {
|
||||
// Good!
|
||||
}
|
||||
// else {
|
||||
// Good!
|
||||
// }
|
||||
}
|
||||
assert.Equal(t, failedTasks, 0, "No task should have failed")
|
||||
assert.Nil(t, trs.FirstError(), "There should be no errors")
|
||||
@@ -132,11 +134,12 @@ func checkResult(t *testing.T, taskResultSet *TaskResultSet, index int, val inte
|
||||
taskName := fmt.Sprintf("Task #%v", index)
|
||||
assert.True(t, ok, "TaskResultCh unexpectedly closed for %v", taskName)
|
||||
assert.Equal(t, val, taskResult.Value, taskName)
|
||||
if err != nil {
|
||||
switch {
|
||||
case err != nil:
|
||||
assert.Equal(t, err, taskResult.Error, taskName)
|
||||
} else if pnk != nil {
|
||||
case pnk != nil:
|
||||
assert.Equal(t, pnk, taskResult.Error.(Error).Data(), taskName)
|
||||
} else {
|
||||
default:
|
||||
assert.Nil(t, taskResult.Error, taskName)
|
||||
}
|
||||
}
|
||||
|
@@ -9,7 +9,7 @@ import (
|
||||
// Convenience method.
|
||||
|
||||
func ErrorWrap(cause interface{}, format string, args ...interface{}) Error {
|
||||
if causeCmnError, ok := cause.(*cmnError); ok {
|
||||
if causeCmnError, ok := cause.(*cmnError); ok { //nolint:gocritic
|
||||
msg := fmt.Sprintf(format, args...)
|
||||
return causeCmnError.Stacktrace().Trace(1, msg)
|
||||
} else if cause == nil {
|
||||
|
@@ -45,11 +45,9 @@ func TestDeterminism(t *testing.T) {
|
||||
output := testThemAll()
|
||||
if i == 0 {
|
||||
firstOutput = output
|
||||
} else {
|
||||
if firstOutput != output {
|
||||
t.Errorf("Run #%d's output was different from first run.\nfirst: %v\nlast: %v",
|
||||
i, firstOutput, output)
|
||||
}
|
||||
} else if firstOutput != output {
|
||||
t.Errorf("Run #%d's output was different from first run.\nfirst: %v\nlast: %v",
|
||||
i, firstOutput, output)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -51,11 +51,12 @@ func IsASCIIText(s string) bool {
|
||||
func ASCIITrim(s string) string {
|
||||
r := make([]byte, 0, len(s))
|
||||
for _, b := range []byte(s) {
|
||||
if b == 32 {
|
||||
switch {
|
||||
case b == 32:
|
||||
continue // skip space
|
||||
} else if 32 < b && b <= 126 {
|
||||
case 32 < b && b <= 126:
|
||||
r = append(r, b)
|
||||
} else {
|
||||
default:
|
||||
panic(fmt.Sprintf("non-ASCII (non-tab) char 0x%X", b))
|
||||
}
|
||||
}
|
||||
|
@@ -1,223 +0,0 @@
|
||||
package db
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
)
|
||||
|
||||
func cleanupDBDir(dir, name string) {
|
||||
err := os.RemoveAll(filepath.Join(dir, name) + ".db")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
func testBackendGetSetDelete(t *testing.T, backend DBBackendType) {
|
||||
// Default
|
||||
dirname, err := ioutil.TempDir("", fmt.Sprintf("test_backend_%s_", backend))
|
||||
require.Nil(t, err)
|
||||
db := NewDB("testdb", backend, dirname)
|
||||
defer cleanupDBDir(dirname, "testdb")
|
||||
|
||||
// A nonexistent key should return nil, even if the key is empty
|
||||
require.Nil(t, db.Get([]byte("")))
|
||||
|
||||
// A nonexistent key should return nil, even if the key is nil
|
||||
require.Nil(t, db.Get(nil))
|
||||
|
||||
// A nonexistent key should return nil.
|
||||
key := []byte("abc")
|
||||
require.Nil(t, db.Get(key))
|
||||
|
||||
// Set empty value.
|
||||
db.Set(key, []byte(""))
|
||||
require.NotNil(t, db.Get(key))
|
||||
require.Empty(t, db.Get(key))
|
||||
|
||||
// Set nil value.
|
||||
db.Set(key, nil)
|
||||
require.NotNil(t, db.Get(key))
|
||||
require.Empty(t, db.Get(key))
|
||||
|
||||
// Delete.
|
||||
db.Delete(key)
|
||||
require.Nil(t, db.Get(key))
|
||||
}
|
||||
|
||||
func TestBackendsGetSetDelete(t *testing.T) {
|
||||
for dbType := range backends {
|
||||
testBackendGetSetDelete(t, dbType)
|
||||
}
|
||||
}
|
||||
|
||||
func withDB(t *testing.T, creator dbCreator, fn func(DB)) {
|
||||
name := fmt.Sprintf("test_%x", cmn.RandStr(12))
|
||||
dir := os.TempDir()
|
||||
db, err := creator(name, dir)
|
||||
require.Nil(t, err)
|
||||
defer cleanupDBDir(dir, name)
|
||||
fn(db)
|
||||
db.Close()
|
||||
}
|
||||
|
||||
func TestBackendsNilKeys(t *testing.T) {
|
||||
|
||||
// Test all backends.
|
||||
for dbType, creator := range backends {
|
||||
withDB(t, creator, func(db DB) {
|
||||
t.Run(fmt.Sprintf("Testing %s", dbType), func(t *testing.T) {
|
||||
|
||||
// Nil keys are treated as the empty key for most operations.
|
||||
expect := func(key, value []byte) {
|
||||
if len(key) == 0 { // nil or empty
|
||||
assert.Equal(t, db.Get(nil), db.Get([]byte("")))
|
||||
assert.Equal(t, db.Has(nil), db.Has([]byte("")))
|
||||
}
|
||||
assert.Equal(t, db.Get(key), value)
|
||||
assert.Equal(t, db.Has(key), value != nil)
|
||||
}
|
||||
|
||||
// Not set
|
||||
expect(nil, nil)
|
||||
|
||||
// Set nil value
|
||||
db.Set(nil, nil)
|
||||
expect(nil, []byte(""))
|
||||
|
||||
// Set empty value
|
||||
db.Set(nil, []byte(""))
|
||||
expect(nil, []byte(""))
|
||||
|
||||
// Set nil, Delete nil
|
||||
db.Set(nil, []byte("abc"))
|
||||
expect(nil, []byte("abc"))
|
||||
db.Delete(nil)
|
||||
expect(nil, nil)
|
||||
|
||||
// Set nil, Delete empty
|
||||
db.Set(nil, []byte("abc"))
|
||||
expect(nil, []byte("abc"))
|
||||
db.Delete([]byte(""))
|
||||
expect(nil, nil)
|
||||
|
||||
// Set empty, Delete nil
|
||||
db.Set([]byte(""), []byte("abc"))
|
||||
expect(nil, []byte("abc"))
|
||||
db.Delete(nil)
|
||||
expect(nil, nil)
|
||||
|
||||
// Set empty, Delete empty
|
||||
db.Set([]byte(""), []byte("abc"))
|
||||
expect(nil, []byte("abc"))
|
||||
db.Delete([]byte(""))
|
||||
expect(nil, nil)
|
||||
|
||||
// SetSync nil, DeleteSync nil
|
||||
db.SetSync(nil, []byte("abc"))
|
||||
expect(nil, []byte("abc"))
|
||||
db.DeleteSync(nil)
|
||||
expect(nil, nil)
|
||||
|
||||
// SetSync nil, DeleteSync empty
|
||||
db.SetSync(nil, []byte("abc"))
|
||||
expect(nil, []byte("abc"))
|
||||
db.DeleteSync([]byte(""))
|
||||
expect(nil, nil)
|
||||
|
||||
// SetSync empty, DeleteSync nil
|
||||
db.SetSync([]byte(""), []byte("abc"))
|
||||
expect(nil, []byte("abc"))
|
||||
db.DeleteSync(nil)
|
||||
expect(nil, nil)
|
||||
|
||||
// SetSync empty, DeleteSync empty
|
||||
db.SetSync([]byte(""), []byte("abc"))
|
||||
expect(nil, []byte("abc"))
|
||||
db.DeleteSync([]byte(""))
|
||||
expect(nil, nil)
|
||||
})
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGoLevelDBBackend(t *testing.T) {
|
||||
name := fmt.Sprintf("test_%x", cmn.RandStr(12))
|
||||
db := NewDB(name, GoLevelDBBackend, "")
|
||||
defer cleanupDBDir("", name)
|
||||
|
||||
_, ok := db.(*GoLevelDB)
|
||||
assert.True(t, ok)
|
||||
}
|
||||
|
||||
func TestDBIterator(t *testing.T) {
|
||||
for dbType := range backends {
|
||||
t.Run(fmt.Sprintf("%v", dbType), func(t *testing.T) {
|
||||
testDBIterator(t, dbType)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func testDBIterator(t *testing.T, backend DBBackendType) {
|
||||
name := fmt.Sprintf("test_%x", cmn.RandStr(12))
|
||||
dir := os.TempDir()
|
||||
db := NewDB(name, backend, dir)
|
||||
defer cleanupDBDir(dir, name)
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
if i != 6 { // but skip 6.
|
||||
db.Set(int642Bytes(int64(i)), nil)
|
||||
}
|
||||
}
|
||||
|
||||
verifyIterator(t, db.Iterator(nil, nil), []int64{0, 1, 2, 3, 4, 5, 7, 8, 9}, "forward iterator")
|
||||
verifyIterator(t, db.ReverseIterator(nil, nil), []int64{9, 8, 7, 5, 4, 3, 2, 1, 0}, "reverse iterator")
|
||||
|
||||
verifyIterator(t, db.Iterator(nil, int642Bytes(0)), []int64(nil), "forward iterator to 0")
|
||||
verifyIterator(t, db.ReverseIterator(int642Bytes(10), nil), []int64(nil), "reverse iterator from 10 (ex)")
|
||||
|
||||
verifyIterator(t, db.Iterator(int642Bytes(0), nil), []int64{0, 1, 2, 3, 4, 5, 7, 8, 9}, "forward iterator from 0")
|
||||
verifyIterator(t, db.Iterator(int642Bytes(1), nil), []int64{1, 2, 3, 4, 5, 7, 8, 9}, "forward iterator from 1")
|
||||
verifyIterator(t, db.ReverseIterator(nil, int642Bytes(10)), []int64{9, 8, 7, 5, 4, 3, 2, 1, 0}, "reverse iterator from 10 (ex)")
|
||||
verifyIterator(t, db.ReverseIterator(nil, int642Bytes(9)), []int64{8, 7, 5, 4, 3, 2, 1, 0}, "reverse iterator from 9 (ex)")
|
||||
verifyIterator(t, db.ReverseIterator(nil, int642Bytes(8)), []int64{7, 5, 4, 3, 2, 1, 0}, "reverse iterator from 8 (ex)")
|
||||
|
||||
verifyIterator(t, db.Iterator(int642Bytes(5), int642Bytes(6)), []int64{5}, "forward iterator from 5 to 6")
|
||||
verifyIterator(t, db.Iterator(int642Bytes(5), int642Bytes(7)), []int64{5}, "forward iterator from 5 to 7")
|
||||
verifyIterator(t, db.Iterator(int642Bytes(5), int642Bytes(8)), []int64{5, 7}, "forward iterator from 5 to 8")
|
||||
verifyIterator(t, db.Iterator(int642Bytes(6), int642Bytes(7)), []int64(nil), "forward iterator from 6 to 7")
|
||||
verifyIterator(t, db.Iterator(int642Bytes(6), int642Bytes(8)), []int64{7}, "forward iterator from 6 to 8")
|
||||
verifyIterator(t, db.Iterator(int642Bytes(7), int642Bytes(8)), []int64{7}, "forward iterator from 7 to 8")
|
||||
|
||||
verifyIterator(t, db.ReverseIterator(int642Bytes(4), int642Bytes(5)), []int64{4}, "reverse iterator from 5 (ex) to 4")
|
||||
verifyIterator(t, db.ReverseIterator(int642Bytes(4), int642Bytes(6)), []int64{5, 4}, "reverse iterator from 6 (ex) to 4")
|
||||
verifyIterator(t, db.ReverseIterator(int642Bytes(4), int642Bytes(7)), []int64{5, 4}, "reverse iterator from 7 (ex) to 4")
|
||||
verifyIterator(t, db.ReverseIterator(int642Bytes(5), int642Bytes(6)), []int64{5}, "reverse iterator from 6 (ex) to 5")
|
||||
verifyIterator(t, db.ReverseIterator(int642Bytes(5), int642Bytes(7)), []int64{5}, "reverse iterator from 7 (ex) to 5")
|
||||
verifyIterator(t, db.ReverseIterator(int642Bytes(6), int642Bytes(7)), []int64(nil), "reverse iterator from 7 (ex) to 6")
|
||||
|
||||
verifyIterator(t, db.Iterator(int642Bytes(0), int642Bytes(1)), []int64{0}, "forward iterator from 0 to 1")
|
||||
verifyIterator(t, db.ReverseIterator(int642Bytes(8), int642Bytes(9)), []int64{8}, "reverse iterator from 9 (ex) to 8")
|
||||
|
||||
verifyIterator(t, db.Iterator(int642Bytes(2), int642Bytes(4)), []int64{2, 3}, "forward iterator from 2 to 4")
|
||||
verifyIterator(t, db.Iterator(int642Bytes(4), int642Bytes(2)), []int64(nil), "forward iterator from 4 to 2")
|
||||
verifyIterator(t, db.ReverseIterator(int642Bytes(2), int642Bytes(4)), []int64{3, 2}, "reverse iterator from 4 (ex) to 2")
|
||||
verifyIterator(t, db.ReverseIterator(int642Bytes(4), int642Bytes(2)), []int64(nil), "reverse iterator from 2 (ex) to 4")
|
||||
|
||||
}
|
||||
|
||||
func verifyIterator(t *testing.T, itr Iterator, expected []int64, msg string) {
|
||||
var list []int64
|
||||
for itr.Valid() {
|
||||
list = append(list, bytes2Int64(itr.Key()))
|
||||
itr.Next()
|
||||
}
|
||||
assert.Equal(t, expected, list, msg)
|
||||
}
|
@@ -1,349 +0,0 @@
|
||||
// +build boltdb
|
||||
|
||||
package db
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/etcd-io/bbolt"
|
||||
)
|
||||
|
||||
var bucket = []byte("tm")
|
||||
|
||||
func init() {
|
||||
registerDBCreator(BoltDBBackend, func(name, dir string) (DB, error) {
|
||||
return NewBoltDB(name, dir)
|
||||
}, false)
|
||||
}
|
||||
|
||||
// BoltDB is a wrapper around etcd's fork of bolt
|
||||
// (https://github.com/etcd-io/bbolt).
|
||||
//
|
||||
// NOTE: All operations (including Set, Delete) are synchronous by default. One
|
||||
// can globally turn it off by using NoSync config option (not recommended).
|
||||
//
|
||||
// A single bucket ([]byte("tm")) is used per a database instance. This could
|
||||
// lead to performance issues when/if there will be lots of keys.
|
||||
type BoltDB struct {
|
||||
db *bbolt.DB
|
||||
}
|
||||
|
||||
// NewBoltDB returns a BoltDB with default options.
|
||||
func NewBoltDB(name, dir string) (DB, error) {
|
||||
return NewBoltDBWithOpts(name, dir, bbolt.DefaultOptions)
|
||||
}
|
||||
|
||||
// NewBoltDBWithOpts allows you to supply *bbolt.Options. ReadOnly: true is not
|
||||
// supported because NewBoltDBWithOpts creates a global bucket.
|
||||
func NewBoltDBWithOpts(name string, dir string, opts *bbolt.Options) (DB, error) {
|
||||
if opts.ReadOnly {
|
||||
return nil, errors.New("ReadOnly: true is not supported")
|
||||
}
|
||||
|
||||
dbPath := filepath.Join(dir, name+".db")
|
||||
db, err := bbolt.Open(dbPath, os.ModePerm, opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// create a global bucket
|
||||
err = db.Update(func(tx *bbolt.Tx) error {
|
||||
_, err := tx.CreateBucketIfNotExists(bucket)
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &BoltDB{db: db}, nil
|
||||
}
|
||||
|
||||
func (bdb *BoltDB) Get(key []byte) (value []byte) {
|
||||
key = nonEmptyKey(nonNilBytes(key))
|
||||
err := bdb.db.View(func(tx *bbolt.Tx) error {
|
||||
b := tx.Bucket(bucket)
|
||||
if v := b.Get(key); v != nil {
|
||||
value = append([]byte{}, v...)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (bdb *BoltDB) Has(key []byte) bool {
|
||||
return bdb.Get(key) != nil
|
||||
}
|
||||
|
||||
func (bdb *BoltDB) Set(key, value []byte) {
|
||||
key = nonEmptyKey(nonNilBytes(key))
|
||||
value = nonNilBytes(value)
|
||||
err := bdb.db.Update(func(tx *bbolt.Tx) error {
|
||||
b := tx.Bucket(bucket)
|
||||
return b.Put(key, value)
|
||||
})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
func (bdb *BoltDB) SetSync(key, value []byte) {
|
||||
bdb.Set(key, value)
|
||||
}
|
||||
|
||||
func (bdb *BoltDB) Delete(key []byte) {
|
||||
key = nonEmptyKey(nonNilBytes(key))
|
||||
err := bdb.db.Update(func(tx *bbolt.Tx) error {
|
||||
return tx.Bucket(bucket).Delete(key)
|
||||
})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
func (bdb *BoltDB) DeleteSync(key []byte) {
|
||||
bdb.Delete(key)
|
||||
}
|
||||
|
||||
func (bdb *BoltDB) Close() {
|
||||
bdb.db.Close()
|
||||
}
|
||||
|
||||
func (bdb *BoltDB) Print() {
|
||||
stats := bdb.db.Stats()
|
||||
fmt.Printf("%v\n", stats)
|
||||
|
||||
err := bdb.db.View(func(tx *bbolt.Tx) error {
|
||||
tx.Bucket(bucket).ForEach(func(k, v []byte) error {
|
||||
fmt.Printf("[%X]:\t[%X]\n", k, v)
|
||||
return nil
|
||||
})
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
func (bdb *BoltDB) Stats() map[string]string {
|
||||
stats := bdb.db.Stats()
|
||||
m := make(map[string]string)
|
||||
|
||||
// Freelist stats
|
||||
m["FreePageN"] = fmt.Sprintf("%v", stats.FreePageN)
|
||||
m["PendingPageN"] = fmt.Sprintf("%v", stats.PendingPageN)
|
||||
m["FreeAlloc"] = fmt.Sprintf("%v", stats.FreeAlloc)
|
||||
m["FreelistInuse"] = fmt.Sprintf("%v", stats.FreelistInuse)
|
||||
|
||||
// Transaction stats
|
||||
m["TxN"] = fmt.Sprintf("%v", stats.TxN)
|
||||
m["OpenTxN"] = fmt.Sprintf("%v", stats.OpenTxN)
|
||||
|
||||
return m
|
||||
}
|
||||
|
||||
// boltDBBatch stores key values in sync.Map and dumps them to the underlying
|
||||
// DB upon Write call.
|
||||
type boltDBBatch struct {
|
||||
db *BoltDB
|
||||
ops []operation
|
||||
}
|
||||
|
||||
// NewBatch returns a new batch.
|
||||
func (bdb *BoltDB) NewBatch() Batch {
|
||||
return &boltDBBatch{
|
||||
ops: nil,
|
||||
db: bdb,
|
||||
}
|
||||
}
|
||||
|
||||
// It is safe to modify the contents of the argument after Set returns but not
|
||||
// before.
|
||||
func (bdb *boltDBBatch) Set(key, value []byte) {
|
||||
bdb.ops = append(bdb.ops, operation{opTypeSet, key, value})
|
||||
}
|
||||
|
||||
// It is safe to modify the contents of the argument after Delete returns but
|
||||
// not before.
|
||||
func (bdb *boltDBBatch) Delete(key []byte) {
|
||||
bdb.ops = append(bdb.ops, operation{opTypeDelete, key, nil})
|
||||
}
|
||||
|
||||
// NOTE: the operation is synchronous (see BoltDB for reasons)
|
||||
func (bdb *boltDBBatch) Write() {
|
||||
err := bdb.db.db.Batch(func(tx *bbolt.Tx) error {
|
||||
b := tx.Bucket(bucket)
|
||||
for _, op := range bdb.ops {
|
||||
key := nonEmptyKey(nonNilBytes(op.key))
|
||||
switch op.opType {
|
||||
case opTypeSet:
|
||||
if putErr := b.Put(key, op.value); putErr != nil {
|
||||
return putErr
|
||||
}
|
||||
case opTypeDelete:
|
||||
if delErr := b.Delete(key); delErr != nil {
|
||||
return delErr
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
func (bdb *boltDBBatch) WriteSync() {
|
||||
bdb.Write()
|
||||
}
|
||||
|
||||
func (bdb *boltDBBatch) Close() {}
|
||||
|
||||
// WARNING: Any concurrent writes or reads will block until the iterator is
|
||||
// closed.
|
||||
func (bdb *BoltDB) Iterator(start, end []byte) Iterator {
|
||||
tx, err := bdb.db.Begin(false)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return newBoltDBIterator(tx, start, end, false)
|
||||
}
|
||||
|
||||
// WARNING: Any concurrent writes or reads will block until the iterator is
|
||||
// closed.
|
||||
func (bdb *BoltDB) ReverseIterator(start, end []byte) Iterator {
|
||||
tx, err := bdb.db.Begin(false)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return newBoltDBIterator(tx, start, end, true)
|
||||
}
|
||||
|
||||
// boltDBIterator allows you to iterate on range of keys/values given some
|
||||
// start / end keys (nil & nil will result in doing full scan).
|
||||
type boltDBIterator struct {
|
||||
tx *bbolt.Tx
|
||||
|
||||
itr *bbolt.Cursor
|
||||
start []byte
|
||||
end []byte
|
||||
|
||||
currentKey []byte
|
||||
currentValue []byte
|
||||
|
||||
isInvalid bool
|
||||
isReverse bool
|
||||
}
|
||||
|
||||
func newBoltDBIterator(tx *bbolt.Tx, start, end []byte, isReverse bool) *boltDBIterator {
|
||||
itr := tx.Bucket(bucket).Cursor()
|
||||
|
||||
var ck, cv []byte
|
||||
if isReverse {
|
||||
if end == nil {
|
||||
ck, cv = itr.Last()
|
||||
} else {
|
||||
_, _ = itr.Seek(end) // after key
|
||||
ck, cv = itr.Prev() // return to end key
|
||||
}
|
||||
} else {
|
||||
if start == nil {
|
||||
ck, cv = itr.First()
|
||||
} else {
|
||||
ck, cv = itr.Seek(start)
|
||||
}
|
||||
}
|
||||
|
||||
return &boltDBIterator{
|
||||
tx: tx,
|
||||
itr: itr,
|
||||
start: start,
|
||||
end: end,
|
||||
currentKey: ck,
|
||||
currentValue: cv,
|
||||
isReverse: isReverse,
|
||||
isInvalid: false,
|
||||
}
|
||||
}
|
||||
|
||||
func (itr *boltDBIterator) Domain() ([]byte, []byte) {
|
||||
return itr.start, itr.end
|
||||
}
|
||||
|
||||
func (itr *boltDBIterator) Valid() bool {
|
||||
if itr.isInvalid {
|
||||
return false
|
||||
}
|
||||
|
||||
// iterated to the end of the cursor
|
||||
if len(itr.currentKey) == 0 {
|
||||
itr.isInvalid = true
|
||||
return false
|
||||
}
|
||||
|
||||
if itr.isReverse {
|
||||
if itr.start != nil && bytes.Compare(itr.currentKey, itr.start) < 0 {
|
||||
itr.isInvalid = true
|
||||
return false
|
||||
}
|
||||
} else {
|
||||
if itr.end != nil && bytes.Compare(itr.end, itr.currentKey) <= 0 {
|
||||
itr.isInvalid = true
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// Valid
|
||||
return true
|
||||
}
|
||||
|
||||
func (itr *boltDBIterator) Next() {
|
||||
itr.assertIsValid()
|
||||
if itr.isReverse {
|
||||
itr.currentKey, itr.currentValue = itr.itr.Prev()
|
||||
} else {
|
||||
itr.currentKey, itr.currentValue = itr.itr.Next()
|
||||
}
|
||||
}
|
||||
|
||||
func (itr *boltDBIterator) Key() []byte {
|
||||
itr.assertIsValid()
|
||||
return append([]byte{}, itr.currentKey...)
|
||||
}
|
||||
|
||||
func (itr *boltDBIterator) Value() []byte {
|
||||
itr.assertIsValid()
|
||||
var value []byte
|
||||
if itr.currentValue != nil {
|
||||
value = append([]byte{}, itr.currentValue...)
|
||||
}
|
||||
return value
|
||||
}
|
||||
|
||||
func (itr *boltDBIterator) Close() {
|
||||
err := itr.tx.Rollback()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
func (itr *boltDBIterator) assertIsValid() {
|
||||
if !itr.Valid() {
|
||||
panic("Boltdb-iterator is invalid")
|
||||
}
|
||||
}
|
||||
|
||||
// nonEmptyKey returns a []byte("nil") if key is empty.
|
||||
// WARNING: this may collude with "nil" user key!
|
||||
func nonEmptyKey(key []byte) []byte {
|
||||
if len(key) == 0 {
|
||||
return []byte("nil")
|
||||
}
|
||||
return key
|
||||
}
|
@@ -1,37 +0,0 @@
|
||||
// +build boltdb
|
||||
|
||||
package db
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
)
|
||||
|
||||
func TestBoltDBNewBoltDB(t *testing.T) {
|
||||
name := fmt.Sprintf("test_%x", cmn.RandStr(12))
|
||||
dir := os.TempDir()
|
||||
defer cleanupDBDir(dir, name)
|
||||
|
||||
db, err := NewBoltDB(name, dir)
|
||||
require.NoError(t, err)
|
||||
db.Close()
|
||||
}
|
||||
|
||||
func BenchmarkBoltDBRandomReadsWrites(b *testing.B) {
|
||||
name := fmt.Sprintf("test_%x", cmn.RandStr(12))
|
||||
db, err := NewBoltDB(name, "")
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
defer func() {
|
||||
db.Close()
|
||||
cleanupDBDir("", name)
|
||||
}()
|
||||
|
||||
benchmarkRandomReadsWrites(b, db)
|
||||
}
|
@@ -1,325 +0,0 @@
|
||||
// +build cleveldb
|
||||
|
||||
package db
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/jmhodges/levigo"
|
||||
)
|
||||
|
||||
func init() {
|
||||
dbCreator := func(name string, dir string) (DB, error) {
|
||||
return NewCLevelDB(name, dir)
|
||||
}
|
||||
registerDBCreator(CLevelDBBackend, dbCreator, false)
|
||||
}
|
||||
|
||||
var _ DB = (*CLevelDB)(nil)
|
||||
|
||||
type CLevelDB struct {
|
||||
db *levigo.DB
|
||||
ro *levigo.ReadOptions
|
||||
wo *levigo.WriteOptions
|
||||
woSync *levigo.WriteOptions
|
||||
}
|
||||
|
||||
func NewCLevelDB(name string, dir string) (*CLevelDB, error) {
|
||||
dbPath := filepath.Join(dir, name+".db")
|
||||
|
||||
opts := levigo.NewOptions()
|
||||
opts.SetCache(levigo.NewLRUCache(1 << 30))
|
||||
opts.SetCreateIfMissing(true)
|
||||
db, err := levigo.Open(dbPath, opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ro := levigo.NewReadOptions()
|
||||
wo := levigo.NewWriteOptions()
|
||||
woSync := levigo.NewWriteOptions()
|
||||
woSync.SetSync(true)
|
||||
database := &CLevelDB{
|
||||
db: db,
|
||||
ro: ro,
|
||||
wo: wo,
|
||||
woSync: woSync,
|
||||
}
|
||||
return database, nil
|
||||
}
|
||||
|
||||
// Implements DB.
|
||||
func (db *CLevelDB) Get(key []byte) []byte {
|
||||
key = nonNilBytes(key)
|
||||
res, err := db.db.Get(db.ro, key)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
// Implements DB.
|
||||
func (db *CLevelDB) Has(key []byte) bool {
|
||||
return db.Get(key) != nil
|
||||
}
|
||||
|
||||
// Implements DB.
|
||||
func (db *CLevelDB) Set(key []byte, value []byte) {
|
||||
key = nonNilBytes(key)
|
||||
value = nonNilBytes(value)
|
||||
err := db.db.Put(db.wo, key, value)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Implements DB.
|
||||
func (db *CLevelDB) SetSync(key []byte, value []byte) {
|
||||
key = nonNilBytes(key)
|
||||
value = nonNilBytes(value)
|
||||
err := db.db.Put(db.woSync, key, value)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Implements DB.
|
||||
func (db *CLevelDB) Delete(key []byte) {
|
||||
key = nonNilBytes(key)
|
||||
err := db.db.Delete(db.wo, key)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Implements DB.
|
||||
func (db *CLevelDB) DeleteSync(key []byte) {
|
||||
key = nonNilBytes(key)
|
||||
err := db.db.Delete(db.woSync, key)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
func (db *CLevelDB) DB() *levigo.DB {
|
||||
return db.db
|
||||
}
|
||||
|
||||
// Implements DB.
|
||||
func (db *CLevelDB) Close() {
|
||||
db.db.Close()
|
||||
db.ro.Close()
|
||||
db.wo.Close()
|
||||
db.woSync.Close()
|
||||
}
|
||||
|
||||
// Implements DB.
|
||||
func (db *CLevelDB) Print() {
|
||||
itr := db.Iterator(nil, nil)
|
||||
defer itr.Close()
|
||||
for ; itr.Valid(); itr.Next() {
|
||||
key := itr.Key()
|
||||
value := itr.Value()
|
||||
fmt.Printf("[%X]:\t[%X]\n", key, value)
|
||||
}
|
||||
}
|
||||
|
||||
// Implements DB.
|
||||
func (db *CLevelDB) Stats() map[string]string {
|
||||
keys := []string{
|
||||
"leveldb.aliveiters",
|
||||
"leveldb.alivesnaps",
|
||||
"leveldb.blockpool",
|
||||
"leveldb.cachedblock",
|
||||
"leveldb.num-files-at-level{n}",
|
||||
"leveldb.openedtables",
|
||||
"leveldb.sstables",
|
||||
"leveldb.stats",
|
||||
}
|
||||
|
||||
stats := make(map[string]string, len(keys))
|
||||
for _, key := range keys {
|
||||
str := db.db.PropertyValue(key)
|
||||
stats[key] = str
|
||||
}
|
||||
return stats
|
||||
}
|
||||
|
||||
//----------------------------------------
|
||||
// Batch
|
||||
|
||||
// Implements DB.
|
||||
func (db *CLevelDB) NewBatch() Batch {
|
||||
batch := levigo.NewWriteBatch()
|
||||
return &cLevelDBBatch{db, batch}
|
||||
}
|
||||
|
||||
type cLevelDBBatch struct {
|
||||
db *CLevelDB
|
||||
batch *levigo.WriteBatch
|
||||
}
|
||||
|
||||
// Implements Batch.
|
||||
func (mBatch *cLevelDBBatch) Set(key, value []byte) {
|
||||
mBatch.batch.Put(key, value)
|
||||
}
|
||||
|
||||
// Implements Batch.
|
||||
func (mBatch *cLevelDBBatch) Delete(key []byte) {
|
||||
mBatch.batch.Delete(key)
|
||||
}
|
||||
|
||||
// Implements Batch.
|
||||
func (mBatch *cLevelDBBatch) Write() {
|
||||
err := mBatch.db.db.Write(mBatch.db.wo, mBatch.batch)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Implements Batch.
|
||||
func (mBatch *cLevelDBBatch) WriteSync() {
|
||||
err := mBatch.db.db.Write(mBatch.db.woSync, mBatch.batch)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Implements Batch.
|
||||
func (mBatch *cLevelDBBatch) Close() {
|
||||
mBatch.batch.Close()
|
||||
}
|
||||
|
||||
//----------------------------------------
|
||||
// Iterator
|
||||
// NOTE This is almost identical to db/go_level_db.Iterator
|
||||
// Before creating a third version, refactor.
|
||||
|
||||
func (db *CLevelDB) Iterator(start, end []byte) Iterator {
|
||||
itr := db.db.NewIterator(db.ro)
|
||||
return newCLevelDBIterator(itr, start, end, false)
|
||||
}
|
||||
|
||||
func (db *CLevelDB) ReverseIterator(start, end []byte) Iterator {
|
||||
itr := db.db.NewIterator(db.ro)
|
||||
return newCLevelDBIterator(itr, start, end, true)
|
||||
}
|
||||
|
||||
var _ Iterator = (*cLevelDBIterator)(nil)
|
||||
|
||||
type cLevelDBIterator struct {
|
||||
source *levigo.Iterator
|
||||
start, end []byte
|
||||
isReverse bool
|
||||
isInvalid bool
|
||||
}
|
||||
|
||||
func newCLevelDBIterator(source *levigo.Iterator, start, end []byte, isReverse bool) *cLevelDBIterator {
|
||||
if isReverse {
|
||||
if end == nil {
|
||||
source.SeekToLast()
|
||||
} else {
|
||||
source.Seek(end)
|
||||
if source.Valid() {
|
||||
eoakey := source.Key() // end or after key
|
||||
if bytes.Compare(end, eoakey) <= 0 {
|
||||
source.Prev()
|
||||
}
|
||||
} else {
|
||||
source.SeekToLast()
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if start == nil {
|
||||
source.SeekToFirst()
|
||||
} else {
|
||||
source.Seek(start)
|
||||
}
|
||||
}
|
||||
return &cLevelDBIterator{
|
||||
source: source,
|
||||
start: start,
|
||||
end: end,
|
||||
isReverse: isReverse,
|
||||
isInvalid: false,
|
||||
}
|
||||
}
|
||||
|
||||
func (itr cLevelDBIterator) Domain() ([]byte, []byte) {
|
||||
return itr.start, itr.end
|
||||
}
|
||||
|
||||
func (itr cLevelDBIterator) Valid() bool {
|
||||
|
||||
// Once invalid, forever invalid.
|
||||
if itr.isInvalid {
|
||||
return false
|
||||
}
|
||||
|
||||
// Panic on DB error. No way to recover.
|
||||
itr.assertNoError()
|
||||
|
||||
// If source is invalid, invalid.
|
||||
if !itr.source.Valid() {
|
||||
itr.isInvalid = true
|
||||
return false
|
||||
}
|
||||
|
||||
// If key is end or past it, invalid.
|
||||
var start = itr.start
|
||||
var end = itr.end
|
||||
var key = itr.source.Key()
|
||||
if itr.isReverse {
|
||||
if start != nil && bytes.Compare(key, start) < 0 {
|
||||
itr.isInvalid = true
|
||||
return false
|
||||
}
|
||||
} else {
|
||||
if end != nil && bytes.Compare(end, key) <= 0 {
|
||||
itr.isInvalid = true
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// It's valid.
|
||||
return true
|
||||
}
|
||||
|
||||
func (itr cLevelDBIterator) Key() []byte {
|
||||
itr.assertNoError()
|
||||
itr.assertIsValid()
|
||||
return itr.source.Key()
|
||||
}
|
||||
|
||||
func (itr cLevelDBIterator) Value() []byte {
|
||||
itr.assertNoError()
|
||||
itr.assertIsValid()
|
||||
return itr.source.Value()
|
||||
}
|
||||
|
||||
func (itr cLevelDBIterator) Next() {
|
||||
itr.assertNoError()
|
||||
itr.assertIsValid()
|
||||
if itr.isReverse {
|
||||
itr.source.Prev()
|
||||
} else {
|
||||
itr.source.Next()
|
||||
}
|
||||
}
|
||||
|
||||
func (itr cLevelDBIterator) Close() {
|
||||
itr.source.Close()
|
||||
}
|
||||
|
||||
func (itr cLevelDBIterator) assertNoError() {
|
||||
if err := itr.source.GetError(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
func (itr cLevelDBIterator) assertIsValid() {
|
||||
if !itr.Valid() {
|
||||
panic("cLevelDBIterator is invalid")
|
||||
}
|
||||
}
|
@@ -1,110 +0,0 @@
// +build cleveldb

package db

import (
	"bytes"
	"fmt"
	"os"
	"testing"

	"github.com/stretchr/testify/assert"

	cmn "github.com/tendermint/tendermint/libs/common"
)

func BenchmarkRandomReadsWrites2(b *testing.B) {
	b.StopTimer()

	numItems := int64(1000000)
	internal := map[int64]int64{}
	for i := 0; i < int(numItems); i++ {
		internal[int64(i)] = int64(0)
	}
	db, err := NewCLevelDB(fmt.Sprintf("test_%x", cmn.RandStr(12)), "")
	if err != nil {
		b.Fatal(err.Error())
	}

	fmt.Println("ok, starting")
	b.StartTimer()

	for i := 0; i < b.N; i++ {
		// Write something
		{
			idx := int64(cmn.RandInt()) % numItems
			internal[idx]++
			val := internal[idx]
			idxBytes := int642Bytes(idx)
			valBytes := int642Bytes(val)
			//fmt.Printf("Set %X -> %X\n", idxBytes, valBytes)
			db.Set(idxBytes, valBytes)
		}
		// Read something
		{
			idx := int64(cmn.RandInt()) % numItems
			val := internal[idx]
			idxBytes := int642Bytes(idx)
			valBytes := db.Get(idxBytes)
			//fmt.Printf("Get %X -> %X\n", idxBytes, valBytes)
			if val == 0 {
				if !bytes.Equal(valBytes, nil) {
					b.Errorf("Expected %v for %v, got %X",
						nil, idx, valBytes)
					break
				}
			} else {
				if len(valBytes) != 8 {
					b.Errorf("Expected length 8 for %v, got %X",
						idx, valBytes)
					break
				}
				valGot := bytes2Int64(valBytes)
				if val != valGot {
					b.Errorf("Expected %v for %v, got %v",
						val, idx, valGot)
					break
				}
			}
		}
	}

	db.Close()
}

/*
func int642Bytes(i int64) []byte {
	buf := make([]byte, 8)
	binary.BigEndian.PutUint64(buf, uint64(i))
	return buf
}

func bytes2Int64(buf []byte) int64 {
	return int64(binary.BigEndian.Uint64(buf))
}
*/

func TestCLevelDBBackend(t *testing.T) {
	name := fmt.Sprintf("test_%x", cmn.RandStr(12))
	// Can't use "" (current directory) or "./" here because levigo.Open returns:
	// "Error initializing DB: IO error: test_XXX.db: Invalid argument"
	dir := os.TempDir()
	db := NewDB(name, CLevelDBBackend, dir)
	defer cleanupDBDir(dir, name)

	_, ok := db.(*CLevelDB)
	assert.True(t, ok)
}

func TestCLevelDBStats(t *testing.T) {
	name := fmt.Sprintf("test_%x", cmn.RandStr(12))
	dir := os.TempDir()
	db := NewDB(name, CLevelDBBackend, dir)
	defer cleanupDBDir(dir, name)

	assert.NotEmpty(t, db.Stats())
}
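Note that the file above compiles only under the cleveldb build tag (matching the CLevelDBBackend comment later in this diff), so a plain go test skips it; exercising it takes something like go test -tags cleveldb, with LevelDB and levigo installed.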
@@ -1,256 +0,0 @@
package db

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io/ioutil"
	"sync"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	cmn "github.com/tendermint/tendermint/libs/common"
)

//----------------------------------------
// Helper functions.

func checkValue(t *testing.T, db DB, key []byte, valueWanted []byte) {
	valueGot := db.Get(key)
	assert.Equal(t, valueWanted, valueGot)
}

func checkValid(t *testing.T, itr Iterator, expected bool) {
	valid := itr.Valid()
	require.Equal(t, expected, valid)
}

func checkNext(t *testing.T, itr Iterator, expected bool) {
	itr.Next()
	valid := itr.Valid()
	require.Equal(t, expected, valid)
}

func checkNextPanics(t *testing.T, itr Iterator) {
	assert.Panics(t, func() { itr.Next() }, "checkNextPanics expected panic but didn't")
}

func checkDomain(t *testing.T, itr Iterator, start, end []byte) {
	ds, de := itr.Domain()
	assert.Equal(t, start, ds, "checkDomain domain start incorrect")
	assert.Equal(t, end, de, "checkDomain domain end incorrect")
}

func checkItem(t *testing.T, itr Iterator, key []byte, value []byte) {
	k, v := itr.Key(), itr.Value()
	assert.Exactly(t, key, k)
	assert.Exactly(t, value, v)
}

func checkInvalid(t *testing.T, itr Iterator) {
	checkValid(t, itr, false)
	checkKeyPanics(t, itr)
	checkValuePanics(t, itr)
	checkNextPanics(t, itr)
}

func checkKeyPanics(t *testing.T, itr Iterator) {
	assert.Panics(t, func() { itr.Key() }, "checkKeyPanics expected panic but didn't")
}

func checkValuePanics(t *testing.T, itr Iterator) {
	assert.Panics(t, func() { itr.Value() }, "checkValuePanics expected panic but didn't")
}

func newTempDB(t *testing.T, backend DBBackendType) (db DB, dbDir string) {
	dirname, err := ioutil.TempDir("", "db_common_test")
	require.Nil(t, err)
	return NewDB("testdb", backend, dirname), dirname
}

//----------------------------------------
// mockDB

// NOTE: not actually goroutine safe.
// If you want something goroutine safe, maybe you just want a MemDB.
type mockDB struct {
	mtx   sync.Mutex
	calls map[string]int
}

func newMockDB() *mockDB {
	return &mockDB{
		calls: make(map[string]int),
	}
}

func (mdb *mockDB) Mutex() *sync.Mutex {
	return &(mdb.mtx)
}

func (mdb *mockDB) Get([]byte) []byte {
	mdb.calls["Get"]++
	return nil
}

func (mdb *mockDB) Has([]byte) bool {
	mdb.calls["Has"]++
	return false
}

func (mdb *mockDB) Set([]byte, []byte) {
	mdb.calls["Set"]++
}

func (mdb *mockDB) SetSync([]byte, []byte) {
	mdb.calls["SetSync"]++
}

func (mdb *mockDB) SetNoLock([]byte, []byte) {
	mdb.calls["SetNoLock"]++
}

func (mdb *mockDB) SetNoLockSync([]byte, []byte) {
	mdb.calls["SetNoLockSync"]++
}

func (mdb *mockDB) Delete([]byte) {
	mdb.calls["Delete"]++
}

func (mdb *mockDB) DeleteSync([]byte) {
	mdb.calls["DeleteSync"]++
}

func (mdb *mockDB) DeleteNoLock([]byte) {
	mdb.calls["DeleteNoLock"]++
}

func (mdb *mockDB) DeleteNoLockSync([]byte) {
	mdb.calls["DeleteNoLockSync"]++
}

func (mdb *mockDB) Iterator(start, end []byte) Iterator {
	mdb.calls["Iterator"]++
	return &mockIterator{}
}

func (mdb *mockDB) ReverseIterator(start, end []byte) Iterator {
	mdb.calls["ReverseIterator"]++
	return &mockIterator{}
}

func (mdb *mockDB) Close() {
	mdb.calls["Close"]++
}

func (mdb *mockDB) NewBatch() Batch {
	mdb.calls["NewBatch"]++
	return &memBatch{db: mdb}
}

func (mdb *mockDB) Print() {
	mdb.calls["Print"]++
	fmt.Printf("mockDB{%v}", mdb.Stats())
}

func (mdb *mockDB) Stats() map[string]string {
	mdb.calls["Stats"]++

	res := make(map[string]string)
	for key, count := range mdb.calls {
		res[key] = fmt.Sprintf("%d", count)
	}
	return res
}

//----------------------------------------
// mockIterator

type mockIterator struct{}

func (mockIterator) Domain() (start []byte, end []byte) {
	return nil, nil
}

func (mockIterator) Valid() bool {
	return false
}

func (mockIterator) Next() {
}

func (mockIterator) Key() []byte {
	return nil
}

func (mockIterator) Value() []byte {
	return nil
}

func (mockIterator) Close() {
}

func benchmarkRandomReadsWrites(b *testing.B, db DB) {
	b.StopTimer()

	// create dummy data
	const numItems = int64(1000000)
	internal := map[int64]int64{}
	for i := 0; i < int(numItems); i++ {
		internal[int64(i)] = int64(0)
	}

	// fmt.Println("ok, starting")
	b.StartTimer()

	for i := 0; i < b.N; i++ {
		// Write something
		{
			idx := int64(cmn.RandInt()) % numItems
			internal[idx]++
			val := internal[idx]
			idxBytes := int642Bytes(idx)
			valBytes := int642Bytes(val)
			//fmt.Printf("Set %X -> %X\n", idxBytes, valBytes)
			db.Set(idxBytes, valBytes)
		}

		// Read something
		{
			idx := int64(cmn.RandInt()) % numItems
			valExp := internal[idx]
			idxBytes := int642Bytes(idx)
			valBytes := db.Get(idxBytes)
			//fmt.Printf("Get %X -> %X\n", idxBytes, valBytes)
			if valExp == 0 {
				if !bytes.Equal(valBytes, nil) {
					b.Errorf("Expected %v for %v, got %X", nil, idx, valBytes)
					break
				}
			} else {
				if len(valBytes) != 8 {
					b.Errorf("Expected length 8 for %v, got %X", idx, valBytes)
					break
				}
				valGot := bytes2Int64(valBytes)
				if valExp != valGot {
					b.Errorf("Expected %v for %v, got %v", valExp, idx, valGot)
					break
				}
			}
		}
	}
}

func int642Bytes(i int64) []byte {
	buf := make([]byte, 8)
	binary.BigEndian.PutUint64(buf, uint64(i))
	return buf
}

func bytes2Int64(buf []byte) int64 {
	return int64(binary.BigEndian.Uint64(buf))
}
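int642Bytes deliberately encodes big-endian so that byte-wise key order matches numeric order, which is what range iteration over the encoded keys relies on. A quick illustration (assuming the two helpers above plus "bytes" and "fmt"):

	k1, k2 := int642Bytes(1), int642Bytes(2)
	fmt.Println(bytes2Int64(k1))       // 1: lossless round trip
	fmt.Println(bytes.Compare(k1, k2)) // -1: numeric order preserved byte-wise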
@@ -1,70 +0,0 @@
package db

import (
	"fmt"
	"strings"
)

type DBBackendType string

// These are valid backend types.
const (
	// GoLevelDBBackend represents goleveldb (github.com/syndtr/goleveldb - most
	// popular implementation)
	//   - pure go
	//   - stable
	GoLevelDBBackend DBBackendType = "goleveldb"
	// CLevelDBBackend represents cleveldb (uses levigo wrapper)
	//   - fast
	//   - requires gcc
	//   - use cleveldb build tag (go build -tags cleveldb)
	CLevelDBBackend DBBackendType = "cleveldb"
	// MemDBBackend represents an in-memory key-value store, which is mostly
	// used for testing.
	MemDBBackend DBBackendType = "memdb"
	// FSDBBackend represents filesystem database
	//   - EXPERIMENTAL
	//   - slow
	FSDBBackend DBBackendType = "fsdb"
	// BoltDBBackend represents bolt (uses etcd's fork of bolt -
	// github.com/etcd-io/bbolt)
	//   - EXPERIMENTAL
	//   - may be faster in some use-cases (random reads - indexer)
	//   - use boltdb build tag (go build -tags boltdb)
	BoltDBBackend DBBackendType = "boltdb"
)

type dbCreator func(name string, dir string) (DB, error)

var backends = map[DBBackendType]dbCreator{}

func registerDBCreator(backend DBBackendType, creator dbCreator, force bool) {
	_, ok := backends[backend]
	if !force && ok {
		return
	}
	backends[backend] = creator
}

// NewDB creates a new database of type backend with the given name.
// NOTE: function panics if:
//   - backend is unknown (not registered)
//   - creator function, provided during registration, returns error
func NewDB(name string, backend DBBackendType, dir string) DB {
	dbCreator, ok := backends[backend]
	if !ok {
		keys := make([]string, len(backends))
		i := 0
		for k := range backends {
			keys[i] = string(k)
			i++
		}
		panic(fmt.Sprintf("Unknown db_backend %s, expected either %s", backend, strings.Join(keys, " or ")))
	}

	db, err := dbCreator(name, dir)
	if err != nil {
		panic(fmt.Sprintf("Error initializing DB: %v", err))
	}
	return db
}
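Callers never touch the registry directly: each backend file registers itself in an init() (see fsdb.go and go_level_db.go later in this diff), so opening a store is a one-liner. A hedged usage sketch, with a made-up name and path:

	db := NewDB("blockstore", GoLevelDBBackend, "/tmp/data") // panics if the backend isn't compiled in
	defer db.Close()
	db.Set([]byte("k"), []byte("v"))
	fmt.Printf("%s\n", db.Get([]byte("k"))) // "v"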
@@ -1,194 +0,0 @@
package db

import (
	"fmt"
	"os"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestDBIteratorSingleKey(t *testing.T) {
	for backend := range backends {
		t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) {
			db, dir := newTempDB(t, backend)
			defer os.RemoveAll(dir)

			db.SetSync(bz("1"), bz("value_1"))
			itr := db.Iterator(nil, nil)

			checkValid(t, itr, true)
			checkNext(t, itr, false)
			checkValid(t, itr, false)
			checkNextPanics(t, itr)

			// Once invalid...
			checkInvalid(t, itr)
		})
	}
}

func TestDBIteratorTwoKeys(t *testing.T) {
	for backend := range backends {
		t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) {
			db, dir := newTempDB(t, backend)
			defer os.RemoveAll(dir)

			db.SetSync(bz("1"), bz("value_1"))
			db.SetSync(bz("2"), bz("value_1"))

			{ // Fail by calling Next too much
				itr := db.Iterator(nil, nil)
				checkValid(t, itr, true)

				checkNext(t, itr, true)
				checkValid(t, itr, true)

				checkNext(t, itr, false)
				checkValid(t, itr, false)

				checkNextPanics(t, itr)

				// Once invalid...
				checkInvalid(t, itr)
			}
		})
	}
}

func TestDBIteratorMany(t *testing.T) {
	for backend := range backends {
		t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) {
			db, dir := newTempDB(t, backend)
			defer os.RemoveAll(dir)

			keys := make([][]byte, 100)
			for i := 0; i < 100; i++ {
				keys[i] = []byte{byte(i)}
			}

			value := []byte{5}
			for _, k := range keys {
				db.Set(k, value)
			}

			itr := db.Iterator(nil, nil)
			defer itr.Close()
			for ; itr.Valid(); itr.Next() {
				assert.Equal(t, db.Get(itr.Key()), itr.Value())
			}
		})
	}
}

func TestDBIteratorEmpty(t *testing.T) {
	for backend := range backends {
		t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) {
			db, dir := newTempDB(t, backend)
			defer os.RemoveAll(dir)

			itr := db.Iterator(nil, nil)

			checkInvalid(t, itr)
		})
	}
}

func TestDBIteratorEmptyBeginAfter(t *testing.T) {
	for backend := range backends {
		t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) {
			db, dir := newTempDB(t, backend)
			defer os.RemoveAll(dir)

			itr := db.Iterator(bz("1"), nil)

			checkInvalid(t, itr)
		})
	}
}

func TestDBIteratorNonemptyBeginAfter(t *testing.T) {
	for backend := range backends {
		t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) {
			db, dir := newTempDB(t, backend)
			defer os.RemoveAll(dir)

			db.SetSync(bz("1"), bz("value_1"))
			itr := db.Iterator(bz("2"), nil)

			checkInvalid(t, itr)
		})
	}
}

func TestDBBatchWrite(t *testing.T) {
	testCases := []struct {
		modify func(batch Batch)
		calls  map[string]int
	}{
		0: {
			func(batch Batch) {
				batch.Set(bz("1"), bz("1"))
				batch.Set(bz("2"), bz("2"))
				batch.Delete(bz("3"))
				batch.Set(bz("4"), bz("4"))
				batch.Write()
			},
			map[string]int{
				"Set": 0, "SetSync": 0, "SetNoLock": 3, "SetNoLockSync": 0,
				"Delete": 0, "DeleteSync": 0, "DeleteNoLock": 1, "DeleteNoLockSync": 0,
			},
		},
		1: {
			func(batch Batch) {
				batch.Set(bz("1"), bz("1"))
				batch.Set(bz("2"), bz("2"))
				batch.Set(bz("4"), bz("4"))
				batch.Delete(bz("3"))
				batch.Write()
			},
			map[string]int{
				"Set": 0, "SetSync": 0, "SetNoLock": 3, "SetNoLockSync": 0,
				"Delete": 0, "DeleteSync": 0, "DeleteNoLock": 1, "DeleteNoLockSync": 0,
			},
		},
		2: {
			func(batch Batch) {
				batch.Set(bz("1"), bz("1"))
				batch.Set(bz("2"), bz("2"))
				batch.Delete(bz("3"))
				batch.Set(bz("4"), bz("4"))
				batch.WriteSync()
			},
			map[string]int{
				"Set": 0, "SetSync": 0, "SetNoLock": 2, "SetNoLockSync": 1,
				"Delete": 0, "DeleteSync": 0, "DeleteNoLock": 1, "DeleteNoLockSync": 0,
			},
		},
		3: {
			func(batch Batch) {
				batch.Set(bz("1"), bz("1"))
				batch.Set(bz("2"), bz("2"))
				batch.Set(bz("4"), bz("4"))
				batch.Delete(bz("3"))
				batch.WriteSync()
			},
			map[string]int{
				"Set": 0, "SetSync": 0, "SetNoLock": 3, "SetNoLockSync": 0,
				"Delete": 0, "DeleteSync": 0, "DeleteNoLock": 0, "DeleteNoLockSync": 1,
			},
		},
	}

	for i, tc := range testCases {
		mdb := newMockDB()
		batch := mdb.NewBatch()

		tc.modify(batch)

		for call, exp := range tc.calls {
			got := mdb.calls[call]
			assert.Equal(t, exp, got, "#%v - key: %s", i, call)
		}
	}
}
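The call-count tables above pin down the Batch contract implemented by memBatch later in this diff: Write() replays every queued op through the NoLock variants, and WriteSync() upgrades only the final op to its Sync variant. In usage terms, a sketch against the mockDB defined earlier:

	mdb := newMockDB()
	batch := mdb.NewBatch()
	batch.Set([]byte("a"), []byte("1"))
	batch.Delete([]byte("b"))
	batch.WriteSync() // "a" via SetNoLock, "b" via DeleteNoLockSync (the last op carries the sync)
	batch.Close()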
libs/db/fsdb.go
@@ -1,270 +0,0 @@
package db

import (
	"fmt"
	"io/ioutil"
	"net/url"
	"os"
	"path/filepath"
	"sort"
	"sync"

	"github.com/pkg/errors"

	cmn "github.com/tendermint/tendermint/libs/common"
)

const (
	keyPerm = os.FileMode(0600)
	dirPerm = os.FileMode(0700)
)

func init() {
	registerDBCreator(FSDBBackend, func(name, dir string) (DB, error) {
		dbPath := filepath.Join(dir, name+".db")
		return NewFSDB(dbPath), nil
	}, false)
}

var _ DB = (*FSDB)(nil)

// It's slow.
type FSDB struct {
	mtx sync.Mutex
	dir string
}

func NewFSDB(dir string) *FSDB {
	err := os.MkdirAll(dir, dirPerm)
	if err != nil {
		panic(errors.Wrap(err, "Creating FSDB dir "+dir))
	}
	database := &FSDB{
		dir: dir,
	}
	return database
}

func (db *FSDB) Get(key []byte) []byte {
	db.mtx.Lock()
	defer db.mtx.Unlock()
	key = escapeKey(key)

	path := db.nameToPath(key)
	value, err := read(path)
	if os.IsNotExist(err) {
		return nil
	} else if err != nil {
		panic(errors.Wrapf(err, "Getting key %s (0x%X)", string(key), key))
	}
	return value
}

func (db *FSDB) Has(key []byte) bool {
	db.mtx.Lock()
	defer db.mtx.Unlock()
	key = escapeKey(key)

	path := db.nameToPath(key)
	return cmn.FileExists(path)
}

func (db *FSDB) Set(key []byte, value []byte) {
	db.mtx.Lock()
	defer db.mtx.Unlock()

	db.SetNoLock(key, value)
}

func (db *FSDB) SetSync(key []byte, value []byte) {
	db.mtx.Lock()
	defer db.mtx.Unlock()

	db.SetNoLock(key, value)
}

// NOTE: Implements atomicSetDeleter.
func (db *FSDB) SetNoLock(key []byte, value []byte) {
	key = escapeKey(key)
	value = nonNilBytes(value)
	path := db.nameToPath(key)
	err := write(path, value)
	if err != nil {
		panic(errors.Wrapf(err, "Setting key %s (0x%X)", string(key), key))
	}
}

func (db *FSDB) Delete(key []byte) {
	db.mtx.Lock()
	defer db.mtx.Unlock()

	db.DeleteNoLock(key)
}

func (db *FSDB) DeleteSync(key []byte) {
	db.mtx.Lock()
	defer db.mtx.Unlock()

	db.DeleteNoLock(key)
}

// NOTE: Implements atomicSetDeleter.
func (db *FSDB) DeleteNoLock(key []byte) {
	key = escapeKey(key)
	path := db.nameToPath(key)
	err := remove(path)
	if os.IsNotExist(err) {
		return
	} else if err != nil {
		panic(errors.Wrapf(err, "Removing key %s (0x%X)", string(key), key))
	}
}

func (db *FSDB) Close() {
	// Nothing to do.
}

func (db *FSDB) Print() {
	db.mtx.Lock()
	defer db.mtx.Unlock()

	panic("FSDB.Print not yet implemented")
}

func (db *FSDB) Stats() map[string]string {
	db.mtx.Lock()
	defer db.mtx.Unlock()

	panic("FSDB.Stats not yet implemented")
}

func (db *FSDB) NewBatch() Batch {
	db.mtx.Lock()
	defer db.mtx.Unlock()

	// Not sure we would ever want to try...
	// It doesn't seem easy for general filesystems.
	panic("FSDB.NewBatch not yet implemented")
}

func (db *FSDB) Mutex() *sync.Mutex {
	return &(db.mtx)
}

func (db *FSDB) Iterator(start, end []byte) Iterator {
	return db.MakeIterator(start, end, false)
}

func (db *FSDB) MakeIterator(start, end []byte, isReversed bool) Iterator {
	db.mtx.Lock()
	defer db.mtx.Unlock()

	// We need a copy of all of the keys.
	// Not the best, but probably not a bottleneck depending.
	keys, err := list(db.dir, start, end)
	if err != nil {
		panic(errors.Wrapf(err, "Listing keys in %s", db.dir))
	}
	if isReversed {
		sort.Sort(sort.Reverse(sort.StringSlice(keys)))
	} else {
		sort.Strings(keys)
	}
	return newMemDBIterator(db, keys, start, end)
}

func (db *FSDB) ReverseIterator(start, end []byte) Iterator {
	return db.MakeIterator(start, end, true)
}

func (db *FSDB) nameToPath(name []byte) string {
	n := url.PathEscape(string(name))
	return filepath.Join(db.dir, n)
}

// Read some bytes from a file.
// CONTRACT: returns os errors directly without wrapping.
func read(path string) ([]byte, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	d, err := ioutil.ReadAll(f)
	if err != nil {
		return nil, err
	}
	return d, nil
}

// Write some bytes to a file.
// CONTRACT: returns os errors directly without wrapping.
func write(path string, d []byte) error {
	f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, keyPerm)
	if err != nil {
		return err
	}
	defer f.Close()
	// fInfo, err := f.Stat()
	// if err != nil {
	// 	return err
	// }
	// if fInfo.Mode() != keyPerm {
	// 	return tmerrors.NewErrPermissionsChanged(f.Name(), keyPerm, fInfo.Mode())
	// }
	_, err = f.Write(d)
	if err != nil {
		return err
	}
	err = f.Sync()
	return err
}

// Remove a file.
// CONTRACT: returns os errors directly without wrapping.
func remove(path string) error {
	return os.Remove(path)
}

// List keys in a directory, stripping off escape sequences and dir portions.
// CONTRACT: returns os errors directly without wrapping.
func list(dirPath string, start, end []byte) ([]string, error) {
	dir, err := os.Open(dirPath)
	if err != nil {
		return nil, err
	}
	defer dir.Close()

	names, err := dir.Readdirnames(0)
	if err != nil {
		return nil, err
	}
	var keys []string
	for _, name := range names {
		n, err := url.PathUnescape(name)
		if err != nil {
			return nil, fmt.Errorf("Failed to unescape %s while listing", name)
		}
		key := unescapeKey([]byte(n))
		if IsKeyInDomain(key, start, end) {
			keys = append(keys, string(key))
		}
	}
	return keys, nil
}

// To support empty or nil keys, while the file system doesn't allow empty
// filenames.
func escapeKey(key []byte) []byte {
	return []byte("k_" + string(key))
}

func unescapeKey(escKey []byte) []byte {
	if len(escKey) < 2 {
		panic(fmt.Sprintf("Invalid esc key: %x", escKey))
	}
	if string(escKey[:2]) != "k_" {
		panic(fmt.Sprintf("Invalid esc key: %x", escKey))
	}
	return escKey[2:]
}
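The escaping here is two-layered: escapeKey prefixes "k_" so empty and nil keys still yield a legal filename, and nameToPath then percent-escapes the result so path separators can't break out of the directory. The round trip is lossless; for example (hypothetical key, assuming "net/url" and "fmt"):

	key := []byte("a/b")
	esc := url.PathEscape(string(escapeKey(key))) // "k_a%2Fb" — safe as a filename
	n, _ := url.PathUnescape(esc)
	fmt.Printf("%s\n", unescapeKey([]byte(n)))    // "a/b"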
@@ -1,333 +0,0 @@
package db

import (
	"bytes"
	"fmt"
	"path/filepath"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/errors"
	"github.com/syndtr/goleveldb/leveldb/iterator"
	"github.com/syndtr/goleveldb/leveldb/opt"
)

func init() {
	dbCreator := func(name string, dir string) (DB, error) {
		return NewGoLevelDB(name, dir)
	}
	registerDBCreator(GoLevelDBBackend, dbCreator, false)
}

var _ DB = (*GoLevelDB)(nil)

type GoLevelDB struct {
	db *leveldb.DB
}

func NewGoLevelDB(name string, dir string) (*GoLevelDB, error) {
	return NewGoLevelDBWithOpts(name, dir, nil)
}

func NewGoLevelDBWithOpts(name string, dir string, o *opt.Options) (*GoLevelDB, error) {
	dbPath := filepath.Join(dir, name+".db")
	db, err := leveldb.OpenFile(dbPath, o)
	if err != nil {
		return nil, err
	}
	database := &GoLevelDB{
		db: db,
	}
	return database, nil
}

// Implements DB.
func (db *GoLevelDB) Get(key []byte) []byte {
	key = nonNilBytes(key)
	res, err := db.db.Get(key, nil)
	if err != nil {
		if err == errors.ErrNotFound {
			return nil
		}
		panic(err)
	}
	return res
}

// Implements DB.
func (db *GoLevelDB) Has(key []byte) bool {
	return db.Get(key) != nil
}

// Implements DB.
func (db *GoLevelDB) Set(key []byte, value []byte) {
	key = nonNilBytes(key)
	value = nonNilBytes(value)
	err := db.db.Put(key, value, nil)
	if err != nil {
		panic(err)
	}
}

// Implements DB.
func (db *GoLevelDB) SetSync(key []byte, value []byte) {
	key = nonNilBytes(key)
	value = nonNilBytes(value)
	err := db.db.Put(key, value, &opt.WriteOptions{Sync: true})
	if err != nil {
		panic(err)
	}
}

// Implements DB.
func (db *GoLevelDB) Delete(key []byte) {
	key = nonNilBytes(key)
	err := db.db.Delete(key, nil)
	if err != nil {
		panic(err)
	}
}

// Implements DB.
func (db *GoLevelDB) DeleteSync(key []byte) {
	key = nonNilBytes(key)
	err := db.db.Delete(key, &opt.WriteOptions{Sync: true})
	if err != nil {
		panic(err)
	}
}

func (db *GoLevelDB) DB() *leveldb.DB {
	return db.db
}

// Implements DB.
func (db *GoLevelDB) Close() {
	db.db.Close()
}

// Implements DB.
func (db *GoLevelDB) Print() {
	str, _ := db.db.GetProperty("leveldb.stats")
	fmt.Printf("%v\n", str)

	itr := db.db.NewIterator(nil, nil)
	for itr.Next() {
		key := itr.Key()
		value := itr.Value()
		fmt.Printf("[%X]:\t[%X]\n", key, value)
	}
}

// Implements DB.
func (db *GoLevelDB) Stats() map[string]string {
	keys := []string{
		"leveldb.num-files-at-level{n}",
		"leveldb.stats",
		"leveldb.sstables",
		"leveldb.blockpool",
		"leveldb.cachedblock",
		"leveldb.openedtables",
		"leveldb.alivesnaps",
		"leveldb.aliveiters",
	}

	stats := make(map[string]string)
	for _, key := range keys {
		str, err := db.db.GetProperty(key)
		if err == nil {
			stats[key] = str
		}
	}
	return stats
}

//----------------------------------------
// Batch

// Implements DB.
func (db *GoLevelDB) NewBatch() Batch {
	batch := new(leveldb.Batch)
	return &goLevelDBBatch{db, batch}
}

type goLevelDBBatch struct {
	db    *GoLevelDB
	batch *leveldb.Batch
}

// Implements Batch.
func (mBatch *goLevelDBBatch) Set(key, value []byte) {
	mBatch.batch.Put(key, value)
}

// Implements Batch.
func (mBatch *goLevelDBBatch) Delete(key []byte) {
	mBatch.batch.Delete(key)
}

// Implements Batch.
func (mBatch *goLevelDBBatch) Write() {
	err := mBatch.db.db.Write(mBatch.batch, &opt.WriteOptions{Sync: false})
	if err != nil {
		panic(err)
	}
}

// Implements Batch.
func (mBatch *goLevelDBBatch) WriteSync() {
	err := mBatch.db.db.Write(mBatch.batch, &opt.WriteOptions{Sync: true})
	if err != nil {
		panic(err)
	}
}

// Implements Batch.
// Close is no-op for goLevelDBBatch.
func (mBatch *goLevelDBBatch) Close() {}

//----------------------------------------
// Iterator
// NOTE This is almost identical to db/c_level_db.Iterator
// Before creating a third version, refactor.

// Implements DB.
func (db *GoLevelDB) Iterator(start, end []byte) Iterator {
	itr := db.db.NewIterator(nil, nil)
	return newGoLevelDBIterator(itr, start, end, false)
}

// Implements DB.
func (db *GoLevelDB) ReverseIterator(start, end []byte) Iterator {
	itr := db.db.NewIterator(nil, nil)
	return newGoLevelDBIterator(itr, start, end, true)
}

type goLevelDBIterator struct {
	source    iterator.Iterator
	start     []byte
	end       []byte
	isReverse bool
	isInvalid bool
}

var _ Iterator = (*goLevelDBIterator)(nil)

func newGoLevelDBIterator(source iterator.Iterator, start, end []byte, isReverse bool) *goLevelDBIterator {
	if isReverse {
		if end == nil {
			source.Last()
		} else {
			valid := source.Seek(end)
			if valid {
				eoakey := source.Key() // end or after key
				if bytes.Compare(end, eoakey) <= 0 {
					source.Prev()
				}
			} else {
				source.Last()
			}
		}
	} else {
		if start == nil {
			source.First()
		} else {
			source.Seek(start)
		}
	}
	return &goLevelDBIterator{
		source:    source,
		start:     start,
		end:       end,
		isReverse: isReverse,
		isInvalid: false,
	}
}

// Implements Iterator.
func (itr *goLevelDBIterator) Domain() ([]byte, []byte) {
	return itr.start, itr.end
}

// Implements Iterator.
func (itr *goLevelDBIterator) Valid() bool {

	// Once invalid, forever invalid.
	if itr.isInvalid {
		return false
	}

	// Panic on DB error. No way to recover.
	itr.assertNoError()

	// If source is invalid, invalid.
	if !itr.source.Valid() {
		itr.isInvalid = true
		return false
	}

	// If key is end or past it, invalid.
	var start = itr.start
	var end = itr.end
	var key = itr.source.Key()

	if itr.isReverse {
		if start != nil && bytes.Compare(key, start) < 0 {
			itr.isInvalid = true
			return false
		}
	} else {
		if end != nil && bytes.Compare(end, key) <= 0 {
			itr.isInvalid = true
			return false
		}
	}

	// Valid
	return true
}

// Implements Iterator.
func (itr *goLevelDBIterator) Key() []byte {
	// Key returns a copy of the current key.
	// See https://github.com/syndtr/goleveldb/blob/52c212e6c196a1404ea59592d3f1c227c9f034b2/leveldb/iterator/iter.go#L88
	itr.assertNoError()
	itr.assertIsValid()
	return cp(itr.source.Key())
}

// Implements Iterator.
func (itr *goLevelDBIterator) Value() []byte {
	// Value returns a copy of the current value.
	// See https://github.com/syndtr/goleveldb/blob/52c212e6c196a1404ea59592d3f1c227c9f034b2/leveldb/iterator/iter.go#L88
	itr.assertNoError()
	itr.assertIsValid()
	return cp(itr.source.Value())
}

// Implements Iterator.
func (itr *goLevelDBIterator) Next() {
	itr.assertNoError()
	itr.assertIsValid()
	if itr.isReverse {
		itr.source.Prev()
	} else {
		itr.source.Next()
	}
}

// Implements Iterator.
func (itr *goLevelDBIterator) Close() {
	itr.source.Release()
}

func (itr *goLevelDBIterator) assertNoError() {
	if err := itr.source.Error(); err != nil {
		panic(err)
	}
}

func (itr *goLevelDBIterator) assertIsValid() {
	if !itr.Valid() {
		panic("goLevelDBIterator is invalid")
	}
}
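The cp() calls in Key() and Value() matter: goleveldb reuses its internal buffers, so without the copy a later Next() could mutate bytes a caller is still holding. That makes the plain range-scan pattern safe to use (a sketch, assuming an open *GoLevelDB named db):

	itr := db.Iterator(nil, nil) // full key space, half-open [start, end)
	defer itr.Close()
	for ; itr.Valid(); itr.Next() {
		fmt.Printf("%X -> %X\n", itr.Key(), itr.Value()) // copies: safe to retain
	}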
@@ -1,45 +0,0 @@
package db

import (
	"fmt"
	"testing"

	"github.com/stretchr/testify/require"
	"github.com/syndtr/goleveldb/leveldb/opt"

	cmn "github.com/tendermint/tendermint/libs/common"
)

func TestGoLevelDBNewGoLevelDB(t *testing.T) {
	name := fmt.Sprintf("test_%x", cmn.RandStr(12))
	defer cleanupDBDir("", name)

	// Test we can't open the db twice for writing
	wr1, err := NewGoLevelDB(name, "")
	require.Nil(t, err)
	_, err = NewGoLevelDB(name, "")
	require.NotNil(t, err)
	wr1.Close() // Close the db to release the lock

	// Test we can open the db twice for reading only.
	// (Check the error before deferring Close, so a failed open
	// can't leave a nil handle to dereference.)
	ro1, err := NewGoLevelDBWithOpts(name, "", &opt.Options{ReadOnly: true})
	require.Nil(t, err)
	defer ro1.Close()
	ro2, err := NewGoLevelDBWithOpts(name, "", &opt.Options{ReadOnly: true})
	require.Nil(t, err)
	defer ro2.Close()
}

func BenchmarkGoLevelDBRandomReadsWrites(b *testing.B) {
	name := fmt.Sprintf("test_%x", cmn.RandStr(12))
	db, err := NewGoLevelDB(name, "")
	if err != nil {
		b.Fatal(err)
	}
	defer func() {
		db.Close()
		cleanupDBDir("", name)
	}()

	benchmarkRandomReadsWrites(b, db)
}
@@ -1,74 +0,0 @@
package db

import "sync"

type atomicSetDeleter interface {
	Mutex() *sync.Mutex
	SetNoLock(key, value []byte)
	SetNoLockSync(key, value []byte)
	DeleteNoLock(key []byte)
	DeleteNoLockSync(key []byte)
}

type memBatch struct {
	db  atomicSetDeleter
	ops []operation
}

type opType int

const (
	opTypeSet    opType = 1
	opTypeDelete opType = 2
)

type operation struct {
	opType
	key   []byte
	value []byte
}

func (mBatch *memBatch) Set(key, value []byte) {
	mBatch.ops = append(mBatch.ops, operation{opTypeSet, key, value})
}

func (mBatch *memBatch) Delete(key []byte) {
	mBatch.ops = append(mBatch.ops, operation{opTypeDelete, key, nil})
}

func (mBatch *memBatch) Write() {
	mBatch.write(false)
}

func (mBatch *memBatch) WriteSync() {
	mBatch.write(true)
}

func (mBatch *memBatch) Close() {
	mBatch.ops = nil
}

func (mBatch *memBatch) write(doSync bool) {
	if mtx := mBatch.db.Mutex(); mtx != nil {
		mtx.Lock()
		defer mtx.Unlock()
	}

	for i, op := range mBatch.ops {
		if doSync && i == (len(mBatch.ops)-1) {
			// Only the final op is issued as a Sync variant: one sync at the
			// end is enough to persist everything written before it.
			switch op.opType {
			case opTypeSet:
				mBatch.db.SetNoLockSync(op.key, op.value)
			case opTypeDelete:
				mBatch.db.DeleteNoLockSync(op.key)
			}
			break // we're done.
		}
		switch op.opType {
		case opTypeSet:
			mBatch.db.SetNoLock(op.key, op.value)
		case opTypeDelete:
			mBatch.db.DeleteNoLock(op.key)
		}
	}
}
@@ -1,255 +0,0 @@
package db

import (
	"fmt"
	"sort"
	"sync"
)

func init() {
	registerDBCreator(MemDBBackend, func(name, dir string) (DB, error) {
		return NewMemDB(), nil
	}, false)
}

var _ DB = (*MemDB)(nil)

type MemDB struct {
	mtx sync.Mutex
	db  map[string][]byte
}

func NewMemDB() *MemDB {
	database := &MemDB{
		db: make(map[string][]byte),
	}
	return database
}

// Implements atomicSetDeleter.
func (db *MemDB) Mutex() *sync.Mutex {
	return &(db.mtx)
}

// Implements DB.
func (db *MemDB) Get(key []byte) []byte {
	db.mtx.Lock()
	defer db.mtx.Unlock()
	key = nonNilBytes(key)

	value := db.db[string(key)]
	return value
}

// Implements DB.
func (db *MemDB) Has(key []byte) bool {
	db.mtx.Lock()
	defer db.mtx.Unlock()
	key = nonNilBytes(key)

	_, ok := db.db[string(key)]
	return ok
}

// Implements DB.
func (db *MemDB) Set(key []byte, value []byte) {
	db.mtx.Lock()
	defer db.mtx.Unlock()

	db.SetNoLock(key, value)
}

// Implements DB.
func (db *MemDB) SetSync(key []byte, value []byte) {
	db.mtx.Lock()
	defer db.mtx.Unlock()

	db.SetNoLock(key, value)
}

// Implements atomicSetDeleter.
func (db *MemDB) SetNoLock(key []byte, value []byte) {
	db.SetNoLockSync(key, value)
}

// Implements atomicSetDeleter.
func (db *MemDB) SetNoLockSync(key []byte, value []byte) {
	key = nonNilBytes(key)
	value = nonNilBytes(value)

	db.db[string(key)] = value
}

// Implements DB.
func (db *MemDB) Delete(key []byte) {
	db.mtx.Lock()
	defer db.mtx.Unlock()

	db.DeleteNoLock(key)
}

// Implements DB.
func (db *MemDB) DeleteSync(key []byte) {
	db.mtx.Lock()
	defer db.mtx.Unlock()

	db.DeleteNoLock(key)
}

// Implements atomicSetDeleter.
func (db *MemDB) DeleteNoLock(key []byte) {
	db.DeleteNoLockSync(key)
}

// Implements atomicSetDeleter.
func (db *MemDB) DeleteNoLockSync(key []byte) {
	key = nonNilBytes(key)

	delete(db.db, string(key))
}

// Implements DB.
func (db *MemDB) Close() {
	// Close is a noop since for an in-memory database, we don't have
	// a destination to flush contents to nor do we want any data loss
	// on invoking Close().
	// See the discussion in https://github.com/tendermint/tendermint/libs/pull/56
}

// Implements DB.
func (db *MemDB) Print() {
	db.mtx.Lock()
	defer db.mtx.Unlock()

	for key, value := range db.db {
		fmt.Printf("[%X]:\t[%X]\n", []byte(key), value)
	}
}

// Implements DB.
func (db *MemDB) Stats() map[string]string {
	db.mtx.Lock()
	defer db.mtx.Unlock()

	stats := make(map[string]string)
	stats["database.type"] = "memDB"
	stats["database.size"] = fmt.Sprintf("%d", len(db.db))
	return stats
}

// Implements DB.
func (db *MemDB) NewBatch() Batch {
	db.mtx.Lock()
	defer db.mtx.Unlock()

	return &memBatch{db, nil}
}

//----------------------------------------
// Iterator

// Implements DB.
func (db *MemDB) Iterator(start, end []byte) Iterator {
	db.mtx.Lock()
	defer db.mtx.Unlock()

	keys := db.getSortedKeys(start, end, false)
	return newMemDBIterator(db, keys, start, end)
}

// Implements DB.
func (db *MemDB) ReverseIterator(start, end []byte) Iterator {
	db.mtx.Lock()
	defer db.mtx.Unlock()

	keys := db.getSortedKeys(start, end, true)
	return newMemDBIterator(db, keys, start, end)
}

// We need a copy of all of the keys.
// Not the best, but probably not a bottleneck depending.
type memDBIterator struct {
	db    DB
	cur   int
	keys  []string
	start []byte
	end   []byte
}

var _ Iterator = (*memDBIterator)(nil)

// Keys is expected to be in reverse order for reverse iterators.
func newMemDBIterator(db DB, keys []string, start, end []byte) *memDBIterator {
	return &memDBIterator{
		db:    db,
		cur:   0,
		keys:  keys,
		start: start,
		end:   end,
	}
}

// Implements Iterator.
func (itr *memDBIterator) Domain() ([]byte, []byte) {
	return itr.start, itr.end
}

// Implements Iterator.
func (itr *memDBIterator) Valid() bool {
	return 0 <= itr.cur && itr.cur < len(itr.keys)
}

// Implements Iterator.
func (itr *memDBIterator) Next() {
	itr.assertIsValid()
	itr.cur++
}

// Implements Iterator.
func (itr *memDBIterator) Key() []byte {
	itr.assertIsValid()
	return []byte(itr.keys[itr.cur])
}

// Implements Iterator.
func (itr *memDBIterator) Value() []byte {
	itr.assertIsValid()
	key := []byte(itr.keys[itr.cur])
	return itr.db.Get(key)
}

// Implements Iterator.
func (itr *memDBIterator) Close() {
	itr.keys = nil
	itr.db = nil
}

func (itr *memDBIterator) assertIsValid() {
	if !itr.Valid() {
		panic("memDBIterator is invalid")
	}
}

//----------------------------------------
// Misc.

func (db *MemDB) getSortedKeys(start, end []byte, reverse bool) []string {
	keys := []string{}
	for key := range db.db {
		inDomain := IsKeyInDomain([]byte(key), start, end)
		if inDomain {
			keys = append(keys, key)
		}
	}
	sort.Strings(keys)
	if reverse {
		nkeys := len(keys)
		for i := 0; i < nkeys/2; i++ {
			keys[i], keys[nkeys-i-1] = keys[nkeys-i-1], keys[i]
		}
	}
	return keys
}
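Because the iterator snapshots the sorted key set up front, keys written after Iterator()/ReverseIterator() is called don't appear mid-scan, although Value() still reads through to the live map. A short usage sketch:

	db := NewMemDB()
	db.Set([]byte("a"), []byte("1"))
	db.Set([]byte("b"), []byte("2"))

	itr := db.ReverseIterator(nil, nil) // key snapshot: ["b", "a"]
	defer itr.Close()
	for ; itr.Valid(); itr.Next() {
		fmt.Printf("%s\n", itr.Key()) // "b" then "a"
	}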